From 6d8f122a5160f6d9e4c51579f2429dfaa62c7271 Mon Sep 17 00:00:00 2001 From: Christopher Speller Date: Fri, 16 Feb 2018 06:47:51 -0800 Subject: Upgrading server dependancies (#8308) --- vendor/github.com/davecgh/go-spew/spew/bypass.go | 187 +- .../github.com/davecgh/go-spew/spew/bypasssafe.go | 2 +- .../github.com/davecgh/go-spew/spew/dump_test.go | 2 +- .../github.com/davecgh/go-spew/spew/format_test.go | 2 +- .../davecgh/go-spew/spew/internalunsafe_test.go | 11 +- vendor/github.com/go-ini/ini/file_test.go | 97 +- vendor/github.com/go-ini/ini/ini.go | 2 +- vendor/github.com/go-ini/ini/ini_test.go | 3 + .../go-ini/ini/testdata/TestFile_WriteTo.golden | 86 + vendor/github.com/go-redis/redis/cluster.go | 42 +- vendor/github.com/go-redis/redis/command.go | 4 +- .../go-redis/redis/example_instrumentation_test.go | 69 +- .../go-redis/redis/internal/proto/reader.go | 66 +- vendor/github.com/go-redis/redis/pubsub.go | 10 +- vendor/github.com/go-redis/redis/redis.go | 102 +- vendor/github.com/go-redis/redis/redis_context.go | 5 +- .../github.com/go-redis/redis/redis_no_context.go | 5 +- vendor/github.com/go-redis/redis/ring.go | 29 +- vendor/github.com/go-redis/redis/sentinel.go | 14 +- vendor/github.com/go-redis/redis/tx.go | 11 +- .../golang/protobuf/proto/extensions_test.go | 2 +- .../protobuf/protoc-gen-go/generator/generator.go | 6 +- vendor/github.com/gorilla/schema/.travis.yml | 18 + vendor/github.com/gorilla/schema/LICENSE | 27 + vendor/github.com/gorilla/schema/README.md | 90 + vendor/github.com/gorilla/schema/cache.go | 264 + vendor/github.com/gorilla/schema/converter.go | 145 + vendor/github.com/gorilla/schema/decoder.go | 420 + vendor/github.com/gorilla/schema/decoder_test.go | 1693 ++ vendor/github.com/gorilla/schema/doc.go | 148 + vendor/github.com/gorilla/schema/encoder.go | 195 + vendor/github.com/gorilla/schema/encoder_test.go | 420 + vendor/github.com/gorilla/websocket/conn.go | 15 +- .../hashicorp/go-immutable-radix/iradix.go | 7 +- .../hashicorp/go-immutable-radix/iradix_test.go | 37 +- vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go | 2 +- vendor/github.com/hashicorp/golang-lru/2q.go | 21 +- vendor/github.com/hashicorp/golang-lru/arc.go | 16 +- vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/lru.go | 28 +- vendor/github.com/hashicorp/golang-lru/lru_test.go | 4 +- .../hashicorp/golang-lru/simplelru/lru.go | 17 +- .../golang-lru/simplelru/lru_interface.go | 37 + .../hashicorp/golang-lru/simplelru/lru_test.go | 4 +- vendor/github.com/hashicorp/memberlist/README.md | 2 +- vendor/github.com/lib/pq/.travis.yml | 1 - .../github.com/magiconair/properties/CHANGELOG.md | 21 +- vendor/github.com/magiconair/properties/LICENSE | 2 +- vendor/github.com/magiconair/properties/README.md | 50 +- vendor/github.com/magiconair/properties/load.go | 2 +- .../github.com/magiconair/properties/properties.go | 69 +- .../magiconair/properties/properties_test.go | 28 + .../minio/minio-go/api-compose-object.go | 4 +- vendor/github.com/minio/minio-go/api.go | 61 +- vendor/github.com/minio/minio-go/docs/API.md | 19 +- vendor/github.com/mitchellh/mapstructure/README.md | 2 +- .../mitchellh/mapstructure/decode_hooks.go | 19 + .../mitchellh/mapstructure/decode_hooks_test.go | 30 + .../mitchellh/mapstructure/mapstructure.go | 247 +- .../mitchellh/mapstructure/mapstructure_test.go | 340 + vendor/github.com/olivere/elastic/.travis.yml | 2 +- vendor/github.com/olivere/elastic/CONTRIBUTORS | 11 +- vendor/github.com/olivere/elastic/README.md | 16 +- 
.../github.com/olivere/elastic/bulk_processor.go | 61 +- vendor/github.com/olivere/elastic/client.go | 8 +- vendor/github.com/olivere/elastic/errors.go | 8 + vendor/github.com/olivere/elastic/msearch.go | 39 +- vendor/github.com/olivere/elastic/msearch_test.go | 105 + .../olivere/elastic/recipes/bulk_processor/main.go | 149 + vendor/github.com/olivere/elastic/reindex.go | 10 + vendor/github.com/olivere/elastic/run-es.sh | 4 +- vendor/github.com/olivere/elastic/search.go | 3 +- vendor/github.com/olivere/elastic/search_aggs.go | 70 + .../elastic/search_aggs_bucket_composite.go | 498 + .../elastic/search_aggs_bucket_composite_test.go | 92 + .../elastic/search_aggs_bucket_date_range.go | 9 + .../elastic/search_aggs_bucket_date_range_test.go | 4 +- .../github.com/olivere/elastic/search_aggs_test.go | 441 +- .../olivere/elastic/search_queries_terms_set.go | 96 + .../elastic/search_queries_terms_set_test.go | 75 + .../github.com/olivere/elastic/search_request.go | 74 +- vendor/github.com/olivere/elastic/search_test.go | 55 + .../prometheus/client_golang/ISSUE_TEMPLATE.md | 8 + .../github.com/prometheus/client_golang/README.md | 47 +- .../prometheus/client_golang/prometheus/doc.go | 8 +- .../prometheus/client_golang/prometheus/http.go | 5 +- .../client_golang/prometheus/process_collector.go | 14 +- .../client_golang/prometheus/promhttp/delegator.go | 6 +- .../prometheus/promhttp/delegator_1_8.go | 2 +- .../client_golang/prometheus/promhttp/http.go | 129 +- .../client_golang/prometheus/promhttp/http_test.go | 119 + .../prometheus/promhttp/instrument_server_test.go | 26 + .../client_golang/prometheus/push/deprecated.go | 172 + .../push/example_add_from_gatherer_test.go | 32 +- .../client_golang/prometheus/push/examples_test.go | 11 +- .../client_golang/prometheus/push/push.go | 244 +- .../client_golang/prometheus/push/push_test.go | 78 +- .../client_golang/prometheus/registry.go | 291 +- vendor/github.com/prometheus/procfs/fs.go | 4 +- vendor/github.com/prometheus/procfs/nfs/parse.go | 13 +- .../github.com/prometheus/procfs/nfs/parse_nfs.go | 10 +- .../prometheus/procfs/nfs/parse_nfs_test.go | 127 +- vendor/github.com/rsc/letsencrypt/LICENSE | 27 - vendor/github.com/rsc/letsencrypt/README | 177 - vendor/github.com/rsc/letsencrypt/lets.go | 781 - .../vendor/github.com/miekg/dns/.gitignore | 4 - .../vendor/github.com/miekg/dns/.travis.yml | 14 - .../vendor/github.com/miekg/dns/AUTHORS | 1 - .../vendor/github.com/miekg/dns/CONTRIBUTORS | 9 - .../vendor/github.com/miekg/dns/COPYRIGHT | 9 - .../vendor/github.com/miekg/dns/LICENSE | 32 - .../vendor/github.com/miekg/dns/README.md | 162 - .../vendor/github.com/miekg/dns/client.go | 467 - .../vendor/github.com/miekg/dns/client_test.go | 511 - .../vendor/github.com/miekg/dns/clientconfig.go | 131 - .../github.com/miekg/dns/clientconfig_test.go | 87 - .../github.com/miekg/dns/compress_generate.go | 184 - .../vendor/github.com/miekg/dns/dane.go | 43 - .../vendor/github.com/miekg/dns/defaults.go | 285 - .../letsencrypt/vendor/github.com/miekg/dns/dns.go | 104 - .../vendor/github.com/miekg/dns/dns_bench_test.go | 211 - .../vendor/github.com/miekg/dns/dns_test.go | 450 - .../vendor/github.com/miekg/dns/dnssec.go | 720 - .../vendor/github.com/miekg/dns/dnssec_keygen.go | 156 - .../vendor/github.com/miekg/dns/dnssec_keyscan.go | 249 - .../vendor/github.com/miekg/dns/dnssec_privkey.go | 85 - .../vendor/github.com/miekg/dns/dnssec_test.go | 733 - .../letsencrypt/vendor/github.com/miekg/dns/doc.go | 251 - .../vendor/github.com/miekg/dns/dyn_test.go | 3 - 
.../vendor/github.com/miekg/dns/edns.go | 597 - .../vendor/github.com/miekg/dns/edns_test.go | 68 - .../vendor/github.com/miekg/dns/example_test.go | 146 - .../vendor/github.com/miekg/dns/format.go | 87 - .../vendor/github.com/miekg/dns/fuzz_test.go | 25 - .../vendor/github.com/miekg/dns/generate.go | 159 - .../vendor/github.com/miekg/dns/issue_test.go | 68 - .../vendor/github.com/miekg/dns/labels.go | 171 - .../vendor/github.com/miekg/dns/labels_test.go | 203 - .../letsencrypt/vendor/github.com/miekg/dns/msg.go | 1159 -- .../vendor/github.com/miekg/dns/msg_generate.go | 349 - .../vendor/github.com/miekg/dns/msg_helpers.go | 633 - .../vendor/github.com/miekg/dns/msg_test.go | 124 - .../vendor/github.com/miekg/dns/nsecx.go | 106 - .../vendor/github.com/miekg/dns/nsecx_test.go | 133 - .../vendor/github.com/miekg/dns/parse_test.go | 1540 -- .../vendor/github.com/miekg/dns/privaterr.go | 149 - .../vendor/github.com/miekg/dns/privaterr_test.go | 171 - .../vendor/github.com/miekg/dns/rawmsg.go | 49 - .../vendor/github.com/miekg/dns/remote_test.go | 19 - .../vendor/github.com/miekg/dns/reverse.go | 38 - .../vendor/github.com/miekg/dns/sanitize.go | 84 - .../vendor/github.com/miekg/dns/sanitize_test.go | 84 - .../vendor/github.com/miekg/dns/scan.go | 987 -- .../vendor/github.com/miekg/dns/scan_rr.go | 2184 --- .../vendor/github.com/miekg/dns/scanner.go | 43 - .../vendor/github.com/miekg/dns/server.go | 734 - .../vendor/github.com/miekg/dns/server_test.go | 719 - .../vendor/github.com/miekg/dns/sig0.go | 218 - .../vendor/github.com/miekg/dns/sig0_test.go | 89 - .../vendor/github.com/miekg/dns/singleinflight.go | 57 - .../vendor/github.com/miekg/dns/smimea.go | 47 - .../vendor/github.com/miekg/dns/tlsa.go | 47 - .../vendor/github.com/miekg/dns/tsig.go | 383 - .../vendor/github.com/miekg/dns/tsig_test.go | 37 - .../vendor/github.com/miekg/dns/types.go | 1287 -- .../vendor/github.com/miekg/dns/types_generate.go | 271 - .../vendor/github.com/miekg/dns/types_test.go | 74 - .../letsencrypt/vendor/github.com/miekg/dns/udp.go | 34 - .../vendor/github.com/miekg/dns/udp_linux.go | 105 - .../vendor/github.com/miekg/dns/udp_other.go | 15 - .../vendor/github.com/miekg/dns/udp_windows.go | 29 - .../vendor/github.com/miekg/dns/update.go | 106 - .../vendor/github.com/miekg/dns/update_test.go | 145 - .../letsencrypt/vendor/github.com/miekg/dns/xfr.go | 255 - .../vendor/github.com/miekg/dns/xfr_test.go | 184 - .../vendor/github.com/miekg/dns/zcompress.go | 119 - .../vendor/github.com/miekg/dns/zmsg.go | 3565 ---- .../vendor/github.com/miekg/dns/ztypes.go | 857 - .../vendor/github.com/xenolf/lego/.gitcookies.enc | Bin 480 -> 0 bytes .../vendor/github.com/xenolf/lego/.gitignore | 4 - .../vendor/github.com/xenolf/lego/.travis.yml | 16 - .../vendor/github.com/xenolf/lego/CHANGELOG.md | 94 - .../vendor/github.com/xenolf/lego/CONTRIBUTING.md | 32 - .../vendor/github.com/xenolf/lego/Dockerfile | 14 - .../vendor/github.com/xenolf/lego/LICENSE | 21 - .../vendor/github.com/xenolf/lego/README.md | 267 - .../github.com/xenolf/lego/acme/challenges.go | 16 - .../vendor/github.com/xenolf/lego/acme/client.go | 825 - .../github.com/xenolf/lego/acme/client_test.go | 269 - .../vendor/github.com/xenolf/lego/acme/crypto.go | 347 - .../github.com/xenolf/lego/acme/crypto_test.go | 93 - .../github.com/xenolf/lego/acme/dns_challenge.go | 305 - .../xenolf/lego/acme/dns_challenge_manual.go | 53 - .../xenolf/lego/acme/dns_challenge_test.go | 206 - .../vendor/github.com/xenolf/lego/acme/error.go | 94 - 
.../vendor/github.com/xenolf/lego/acme/http.go | 148 - .../github.com/xenolf/lego/acme/http_challenge.go | 41 - .../xenolf/lego/acme/http_challenge_server.go | 79 - .../xenolf/lego/acme/http_challenge_test.go | 57 - .../github.com/xenolf/lego/acme/http_test.go | 100 - .../vendor/github.com/xenolf/lego/acme/jws.go | 131 - .../vendor/github.com/xenolf/lego/acme/messages.go | 115 - .../github.com/xenolf/lego/acme/pop_challenge.go | 1 - .../vendor/github.com/xenolf/lego/acme/provider.go | 28 - .../xenolf/lego/acme/testdata/resolv.conf.1 | 5 - .../xenolf/lego/acme/tls_sni_challenge.go | 67 - .../xenolf/lego/acme/tls_sni_challenge_server.go | 62 - .../xenolf/lego/acme/tls_sni_challenge_test.go | 65 - .../vendor/github.com/xenolf/lego/acme/utils.go | 29 - .../github.com/xenolf/lego/acme/utils_test.go | 26 - .../vendor/golang.org/x/crypto/.gitattributes | 10 - .../vendor/golang.org/x/crypto/.gitignore | 2 - .../letsencrypt/vendor/golang.org/x/crypto/AUTHORS | 3 - .../vendor/golang.org/x/crypto/CONTRIBUTING.md | 31 - .../vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - .../letsencrypt/vendor/golang.org/x/crypto/LICENSE | 27 - .../letsencrypt/vendor/golang.org/x/crypto/PATENTS | 22 - .../letsencrypt/vendor/golang.org/x/crypto/README | 3 - .../vendor/golang.org/x/crypto/codereview.cfg | 1 - .../vendor/golang.org/x/crypto/ocsp/ocsp.go | 778 - .../vendor/golang.org/x/crypto/ocsp/ocsp_test.go | 875 - .../vendor/golang.org/x/net/.gitattributes | 10 - .../letsencrypt/vendor/golang.org/x/net/.gitignore | 2 - .../letsencrypt/vendor/golang.org/x/net/AUTHORS | 3 - .../vendor/golang.org/x/net/CONTRIBUTING.md | 31 - .../vendor/golang.org/x/net/CONTRIBUTORS | 3 - .../letsencrypt/vendor/golang.org/x/net/LICENSE | 27 - .../letsencrypt/vendor/golang.org/x/net/PATENTS | 22 - .../rsc/letsencrypt/vendor/golang.org/x/net/README | 3 - .../vendor/golang.org/x/net/codereview.cfg | 1 - .../vendor/golang.org/x/net/context/context.go | 156 - .../golang.org/x/net/context/context_test.go | 583 - .../vendor/golang.org/x/net/context/go17.go | 72 - .../vendor/golang.org/x/net/context/pre_go17.go | 300 - .../golang.org/x/net/context/withtimeout_test.go | 26 - .../vendor/golang.org/x/net/publicsuffix/gen.go | 713 - .../vendor/golang.org/x/net/publicsuffix/list.go | 135 - .../golang.org/x/net/publicsuffix/list_test.go | 416 - .../vendor/golang.org/x/net/publicsuffix/table.go | 9253 ----------- .../golang.org/x/net/publicsuffix/table_test.go | 16474 ------------------- .../letsencrypt/vendor/golang.org/x/time/AUTHORS | 3 - .../vendor/golang.org/x/time/CONTRIBUTING.md | 31 - .../vendor/golang.org/x/time/CONTRIBUTORS | 3 - .../letsencrypt/vendor/golang.org/x/time/LICENSE | 27 - .../letsencrypt/vendor/golang.org/x/time/PATENTS | 22 - .../letsencrypt/vendor/golang.org/x/time/README | 1 - .../vendor/golang.org/x/time/rate/rate.go | 380 - .../vendor/golang.org/x/time/rate/rate_go16.go | 21 - .../vendor/golang.org/x/time/rate/rate_go17.go | 21 - .../vendor/golang.org/x/time/rate/rate_test.go | 449 - .../gopkg.in/square/go-jose.v1/.gitcookies.sh.enc | 1 - .../vendor/gopkg.in/square/go-jose.v1/.gitignore | 7 - .../vendor/gopkg.in/square/go-jose.v1/.travis.yml | 45 - .../gopkg.in/square/go-jose.v1/BUG-BOUNTY.md | 10 - .../gopkg.in/square/go-jose.v1/CONTRIBUTING.md | 14 - .../vendor/gopkg.in/square/go-jose.v1/LICENSE | 202 - .../vendor/gopkg.in/square/go-jose.v1/README.md | 212 - .../gopkg.in/square/go-jose.v1/asymmetric.go | 520 - .../gopkg.in/square/go-jose.v1/asymmetric_test.go | 468 - .../gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go | 196 - 
.../square/go-jose.v1/cipher/cbc_hmac_test.go | 498 - .../square/go-jose.v1/cipher/concat_kdf.go | 75 - .../square/go-jose.v1/cipher/concat_kdf_test.go | 150 - .../gopkg.in/square/go-jose.v1/cipher/ecdh_es.go | 62 - .../square/go-jose.v1/cipher/ecdh_es_test.go | 115 - .../gopkg.in/square/go-jose.v1/cipher/key_wrap.go | 109 - .../square/go-jose.v1/cipher/key_wrap_test.go | 133 - .../vendor/gopkg.in/square/go-jose.v1/crypter.go | 416 - .../gopkg.in/square/go-jose.v1/crypter_test.go | 785 - .../vendor/gopkg.in/square/go-jose.v1/doc.go | 26 - .../vendor/gopkg.in/square/go-jose.v1/doc_test.go | 226 - .../vendor/gopkg.in/square/go-jose.v1/encoding.go | 193 - .../gopkg.in/square/go-jose.v1/encoding_test.go | 173 - .../vendor/gopkg.in/square/go-jose.v1/json/LICENSE | 27 - .../gopkg.in/square/go-jose.v1/json/README.md | 13 - .../gopkg.in/square/go-jose.v1/json/bench_test.go | 223 - .../gopkg.in/square/go-jose.v1/json/decode.go | 1183 -- .../gopkg.in/square/go-jose.v1/json/decode_test.go | 1474 -- .../gopkg.in/square/go-jose.v1/json/encode.go | 1197 -- .../gopkg.in/square/go-jose.v1/json/encode_test.go | 538 - .../gopkg.in/square/go-jose.v1/json/indent.go | 141 - .../gopkg.in/square/go-jose.v1/json/number_test.go | 133 - .../gopkg.in/square/go-jose.v1/json/scanner.go | 623 - .../square/go-jose.v1/json/scanner_test.go | 316 - .../gopkg.in/square/go-jose.v1/json/stream.go | 480 - .../gopkg.in/square/go-jose.v1/json/stream_test.go | 354 - .../gopkg.in/square/go-jose.v1/json/tagkey_test.go | 115 - .../vendor/gopkg.in/square/go-jose.v1/json/tags.go | 44 - .../gopkg.in/square/go-jose.v1/json/tags_test.go | 28 - .../square/go-jose.v1/json/testdata/code.json.gz | Bin 120432 -> 0 bytes .../gopkg.in/square/go-jose.v1/json_fork_test.go | 116 - .../vendor/gopkg.in/square/go-jose.v1/jwe.go | 280 - .../vendor/gopkg.in/square/go-jose.v1/jwe_test.go | 537 - .../vendor/gopkg.in/square/go-jose.v1/jwk.go | 457 - .../vendor/gopkg.in/square/go-jose.v1/jwk_test.go | 662 - .../vendor/gopkg.in/square/go-jose.v1/jws.go | 272 - .../vendor/gopkg.in/square/go-jose.v1/jws_test.go | 312 - .../vendor/gopkg.in/square/go-jose.v1/shared.go | 224 - .../vendor/gopkg.in/square/go-jose.v1/signing.go | 258 - .../gopkg.in/square/go-jose.v1/signing_test.go | 451 - .../vendor/gopkg.in/square/go-jose.v1/symmetric.go | 349 - .../gopkg.in/square/go-jose.v1/symmetric_test.go | 131 - .../vendor/gopkg.in/square/go-jose.v1/utils.go | 74 - .../gopkg.in/square/go-jose.v1/utils_test.go | 225 - .../github.com/rsc/letsencrypt/vendor/vendor.json | 49 - vendor/github.com/spf13/afero/.travis.yml | 2 +- vendor/github.com/spf13/cast/cast_test.go | 25 + vendor/github.com/spf13/cast/caste.go | 20 + vendor/github.com/spf13/cobra/.circleci/config.yml | 38 + vendor/github.com/spf13/cobra/README.md | 20 +- vendor/github.com/spf13/cobra/bash_completions.go | 122 +- .../spf13/cobra/bash_completions_test.go | 5 +- vendor/github.com/spf13/cobra/command.go | 25 +- vendor/github.com/spf13/cobra/command_test.go | 63 + vendor/github.com/spf13/pflag/duration_slice.go | 128 + .../github.com/spf13/pflag/duration_slice_test.go | 165 + vendor/github.com/stretchr/objx/.codeclimate.yml | 8 + vendor/github.com/stretchr/objx/.travis.yml | 13 +- vendor/github.com/stretchr/objx/README.md | 2 +- vendor/github.com/stretchr/objx/Taskfile.yml | 1 - vendor/github.com/stretchr/objx/accessors.go | 151 +- vendor/github.com/stretchr/objx/accessors_test.go | 204 +- .../stretchr/objx/codegen/template_test.txt | 169 +- .../stretchr/objx/codegen/types_list.txt | 2 +- 
vendor/github.com/stretchr/objx/constants.go | 13 - vendor/github.com/stretchr/objx/conversions.go | 9 +- vendor/github.com/stretchr/objx/map.go | 44 +- vendor/github.com/stretchr/objx/map_test.go | 55 + .../stretchr/objx/type_specific_codegen.go | 23 +- .../stretchr/objx/type_specific_codegen_test.go | 3306 ++-- vendor/github.com/stretchr/testify/.travis.yml | 2 +- vendor/github.com/stretchr/testify/Gopkg.lock | 8 +- vendor/github.com/stretchr/testify/Gopkg.toml | 34 +- vendor/github.com/stretchr/testify/README.md | 33 +- .../stretchr/testify/assert/assertion_format.go | 62 +- .../stretchr/testify/assert/assertion_forward.go | 124 +- .../stretchr/testify/assert/assertions.go | 64 +- vendor/github.com/stretchr/testify/mock/mock.go | 15 +- .../github.com/stretchr/testify/require/require.go | 124 +- .../stretchr/testify/require/require_forward.go | 124 +- .../vendor/github.com/davecgh/go-spew/.gitignore | 22 - .../vendor/github.com/davecgh/go-spew/.travis.yml | 14 - .../vendor/github.com/davecgh/go-spew/README.md | 205 - .../github.com/davecgh/go-spew/cov_report.sh | 22 - .../github.com/davecgh/go-spew/spew/common_test.go | 298 - .../github.com/davecgh/go-spew/spew/dump_test.go | 1042 -- .../davecgh/go-spew/spew/dumpcgo_test.go | 99 - .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 - .../davecgh/go-spew/spew/example_test.go | 226 - .../github.com/davecgh/go-spew/spew/format_test.go | 1558 -- .../davecgh/go-spew/spew/internal_test.go | 87 - .../davecgh/go-spew/spew/internalunsafe_test.go | 102 - .../github.com/davecgh/go-spew/spew/spew_test.go | 320 - .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 - .../github.com/davecgh/go-spew/test_coverage.txt | 61 - .../github.com/pmezard/go-difflib/.travis.yml | 5 - .../vendor/github.com/pmezard/go-difflib/README.md | 50 - .../pmezard/go-difflib/difflib/difflib.go | 54 +- .../pmezard/go-difflib/difflib/difflib_test.go | 352 - .../vendor/github.com/stretchr/objx/.gitignore | 22 - .../vendor/github.com/stretchr/objx/LICENSE | 22 + .../vendor/github.com/stretchr/objx/LICENSE.md | 23 - .../vendor/github.com/stretchr/objx/README.md | 3 - .../vendor/github.com/stretchr/objx/accessors.go | 10 +- .../github.com/stretchr/objx/accessors_test.go | 145 - .../stretchr/objx/codegen/array-access.txt | 14 - .../github.com/stretchr/objx/codegen/index.html | 86 - .../github.com/stretchr/objx/codegen/template.txt | 286 - .../stretchr/objx/codegen/types_list.txt | 20 - .../vendor/github.com/stretchr/objx/conversions.go | 19 +- .../github.com/stretchr/objx/conversions_test.go | 94 - .../testify/vendor/github.com/stretchr/objx/doc.go | 136 +- .../github.com/stretchr/objx/fixture_test.go | 98 - .../testify/vendor/github.com/stretchr/objx/map.go | 37 +- .../github.com/stretchr/objx/map_for_test.go | 10 - .../vendor/github.com/stretchr/objx/map_test.go | 147 - .../vendor/github.com/stretchr/objx/mutations.go | 17 +- .../github.com/stretchr/objx/mutations_test.go | 77 - .../vendor/github.com/stretchr/objx/security.go | 5 +- .../github.com/stretchr/objx/security_test.go | 12 - .../stretchr/objx/simple_example_test.go | 41 - .../vendor/github.com/stretchr/objx/tests_test.go | 24 - .../stretchr/objx/type_specific_codegen.go | 460 +- .../stretchr/objx/type_specific_codegen_test.go | 2867 ---- .../vendor/github.com/stretchr/objx/value.go | 43 + .../vendor/github.com/stretchr/objx/value_test.go | 1 - vendor/github.com/xenolf/lego/.gitcookies.enc | Bin 480 -> 0 bytes vendor/github.com/xenolf/lego/.gitignore | 4 - vendor/github.com/xenolf/lego/.travis.yml | 16 - 
vendor/github.com/xenolf/lego/CHANGELOG.md | 154 - vendor/github.com/xenolf/lego/CONTRIBUTING.md | 32 - vendor/github.com/xenolf/lego/Dockerfile | 15 - vendor/github.com/xenolf/lego/LICENSE | 21 - vendor/github.com/xenolf/lego/README.md | 273 - vendor/github.com/xenolf/lego/account.go | 109 - vendor/github.com/xenolf/lego/acme/challenges.go | 16 - vendor/github.com/xenolf/lego/acme/client.go | 825 - vendor/github.com/xenolf/lego/acme/client_test.go | 269 - vendor/github.com/xenolf/lego/acme/crypto.go | 347 - vendor/github.com/xenolf/lego/acme/crypto_test.go | 93 - .../github.com/xenolf/lego/acme/dns_challenge.go | 309 - .../xenolf/lego/acme/dns_challenge_manual.go | 53 - .../xenolf/lego/acme/dns_challenge_test.go | 200 - vendor/github.com/xenolf/lego/acme/error.go | 94 - vendor/github.com/xenolf/lego/acme/http.go | 160 - .../github.com/xenolf/lego/acme/http_challenge.go | 41 - .../xenolf/lego/acme/http_challenge_server.go | 79 - .../xenolf/lego/acme/http_challenge_test.go | 57 - vendor/github.com/xenolf/lego/acme/http_test.go | 100 - vendor/github.com/xenolf/lego/acme/jws.go | 131 - vendor/github.com/xenolf/lego/acme/messages.go | 115 - .../github.com/xenolf/lego/acme/pop_challenge.go | 1 - vendor/github.com/xenolf/lego/acme/provider.go | 28 - .../xenolf/lego/acme/testdata/resolv.conf.1 | 5 - .../xenolf/lego/acme/tls_sni_challenge.go | 67 - .../xenolf/lego/acme/tls_sni_challenge_server.go | 62 - .../xenolf/lego/acme/tls_sni_challenge_test.go | 65 - vendor/github.com/xenolf/lego/acme/utils.go | 29 - vendor/github.com/xenolf/lego/acme/utils_test.go | 26 - vendor/github.com/xenolf/lego/cli.go | 232 - vendor/github.com/xenolf/lego/cli_handlers.go | 423 - vendor/github.com/xenolf/lego/configuration.go | 76 - vendor/github.com/xenolf/lego/crypto.go | 56 - .../lego/providers/dns/auroradns/auroradns.go | 141 - .../lego/providers/dns/auroradns/auroradns_test.go | 148 - .../xenolf/lego/providers/dns/azure/azure.go | 151 - .../xenolf/lego/providers/dns/azure/azure_test.go | 89 - .../lego/providers/dns/cloudflare/cloudflare.go | 223 - .../providers/dns/cloudflare/cloudflare_test.go | 80 - .../providers/dns/digitalocean/digitalocean.go | 166 - .../dns/digitalocean/digitalocean_test.go | 117 - .../xenolf/lego/providers/dns/dns_providers.go | 86 - .../lego/providers/dns/dns_providers_test.go | 50 - .../xenolf/lego/providers/dns/dnsimple/dnsimple.go | 180 - .../lego/providers/dns/dnsimple/dnsimple_test.go | 140 - .../lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go | 248 - .../providers/dns/dnsmadeeasy/dnsmadeeasy_test.go | 37 - .../xenolf/lego/providers/dns/dnspod/dnspod.go | 146 - .../lego/providers/dns/dnspod/dnspod_test.go | 72 - .../xenolf/lego/providers/dns/dyn/dyn.go | 274 - .../xenolf/lego/providers/dns/dyn/dyn_test.go | 53 - .../xenolf/lego/providers/dns/exoscale/exoscale.go | 128 - .../lego/providers/dns/exoscale/exoscale_test.go | 103 - .../xenolf/lego/providers/dns/gandi/gandi.go | 472 - .../xenolf/lego/providers/dns/gandi/gandi_test.go | 939 -- .../xenolf/lego/providers/dns/godaddy/godaddy.go | 155 - .../lego/providers/dns/godaddy/godaddy_test.go | 60 - .../lego/providers/dns/googlecloud/googlecloud.go | 205 - .../providers/dns/googlecloud/googlecloud_test.go | 99 - .../xenolf/lego/providers/dns/linode/linode.go | 131 - .../lego/providers/dns/linode/linode_test.go | 317 - .../lego/providers/dns/namecheap/namecheap.go | 416 - .../lego/providers/dns/namecheap/namecheap_test.go | 402 - .../xenolf/lego/providers/dns/ns1/ns1.go | 97 - .../xenolf/lego/providers/dns/ns1/ns1_test.go | 67 - 
.../xenolf/lego/providers/dns/otc/mock.go | 152 - .../xenolf/lego/providers/dns/otc/otc.go | 388 - .../xenolf/lego/providers/dns/otc/otc_test.go | 112 - .../xenolf/lego/providers/dns/ovh/ovh.go | 159 - .../xenolf/lego/providers/dns/ovh/ovh_test.go | 103 - .../xenolf/lego/providers/dns/pdns/README.md | 7 - .../xenolf/lego/providers/dns/pdns/pdns.go | 343 - .../xenolf/lego/providers/dns/pdns/pdns_test.go | 80 - .../lego/providers/dns/rackspace/rackspace.go | 284 - .../lego/providers/dns/rackspace/rackspace_test.go | 220 - .../xenolf/lego/providers/dns/rfc2136/rfc2136.go | 152 - .../lego/providers/dns/rfc2136/rfc2136_test.go | 244 - .../lego/providers/dns/route53/fixtures_test.go | 39 - .../xenolf/lego/providers/dns/route53/route53.go | 185 - .../dns/route53/route53_integration_test.go | 70 - .../lego/providers/dns/route53/route53_test.go | 105 - .../lego/providers/dns/route53/testutil_test.go | 38 - .../xenolf/lego/providers/dns/vultr/vultr.go | 127 - .../xenolf/lego/providers/dns/vultr/vultr_test.go | 65 - .../xenolf/lego/providers/http/memcached/README.md | 15 - .../lego/providers/http/memcached/memcached.go | 60 - .../providers/http/memcached/memcached_test.go | 111 - .../xenolf/lego/providers/http/webroot/webroot.go | 58 - .../lego/providers/http/webroot/webroot_test.go | 46 - 479 files changed, 10210 insertions(+), 101648 deletions(-) create mode 100644 vendor/github.com/go-ini/ini/testdata/TestFile_WriteTo.golden create mode 100644 vendor/github.com/gorilla/schema/.travis.yml create mode 100644 vendor/github.com/gorilla/schema/LICENSE create mode 100644 vendor/github.com/gorilla/schema/README.md create mode 100644 vendor/github.com/gorilla/schema/cache.go create mode 100644 vendor/github.com/gorilla/schema/converter.go create mode 100644 vendor/github.com/gorilla/schema/decoder.go create mode 100644 vendor/github.com/gorilla/schema/decoder_test.go create mode 100644 vendor/github.com/gorilla/schema/doc.go create mode 100644 vendor/github.com/gorilla/schema/encoder.go create mode 100644 vendor/github.com/gorilla/schema/encoder_test.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go create mode 100644 vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go create mode 100644 vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go create mode 100644 vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go create mode 100644 vendor/github.com/olivere/elastic/search_queries_terms_set.go create mode 100644 vendor/github.com/olivere/elastic/search_queries_terms_set_test.go create mode 100644 vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go delete mode 100644 vendor/github.com/rsc/letsencrypt/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/README delete mode 100644 vendor/github.com/rsc/letsencrypt/lets.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.gitignore delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.travis.yml delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/AUTHORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/CONTRIBUTORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/COPYRIGHT delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/LICENSE 
delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/README.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/compress_generate.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dane.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/defaults.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_bench_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keygen.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keyscan.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_privkey.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/doc.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dyn_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/example_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/format.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/fuzz_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/generate.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/issue_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_helpers.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/parse_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/rawmsg.go delete mode 100644 
vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/remote_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/reverse.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan_rr.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scanner.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/singleinflight.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/smimea.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tlsa.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_linux.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_other.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_windows.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zcompress.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zmsg.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitcookies.enc delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitignore delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.travis.yml delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CHANGELOG.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CONTRIBUTING.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/Dockerfile delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/README.md delete mode 100644 
vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/pop_challenge.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitattributes delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitignore delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/AUTHORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTING.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/README delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/codereview.cfg delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp_test.go delete mode 100644 
vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitattributes delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitignore delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/AUTHORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTING.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/README delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/codereview.cfg delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/withtimeout_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/gen.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/list.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/list_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTING.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTORS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/PATENTS delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/README delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go16.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go17.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitignore delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.travis.yml delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go delete mode 100644 
vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/indent.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/number_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe.go delete mode 100644 
vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/shared.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils_test.go delete mode 100644 vendor/github.com/rsc/letsencrypt/vendor/vendor.json create mode 100644 vendor/github.com/spf13/cobra/.circleci/config.yml create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 vendor/github.com/spf13/pflag/duration_slice_test.go delete mode 100644 vendor/github.com/stretchr/objx/constants.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.gitignore delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.travis.yml delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/README.md delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/cov_report.sh delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/example_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internal_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/test_coverage.txt delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/.travis.yml delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/README.md delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore create mode 100644 
vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go delete mode 100644 vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go delete mode 100644 vendor/github.com/xenolf/lego/.gitcookies.enc delete mode 100644 vendor/github.com/xenolf/lego/.gitignore delete mode 100644 vendor/github.com/xenolf/lego/.travis.yml delete mode 100644 vendor/github.com/xenolf/lego/CHANGELOG.md delete mode 100644 vendor/github.com/xenolf/lego/CONTRIBUTING.md delete mode 100644 vendor/github.com/xenolf/lego/Dockerfile delete mode 100644 vendor/github.com/xenolf/lego/LICENSE delete mode 100644 vendor/github.com/xenolf/lego/README.md delete mode 100644 vendor/github.com/xenolf/lego/account.go delete mode 100644 vendor/github.com/xenolf/lego/acme/challenges.go delete mode 100644 vendor/github.com/xenolf/lego/acme/client.go delete mode 100644 vendor/github.com/xenolf/lego/acme/client_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/crypto.go delete mode 100644 vendor/github.com/xenolf/lego/acme/crypto_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go delete mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/error.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge_server.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/jws.go delete mode 100644 vendor/github.com/xenolf/lego/acme/messages.go delete mode 100644 
vendor/github.com/xenolf/lego/acme/pop_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/provider.go delete mode 100644 vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 delete mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go delete mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go delete mode 100644 vendor/github.com/xenolf/lego/acme/utils.go delete mode 100644 vendor/github.com/xenolf/lego/acme/utils_test.go delete mode 100644 vendor/github.com/xenolf/lego/cli.go delete mode 100644 vendor/github.com/xenolf/lego/cli_handlers.go delete mode 100644 vendor/github.com/xenolf/lego/configuration.go delete mode 100644 vendor/github.com/xenolf/lego/crypto.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/azure/azure.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/azure/azure_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dns_providers.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dns_providers_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/linode/linode.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/ns1/ns1.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/ns1/ns1_test.go delete mode 
100644 vendor/github.com/xenolf/lego/providers/dns/otc/mock.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/otc/otc.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/otc/otc_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/pdns/README.md delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/route53.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go delete mode 100644 vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/http/memcached/README.md delete mode 100644 vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go delete mode 100644 vendor/github.com/xenolf/lego/providers/http/memcached/memcached_test.go delete mode 100644 vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go delete mode 100644 vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 7f166c3a3..792994785 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // "-tags safe" is not added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4

 package spew

@@ -34,80 +36,49 @@ const (
 	ptrSize = unsafe.Sizeof((*byte)(nil))
 )

+type flag uintptr
+
 var (
-	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
-	// internal reflect.Value fields. These values are valid before golang
-	// commit ecccf07e7f9d which changed the format. The are also valid
-	// after commit 82f48826c6c7 which changed the format again to mirror
-	// the original format. Code in the init function updates these offsets
-	// as necessary.
-	offsetPtr    = ptrSize
-	offsetScalar = uintptr(0)
-	offsetFlag   = ptrSize * 2
-
-	// flagKindWidth and flagKindShift indicate various bits that the
-	// reflect package uses internally to track kind information.
- // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = flagKindWidth - 1 - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. 
+func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5..205c28d68 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. 
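The rewritten bypass.go above infers the read-only and addressable flag bits at init time and then clears or sets them directly. Below is a minimal standalone sketch of that flag-clearing trick, assuming the newer flag layout (the second okFlags entry); it is not spew's API, only an illustration of why clearing the read-only bits makes ordinarily inaccessible values readable.

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// flagOffset mirrors flagValOffset above: the byte offset of reflect.Value's
// unexported flag field.
var flagOffset = func() uintptr {
	f, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return f.Offset
}()

func main() {
	type secret struct{ hidden string }
	v := reflect.ValueOf(secret{hidden: "shh"}).Field(0)
	fmt.Println(v.CanInterface()) // false: the read-only bits are set

	// Clear the read-only bits (1<<5 | 1<<6, per the okFlags table above) so
	// the value behaves like an exported field.
	fl := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + flagOffset))
	*fl &^= 3 << 5
	fmt.Println(v.Interface()) // shh
}
```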
-// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go index 5aad9c7af..4a31a2ee3 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -768,7 +768,7 @@ func addUintptrDumpTests() { func addUnsafePointerDumpTests() { // Null pointer. - v := unsafe.Pointer(uintptr(0)) + v := unsafe.Pointer(nil) nv := (*unsafe.Pointer)(nil) pv := &v vAddr := fmt.Sprintf("%p", pv) diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go index 0719eb916..87ee9651e 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -1083,7 +1083,7 @@ func addUintptrFormatterTests() { func addUnsafePointerFormatterTests() { // Null pointer. - v := unsafe.Pointer(uintptr(0)) + v := unsafe.Pointer(nil) nv := (*unsafe.Pointer)(nil) pv := &v vAddr := fmt.Sprintf("%p", pv) diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go index a0c612ec3..80dc22177 100644 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -16,7 +16,7 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// +build !js,!appengine,!safe,!disableunsafe,go1.4 /* This test file is part of the spew package rather than than the spew_test @@ -30,7 +30,6 @@ import ( "bytes" "reflect" "testing" - "unsafe" ) // changeKind uses unsafe to intentionally change the kind of a reflect.Value to @@ -38,13 +37,13 @@ import ( // fallback code which punts to the standard fmt library for new types that // might get added to the language. 
func changeKind(v *reflect.Value, readOnly bool) { - rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) - *rvf = *rvf | ((1<\n", cmd) + err := old(cmd) + fmt.Printf("finished processing: <%s>\n", cmd) + return err + } }) - for { - ring.Ping() - } + cl.Ping() + // Output: starting processing: + // finished processing: } -func wrapRedisProcess(client *redis.Client) { - const precision = time.Microsecond - var count, avgDur uint32 - - go func() { - for range time.Tick(3 * time.Second) { - n := atomic.LoadUint32(&count) - dur := time.Duration(atomic.LoadUint32(&avgDur)) * precision - fmt.Printf("%s: processed=%d avg_dur=%s\n", client, n, dur) - } - }() - - client.WrapProcess(func(oldProcess func(redis.Cmder) error) func(redis.Cmder) error { - return func(cmd redis.Cmder) error { - start := time.Now() - err := oldProcess(cmd) - dur := time.Since(start) - - const decay = float64(1) / 100 - ms := float64(dur / precision) - for { - avg := atomic.LoadUint32(&avgDur) - newAvg := uint32((1-decay)*float64(avg) + decay*ms) - if atomic.CompareAndSwapUint32(&avgDur, avg, newAvg) { - break - } - } - atomic.AddUint32(&count, 1) +func Example_Pipeline_instrumentation() { + client := redis.NewClient(&redis.Options{ + Addr: ":6379", + }) + client.WrapProcessPipeline(func(old func([]redis.Cmder) error) func([]redis.Cmder) error { + return func(cmds []redis.Cmder) error { + fmt.Printf("pipeline starting processing: %v\n", cmds) + err := old(cmds) + fmt.Printf("pipeline finished processing: %v\n", cmds) return err } }) + + client.Pipelined(func(pipe redis.Pipeliner) error { + pipe.Ping() + pipe.Ping() + return nil + }) + // Output: pipeline starting processing: [ping: ping: ] + // pipeline finished processing: [ping: PONG ping: PONG] } diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader.go b/vendor/github.com/go-redis/redis/internal/proto/reader.go index cd94329d8..e5ae8a03e 100644 --- a/vendor/github.com/go-redis/redis/internal/proto/reader.go +++ b/vendor/github.com/go-redis/redis/internal/proto/reader.go @@ -37,25 +37,25 @@ func (r *Reader) Reset(rd io.Reader) { r.src.Reset(rd) } -func (p *Reader) PeekBuffered() []byte { - if n := p.src.Buffered(); n != 0 { - b, _ := p.src.Peek(n) +func (r *Reader) PeekBuffered() []byte { + if n := r.src.Buffered(); n != 0 { + b, _ := r.src.Peek(n) return b } return nil } -func (p *Reader) ReadN(n int) ([]byte, error) { - b, err := readN(p.src, p.buf, n) +func (r *Reader) ReadN(n int) ([]byte, error) { + b, err := readN(r.src, r.buf, n) if err != nil { return nil, err } - p.buf = b + r.buf = b return b, nil } -func (p *Reader) ReadLine() ([]byte, error) { - line, isPrefix, err := p.src.ReadLine() +func (r *Reader) ReadLine() ([]byte, error) { + line, isPrefix, err := r.src.ReadLine() if err != nil { return nil, err } @@ -71,8 +71,8 @@ func (p *Reader) ReadLine() ([]byte, error) { return line, nil } -func (p *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { - line, err := p.ReadLine() +func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { + line, err := r.ReadLine() if err != nil { return nil, err } @@ -85,19 +85,19 @@ func (p *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { case IntReply: return parseInt(line[1:], 10, 64) case StringReply: - return p.readTmpBytesValue(line) + return r.readTmpBytesValue(line) case ArrayReply: n, err := parseArrayLen(line) if err != nil { return nil, err } - return m(p, n) + return m(r, n) } return nil, fmt.Errorf("redis: can't parse %.100q", line) } -func (p *Reader) 
ReadIntReply() (int64, error) { - line, err := p.ReadLine() +func (r *Reader) ReadIntReply() (int64, error) { + line, err := r.ReadLine() if err != nil { return 0, err } @@ -111,8 +111,8 @@ func (p *Reader) ReadIntReply() (int64, error) { } } -func (p *Reader) ReadTmpBytesReply() ([]byte, error) { - line, err := p.ReadLine() +func (r *Reader) ReadTmpBytesReply() ([]byte, error) { + line, err := r.ReadLine() if err != nil { return nil, err } @@ -120,7 +120,7 @@ func (p *Reader) ReadTmpBytesReply() ([]byte, error) { case ErrorReply: return nil, ParseErrorReply(line) case StringReply: - return p.readTmpBytesValue(line) + return r.readTmpBytesValue(line) case StatusReply: return parseStatusValue(line), nil default: @@ -138,24 +138,24 @@ func (r *Reader) ReadBytesReply() ([]byte, error) { return cp, nil } -func (p *Reader) ReadStringReply() (string, error) { - b, err := p.ReadTmpBytesReply() +func (r *Reader) ReadStringReply() (string, error) { + b, err := r.ReadTmpBytesReply() if err != nil { return "", err } return string(b), nil } -func (p *Reader) ReadFloatReply() (float64, error) { - b, err := p.ReadTmpBytesReply() +func (r *Reader) ReadFloatReply() (float64, error) { + b, err := r.ReadTmpBytesReply() if err != nil { return 0, err } return parseFloat(b, 64) } -func (p *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { - line, err := p.ReadLine() +func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { + line, err := r.ReadLine() if err != nil { return nil, err } @@ -167,14 +167,14 @@ func (p *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { if err != nil { return nil, err } - return m(p, n) + return m(r, n) default: return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) } } -func (p *Reader) ReadArrayLen() (int64, error) { - line, err := p.ReadLine() +func (r *Reader) ReadArrayLen() (int64, error) { + line, err := r.ReadLine() if err != nil { return 0, err } @@ -188,8 +188,8 @@ func (p *Reader) ReadArrayLen() (int64, error) { } } -func (p *Reader) ReadScanReply() ([]string, uint64, error) { - n, err := p.ReadArrayLen() +func (r *Reader) ReadScanReply() ([]string, uint64, error) { + n, err := r.ReadArrayLen() if err != nil { return nil, 0, err } @@ -197,19 +197,19 @@ func (p *Reader) ReadScanReply() ([]string, uint64, error) { return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) } - cursor, err := p.ReadUint() + cursor, err := r.ReadUint() if err != nil { return nil, 0, err } - n, err = p.ReadArrayLen() + n, err = r.ReadArrayLen() if err != nil { return nil, 0, err } keys := make([]string, n) for i := int64(0); i < n; i++ { - key, err := p.ReadStringReply() + key, err := r.ReadStringReply() if err != nil { return nil, 0, err } @@ -219,7 +219,7 @@ func (p *Reader) ReadScanReply() ([]string, uint64, error) { return keys, cursor, err } -func (p *Reader) readTmpBytesValue(line []byte) ([]byte, error) { +func (r *Reader) readTmpBytesValue(line []byte) ([]byte, error) { if isNilReply(line) { return nil, internal.Nil } @@ -229,7 +229,7 @@ func (p *Reader) readTmpBytesValue(line []byte) ([]byte, error) { return nil, err } - b, err := p.ReadN(replyLen + 2) + b, err := r.ReadN(replyLen + 2) if err != nil { return nil, err } diff --git a/vendor/github.com/go-redis/redis/pubsub.go b/vendor/github.com/go-redis/redis/pubsub.go index 01f8a61aa..3ee4ea9d0 100644 --- a/vendor/github.com/go-redis/redis/pubsub.go +++ b/vendor/github.com/go-redis/redis/pubsub.go @@ -127,7 +127,7 @@ func (c *PubSub) Close() error { return 
nil } -// Subscribes the client to the specified channels. It returns +// Subscribe the client to the specified channels. It returns // empty subscription if there are no channels. func (c *PubSub) Subscribe(channels ...string) error { c.mu.Lock() @@ -137,7 +137,7 @@ func (c *PubSub) Subscribe(channels ...string) error { return err } -// Subscribes the client to the given patterns. It returns +// PSubscribe the client to the given patterns. It returns // empty subscription if there are no patterns. func (c *PubSub) PSubscribe(patterns ...string) error { c.mu.Lock() @@ -147,7 +147,7 @@ func (c *PubSub) PSubscribe(patterns ...string) error { return err } -// Unsubscribes the client from the given channels, or from all of +// Unsubscribe the client from the given channels, or from all of // them if none is given. func (c *PubSub) Unsubscribe(channels ...string) error { c.mu.Lock() @@ -157,7 +157,7 @@ func (c *PubSub) Unsubscribe(channels ...string) error { return err } -// Unsubscribes the client from the given patterns, or from all of +// PUnsubscribe the client from the given patterns, or from all of // them if none is given. func (c *PubSub) PUnsubscribe(patterns ...string) error { c.mu.Lock() @@ -196,7 +196,7 @@ func (c *PubSub) Ping(payload ...string) error { return err } -// Message received after a successful subscription to channel. +// Subscription received after a successful subscription to channel. type Subscription struct { // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". Kind string diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go index 37ffafd97..cf402986d 100644 --- a/vendor/github.com/go-redis/redis/redis.go +++ b/vendor/github.com/go-redis/redis/redis.go @@ -11,7 +11,7 @@ import ( "github.com/go-redis/redis/internal/proto" ) -// Redis nil reply returned when key does not exist. +// Nil reply redis returned when key does not exist. const Nil = internal.Nil func init() { @@ -22,6 +22,12 @@ func SetLogger(logger *log.Logger) { internal.Logger = logger } +func (c *baseClient) init() { + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline +} + func (c *baseClient) String() string { return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) } @@ -85,7 +91,8 @@ func (c *baseClient) initConn(cn *pool.Conn) error { connPool: pool.NewSingleConnPool(cn), }, } - conn.setProcessor(conn.Process) + conn.baseClient.init() + conn.statefulCmdable.setProcessor(conn.Process) _, err := conn.Pipelined(func(pipe Pipeliner) error { if c.opt.Password != "" { @@ -117,14 +124,11 @@ func (c *baseClient) initConn(cn *pool.Conn) error { // an input and returns the new wrapper process func. createWrapper should // use call the old process func within the new process func. 
func (c *baseClient) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) { - c.process = fn(c.defaultProcess) + c.process = fn(c.process) } func (c *baseClient) Process(cmd Cmder) error { - if c.process != nil { - return c.process(cmd) - } - return c.defaultProcess(cmd) + return c.process(cmd) } func (c *baseClient) defaultProcess(cmd Cmder) error { @@ -172,9 +176,9 @@ func (c *baseClient) retryBackoff(attempt int) time.Duration { func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { if timeout := cmd.readTimeout(); timeout != nil { return *timeout - } else { - return c.opt.ReadTimeout } + + return c.opt.ReadTimeout } // Close closes the client, releasing any open resources. @@ -198,35 +202,48 @@ func (c *baseClient) getAddr() string { return c.opt.Addr } +func (c *baseClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) + c.processTxPipeline = fn(c.processTxPipeline) +} + +func (c *baseClient) defaultProcessPipeline(cmds []Cmder) error { + return c.generalProcessPipeline(cmds, c.pipelineProcessCmds) +} + +func (c *baseClient) defaultProcessTxPipeline(cmds []Cmder) error { + return c.generalProcessPipeline(cmds, c.txPipelineProcessCmds) +} + type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error) -func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer { - return func(cmds []Cmder) error { - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } +func (c *baseClient) generalProcessPipeline(cmds []Cmder, p pipelineProcessor) error { + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } - cn, _, err := c.getConn() - if err != nil { - setCmdsErr(cmds, err) - return err - } + cn, _, err := c.getConn() + if err != nil { + setCmdsErr(cmds, err) + return err + } - canRetry, err := p(cn, cmds) + canRetry, err := p(cn, cmds) - if err == nil || internal.IsRedisError(err) { - _ = c.connPool.Put(cn) - break - } - _ = c.connPool.Remove(cn) + if err == nil || internal.IsRedisError(err) { + _ = c.connPool.Put(cn) + break + } + _ = c.connPool.Remove(cn) - if !canRetry || !internal.IsRetryableError(err, true) { - break - } + if !canRetry || !internal.IsRetryableError(err, true) { + break } - return firstCmdsErr(cmds) } + return firstCmdsErr(cmds) } func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) { @@ -324,14 +341,15 @@ type Client struct { } func newClient(opt *Options, pool pool.Pooler) *Client { - client := Client{ + c := Client{ baseClient: baseClient{ opt: opt, connPool: pool, }, } - client.setProcessor(client.Process) - return &client + c.baseClient.init() + c.cmdable.setProcessor(c.Process) + return &c } // NewClient returns a client to the Redis Server specified by Options. 
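Because WrapProcess now wraps the client's current process func (rather than always starting from defaultProcess), wrappers compose, and WrapProcessPipeline applies the same idea to both regular and MULTI/EXEC pipelines. A hedged sketch of chaining them, assuming a Redis server on :6379; the printed labels are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: ":6379"})

	// First wrapper: measure command latency.
	client.WrapProcess(func(old func(redis.Cmder) error) func(redis.Cmder) error {
		return func(cmd redis.Cmder) error {
			start := time.Now()
			err := old(cmd)
			fmt.Printf("%s took %s\n", cmd.Name(), time.Since(start))
			return err
		}
	})

	// Second wrapper: log the command. It wraps the timing wrapper rather
	// than replacing it, because WrapProcess wraps c.process.
	client.WrapProcess(func(old func(redis.Cmder) error) func(redis.Cmder) error {
		return func(cmd redis.Cmder) error {
			fmt.Printf("-> %s\n", cmd.Name())
			return old(cmd)
		}
	})

	// Pipelines have their own hook; it is applied to both Pipeline and
	// TxPipeline via processPipeline/processTxPipeline.
	client.WrapProcessPipeline(func(old func([]redis.Cmder) error) func([]redis.Cmder) error {
		return func(cmds []redis.Cmder) error {
			fmt.Printf("pipeline of %d commands\n", len(cmds))
			return old(cmds)
		}
	})

	client.Ping()
	client.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Ping()
		pipe.Ping()
		return nil
	})
}
```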
@@ -343,7 +361,7 @@ func NewClient(opt *Options) *Client { func (c *Client) copy() *Client { c2 := new(Client) *c2 = *c - c2.setProcessor(c2.Process) + c2.cmdable.setProcessor(c2.Process) return c2 } @@ -366,9 +384,9 @@ func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { func (c *Client) Pipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExecer(c.pipelineProcessCmds), + exec: c.processPipeline, } - pipe.setProcessor(pipe.Process) + pipe.statefulCmdable.setProcessor(pipe.Process) return &pipe } @@ -379,9 +397,9 @@ func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *Client) TxPipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), + exec: c.processTxPipeline, } - pipe.setProcessor(pipe.Process) + pipe.statefulCmdable.setProcessor(pipe.Process) return &pipe } @@ -430,9 +448,9 @@ func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { func (c *Conn) Pipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExecer(c.pipelineProcessCmds), + exec: c.processPipeline, } - pipe.setProcessor(pipe.Process) + pipe.statefulCmdable.setProcessor(pipe.Process) return &pipe } @@ -443,8 +461,8 @@ func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *Conn) TxPipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), + exec: c.processTxPipeline, } - pipe.setProcessor(pipe.Process) + pipe.statefulCmdable.setProcessor(pipe.Process) return &pipe } diff --git a/vendor/github.com/go-redis/redis/redis_context.go b/vendor/github.com/go-redis/redis/redis_context.go index 6ec811ca5..c00e505f6 100644 --- a/vendor/github.com/go-redis/redis/redis_context.go +++ b/vendor/github.com/go-redis/redis/redis_context.go @@ -12,7 +12,10 @@ type baseClient struct { connPool pool.Pooler opt *Options - process func(Cmder) error + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error + onClose func() error // hook called when client is closed ctx context.Context diff --git a/vendor/github.com/go-redis/redis/redis_no_context.go b/vendor/github.com/go-redis/redis/redis_no_context.go index 0752192f1..8555c5c09 100644 --- a/vendor/github.com/go-redis/redis/redis_no_context.go +++ b/vendor/github.com/go-redis/redis/redis_no_context.go @@ -10,6 +10,9 @@ type baseClient struct { connPool pool.Pooler opt *Options - process func(Cmder) error + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error + onClose func() error // hook called when client is closed } diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go index c11ef6bc2..10f33ed00 100644 --- a/vendor/github.com/go-redis/redis/ring.go +++ b/vendor/github.com/go-redis/redis/ring.go @@ -150,6 +150,8 @@ type Ring struct { shards map[string]*ringShard shardsList []*ringShard + processPipeline func([]Cmder) error + cmdsInfoOnce internal.Once cmdsInfo map[string]*CommandInfo @@ -158,7 +160,9 @@ type Ring struct { func NewRing(opt *RingOptions) *Ring { const nreplicas = 100 + opt.init() + ring := &Ring{ opt: opt, nreplicas: nreplicas, @@ -166,13 +170,17 @@ func NewRing(opt *RingOptions) *Ring { hash: consistenthash.New(nreplicas, nil), shards: make(map[string]*ringShard), } - ring.setProcessor(ring.Process) + ring.processPipeline = 
ring.defaultProcessPipeline + ring.cmdable.setProcessor(ring.Process) + for name, addr := range opt.Addrs { clopt := opt.clientOptions() clopt.Addr = addr ring.addShard(name, NewClient(clopt)) } + go ring.heartbeat() + return ring } @@ -354,6 +362,13 @@ func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { return c.shardByKey(firstKey) } +func (c *Ring) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) { + c.ForEachShard(func(c *Client) error { + c.WrapProcess(fn) + return nil + }) +} + func (c *Ring) Process(cmd Cmder) error { shard, err := c.cmdShard(cmd) if err != nil { @@ -436,9 +451,9 @@ func (c *Ring) Close() error { func (c *Ring) Pipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExec, + exec: c.processPipeline, } - pipe.setProcessor(pipe.Process) + pipe.cmdable.setProcessor(pipe.Process) return &pipe } @@ -446,7 +461,13 @@ func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(fn) } -func (c *Ring) pipelineExec(cmds []Cmder) error { +func (c *Ring) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) +} + +func (c *Ring) defaultProcessPipeline(cmds []Cmder) error { cmdsMap := make(map[string][]Cmder) for _, cmd := range cmds { cmdInfo := c.cmdInfo(cmd.Name()) diff --git a/vendor/github.com/go-redis/redis/sentinel.go b/vendor/github.com/go-redis/redis/sentinel.go index 37d06b482..3f56f08b3 100644 --- a/vendor/github.com/go-redis/redis/sentinel.go +++ b/vendor/github.com/go-redis/redis/sentinel.go @@ -76,7 +76,7 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { opt: opt, } - client := Client{ + c := Client{ baseClient: baseClient{ opt: opt, connPool: failover.Pool(), @@ -86,9 +86,10 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { }, }, } - client.setProcessor(client.Process) + c.baseClient.init() + c.setProcessor(c.Process) - return &client + return &c } //------------------------------------------------------------------------------ @@ -100,14 +101,15 @@ type sentinelClient struct { func newSentinel(opt *Options) *sentinelClient { opt.init() - client := sentinelClient{ + c := sentinelClient{ baseClient: baseClient{ opt: opt, connPool: newConnPool(opt), }, } - client.cmdable = cmdable{client.Process} - return &client + c.baseClient.init() + c.cmdable.setProcessor(c.Process) + return &c } func (c *sentinelClient) PubSub() *PubSub { diff --git a/vendor/github.com/go-redis/redis/tx.go b/vendor/github.com/go-redis/redis/tx.go index 11d5d5cb0..26c29bef5 100644 --- a/vendor/github.com/go-redis/redis/tx.go +++ b/vendor/github.com/go-redis/redis/tx.go @@ -5,7 +5,7 @@ import ( "github.com/go-redis/redis/internal/pool" ) -// Redis transaction failed. +// TxFailedErr transaction redis failed. const TxFailedErr = internal.RedisError("redis: transaction failed") // Tx implements Redis transactions as described in @@ -24,7 +24,8 @@ func (c *Client) newTx() *Tx { connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true), }, } - tx.setProcessor(tx.Process) + tx.baseClient.init() + tx.statefulCmdable.setProcessor(tx.Process) return &tx } @@ -42,7 +43,7 @@ func (c *Client) Watch(fn func(*Tx) error, keys ...string) error { return err } -// close closes the transaction, releasing any open resources. +// Close closes the transaction, releasing any open resources. 
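Watch and TxFailedErr, whose doc comments are touched above, support the usual optimistic-locking loop. A hedged sketch follows; the key name, retry count, and increment helper are illustrative.

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

// increment retries a WATCH/MULTI/EXEC counter update until it is not
// interrupted by a concurrent write to the watched key.
func increment(client *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		n, err := tx.Get(key).Int64()
		if err != nil && err != redis.Nil {
			return err
		}
		// Queued commands are sent as MULTI/EXEC only if the watched key
		// is still unchanged.
		_, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
			pipe.Set(key, n+1, 0)
			return nil
		})
		return err
	}

	for retries := 0; retries < 3; retries++ {
		err := client.Watch(txf, key)
		if err != redis.TxFailedErr {
			return err // nil on success, or a non-retryable error
		}
		// Optimistic lock lost; try again.
	}
	return redis.TxFailedErr
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: ":6379"})
	fmt.Println(increment(client, "counter"))
}
```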
func (c *Tx) Close() error { _ = c.Unwatch().Err() return c.baseClient.Close() @@ -75,9 +76,9 @@ func (c *Tx) Unwatch(keys ...string) *StatusCmd { func (c *Tx) Pipeline() Pipeliner { pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), + exec: c.processTxPipeline, } - pipe.setProcessor(pipe.Process) + pipe.statefulCmdable.setProcessor(pipe.Process) return &pipe } diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go index b6d9114c5..a25503088 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions_test.go +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -478,7 +478,7 @@ func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { t.Fatalf("[%s] Invalid extension", test.name) } if !reflect.DeepEqual(*ext, want) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, &want) } } } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index 60d524645..569451f80 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -2029,7 +2029,11 @@ func (g *Generator) generateMessage(message *Descriptor) { // TODO: Revisit this and consider reverting back to anonymous interfaces. for oi := range message.OneofDecl { dname := oneofDisc[int32(oi)] - g.P("type ", dname, " interface { ", dname, "() }") + g.P("type ", dname, " interface {") + g.In() + g.P(dname, "()") + g.Out() + g.P("}") } g.P() for _, field := range message.Field { diff --git a/vendor/github.com/gorilla/schema/.travis.yml b/vendor/github.com/gorilla/schema/.travis.yml new file mode 100644 index 000000000..5f51dce4e --- /dev/null +++ b/vendor/github.com/gorilla/schema/.travis.yml @@ -0,0 +1,18 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: 1.8 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/schema/LICENSE b/vendor/github.com/gorilla/schema/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/vendor/github.com/gorilla/schema/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/schema/README.md b/vendor/github.com/gorilla/schema/README.md new file mode 100644 index 000000000..2c3ecd8e3 --- /dev/null +++ b/vendor/github.com/gorilla/schema/README.md @@ -0,0 +1,90 @@ +schema +====== +[![GoDoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema) [![Build Status](https://travis-ci.org/gorilla/schema.png?branch=master)](https://travis-ci.org/gorilla/schema) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge) + + +Package gorilla/schema converts structs to and from form values. + +## Example + +Here's a quick example: we parse POST form values and then decode them into a struct: + +```go +// Set a Decoder instance as a package global, because it caches +// meta-data about structs, and an instance can be shared safely. +var decoder = schema.NewDecoder() + +type Person struct { + Name string + Phone string +} + +func MyHandler(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + if err != nil { + // Handle error + } + + var person Person + + // r.PostForm is a map of our POST form values + err := decoder.Decode(&person, r.PostForm) + if err != nil { + // Handle error + } + + // Do something with person.Name or person.Phone +} +``` + +Conversely, contents of a struct can be encoded into form values. Here's a variant of the previous example using the Encoder: + +```go +var encoder = schema.NewEncoder() + +func MyHttpRequest() { + person := Person{"Jane Doe", "555-5555"} + form := url.Values{} + + err := encoder.Encode(person, form) + + if err != nil { + // Handle error + } + + // Use form values, for example, with an http client + client := new(http.Client) + res, err := client.PostForm("http://my-api.test", form) +} + +``` + +To define custom names for fields, use a struct tag "schema". To not populate certain fields, use a dash for the name and it will be ignored: + +```go +type Person struct { + Name string `schema:"name"` // custom name + Phone string `schema:"phone"` // custom name + Admin bool `schema:"-"` // this field is never set +} +``` + +The supported field types in the struct are: + +* bool +* float variants (float32, float64) +* int variants (int, int8, int16, int32, int64) +* string +* uint variants (uint, uint8, uint16, uint32, uint64) +* struct +* a pointer to one of the above types +* a slice or a pointer to a slice of one of the above types + +Unsupported types are simply ignored, however custom types can be registered to be converted. + +More examples are available on the Gorilla website: http://www.gorillatoolkit.org/pkg/schema + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/schema/cache.go b/vendor/github.com/gorilla/schema/cache.go new file mode 100644 index 000000000..afa20a3a3 --- /dev/null +++ b/vendor/github.com/gorilla/schema/cache.go @@ -0,0 +1,264 @@ +// Copyright 2012 The Gorilla Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "errors" + "reflect" + "strconv" + "strings" + "sync" +) + +var invalidPath = errors.New("schema: invalid path") + +// newCache returns a new cache. +func newCache() *cache { + c := cache{ + m: make(map[reflect.Type]*structInfo), + regconv: make(map[reflect.Type]Converter), + tag: "schema", + } + return &c +} + +// cache caches meta-data about a struct. +type cache struct { + l sync.RWMutex + m map[reflect.Type]*structInfo + regconv map[reflect.Type]Converter + tag string +} + +// registerConverter registers a converter function for a custom type. +func (c *cache) registerConverter(value interface{}, converterFunc Converter) { + c.regconv[reflect.TypeOf(value)] = converterFunc +} + +// parsePath parses a path in dotted notation verifying that it is a valid +// path to a struct field. +// +// It returns "path parts" which contain indices to fields to be used by +// reflect.Value.FieldByString(). Multiple parts are required for slices of +// structs. +func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) { + var struc *structInfo + var field *fieldInfo + var index64 int64 + var err error + parts := make([]pathPart, 0) + path := make([]string, 0) + keys := strings.Split(p, ".") + for i := 0; i < len(keys); i++ { + if t.Kind() != reflect.Struct { + return nil, invalidPath + } + if struc = c.get(t); struc == nil { + return nil, invalidPath + } + if field = struc.get(keys[i]); field == nil { + return nil, invalidPath + } + // Valid field. Append index. + path = append(path, field.name) + if field.ss { + // Parse a special case: slices of structs. + // i+1 must be the slice index. + // + // Now that struct can implements TextUnmarshaler interface, + // we don't need to force the struct's fields to appear in the path. + // So checking i+2 is not necessary anymore. + i++ + if i+1 > len(keys) { + return nil, invalidPath + } + if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil { + return nil, invalidPath + } + parts = append(parts, pathPart{ + path: path, + field: field, + index: int(index64), + }) + path = make([]string, 0) + + // Get the next struct type, dropping ptrs. + if field.typ.Kind() == reflect.Ptr { + t = field.typ.Elem() + } else { + t = field.typ + } + if t.Kind() == reflect.Slice { + t = t.Elem() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + } + } else if field.typ.Kind() == reflect.Ptr { + t = field.typ.Elem() + } else { + t = field.typ + } + } + // Add the remaining. + parts = append(parts, pathPart{ + path: path, + field: field, + index: -1, + }) + return parts, nil +} + +// get returns a cached structInfo, creating it if necessary. +func (c *cache) get(t reflect.Type) *structInfo { + c.l.RLock() + info := c.m[t] + c.l.RUnlock() + if info == nil { + info = c.create(t, nil) + c.l.Lock() + c.m[t] = info + c.l.Unlock() + } + return info +} + +// create creates a structInfo with meta-data about a struct. 
+func (c *cache) create(t reflect.Type, info *structInfo) *structInfo { + if info == nil { + info = &structInfo{fields: []*fieldInfo{}} + } + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.Anonymous { + ft := field.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + bef := len(info.fields) + c.create(ft, info) + for _, fi := range info.fields[bef:len(info.fields)] { + // exclude required check because duplicated to embedded field + fi.required = false + } + } + } + c.createField(field, info) + } + return info +} + +// createField creates a fieldInfo for the given field. +func (c *cache) createField(field reflect.StructField, info *structInfo) { + alias, options := fieldAlias(field, c.tag) + if alias == "-" { + // Ignore this field. + return + } + // Check if the type is supported and don't cache it if not. + // First let's get the basic type. + isSlice, isStruct := false, false + ft := field.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if isSlice = ft.Kind() == reflect.Slice; isSlice { + ft = ft.Elem() + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + if ft.Kind() == reflect.Array { + ft = ft.Elem() + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + if isStruct = ft.Kind() == reflect.Struct; !isStruct { + if c.converter(ft) == nil && builtinConverters[ft.Kind()] == nil { + // Type is not supported. + return + } + } + + info.fields = append(info.fields, &fieldInfo{ + typ: field.Type, + name: field.Name, + ss: isSlice && isStruct, + alias: alias, + anon: field.Anonymous, + required: options.Contains("required"), + }) +} + +// converter returns the converter for a type. +func (c *cache) converter(t reflect.Type) Converter { + return c.regconv[t] +} + +// ---------------------------------------------------------------------------- + +type structInfo struct { + fields []*fieldInfo +} + +func (i *structInfo) get(alias string) *fieldInfo { + for _, field := range i.fields { + if strings.EqualFold(field.alias, alias) { + return field + } + } + return nil +} + +type fieldInfo struct { + typ reflect.Type + name string // field name in the struct. + ss bool // true if this is a slice of structs. + alias string + anon bool // is an embedded field + required bool // tag option +} + +type pathPart struct { + field *fieldInfo + path []string // path to the field: walks structs using field names. + index int // struct index in slices of structs. +} + +// ---------------------------------------------------------------------------- + +// fieldAlias parses a field tag to get a field alias. +func fieldAlias(field reflect.StructField, tagName string) (alias string, options tagOptions) { + if tag := field.Tag.Get(tagName); tag != "" { + alias, options = parseTag(tag) + } + if alias == "" { + alias = field.Name + } + return alias, options +} + +// tagOptions is the string following a comma in a struct field's tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. 
+func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/gorilla/schema/converter.go b/vendor/github.com/gorilla/schema/converter.go new file mode 100644 index 000000000..4f2116a15 --- /dev/null +++ b/vendor/github.com/gorilla/schema/converter.go @@ -0,0 +1,145 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "reflect" + "strconv" +) + +type Converter func(string) reflect.Value + +var ( + invalidValue = reflect.Value{} + boolType = reflect.Bool + float32Type = reflect.Float32 + float64Type = reflect.Float64 + intType = reflect.Int + int8Type = reflect.Int8 + int16Type = reflect.Int16 + int32Type = reflect.Int32 + int64Type = reflect.Int64 + stringType = reflect.String + uintType = reflect.Uint + uint8Type = reflect.Uint8 + uint16Type = reflect.Uint16 + uint32Type = reflect.Uint32 + uint64Type = reflect.Uint64 +) + +// Default converters for basic types. +var builtinConverters = map[reflect.Kind]Converter{ + boolType: convertBool, + float32Type: convertFloat32, + float64Type: convertFloat64, + intType: convertInt, + int8Type: convertInt8, + int16Type: convertInt16, + int32Type: convertInt32, + int64Type: convertInt64, + stringType: convertString, + uintType: convertUint, + uint8Type: convertUint8, + uint16Type: convertUint16, + uint32Type: convertUint32, + uint64Type: convertUint64, +} + +func convertBool(value string) reflect.Value { + if value == "on" { + return reflect.ValueOf(true) + } else if v, err := strconv.ParseBool(value); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertFloat32(value string) reflect.Value { + if v, err := strconv.ParseFloat(value, 32); err == nil { + return reflect.ValueOf(float32(v)) + } + return invalidValue +} + +func convertFloat64(value string) reflect.Value { + if v, err := strconv.ParseFloat(value, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertInt(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 0); err == nil { + return reflect.ValueOf(int(v)) + } + return invalidValue +} + +func convertInt8(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 8); err == nil { + return reflect.ValueOf(int8(v)) + } + return invalidValue +} + +func convertInt16(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 16); err == nil { + return reflect.ValueOf(int16(v)) + } + return invalidValue +} + +func convertInt32(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 32); err == nil { + return reflect.ValueOf(int32(v)) + } + return invalidValue +} + +func convertInt64(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertString(value string) reflect.Value { + return reflect.ValueOf(value) +} + +func convertUint(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 0); err == nil { + return reflect.ValueOf(uint(v)) + } + return invalidValue +} + +func convertUint8(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 8); err == nil { + return reflect.ValueOf(uint8(v)) + } + return invalidValue +} + +func convertUint16(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 16); err == nil { + 
return reflect.ValueOf(uint16(v)) + } + return invalidValue +} + +func convertUint32(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 32); err == nil { + return reflect.ValueOf(uint32(v)) + } + return invalidValue +} + +func convertUint64(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} diff --git a/vendor/github.com/gorilla/schema/decoder.go b/vendor/github.com/gorilla/schema/decoder.go new file mode 100644 index 000000000..e49b53c87 --- /dev/null +++ b/vendor/github.com/gorilla/schema/decoder.go @@ -0,0 +1,420 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strings" +) + +// NewDecoder returns a new Decoder. +func NewDecoder() *Decoder { + return &Decoder{cache: newCache()} +} + +// Decoder decodes values from a map[string][]string to a struct. +type Decoder struct { + cache *cache + zeroEmpty bool + ignoreUnknownKeys bool +} + +// SetAliasTag changes the tag used to locate custom field aliases. +// The default tag is "schema". +func (d *Decoder) SetAliasTag(tag string) { + d.cache.tag = tag +} + +// ZeroEmpty controls the behaviour when the decoder encounters empty values +// in a map. +// If z is true and a key in the map has the empty string as a value +// then the corresponding struct field is set to the zero value. +// If z is false then empty strings are ignored. +// +// The default value is false, that is empty values do not change +// the value of the struct field. +func (d *Decoder) ZeroEmpty(z bool) { + d.zeroEmpty = z +} + +// IgnoreUnknownKeys controls the behaviour when the decoder encounters unknown +// keys in the map. +// If i is true and an unknown field is encountered, it is ignored. This is +// similar to how unknown keys are handled by encoding/json. +// If i is false then Decode will return an error. Note that any valid keys +// will still be decoded in to the target struct. +// +// To preserve backwards compatibility, the default value is false. +func (d *Decoder) IgnoreUnknownKeys(i bool) { + d.ignoreUnknownKeys = i +} + +// RegisterConverter registers a converter function for a custom type. +func (d *Decoder) RegisterConverter(value interface{}, converterFunc Converter) { + d.cache.registerConverter(value, converterFunc) +} + +// Decode decodes a map[string][]string to a struct. +// +// The first parameter must be a pointer to a struct. +// +// The second parameter is a map, typically url.Values from an HTTP request. +// Keys are "paths" in dotted notation to the struct fields and nested structs. +// +// See the package documentation for a full explanation of the mechanics. 
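A hedged usage sketch for the Decoder defined here: NewDecoder, IgnoreUnknownKeys, and Decode are the functions shown above, while the struct, its tags, and the form keys are illustrative.

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

type Address struct {
	Street string `schema:"street"`
}

type Person struct {
	Name      string    `schema:"name,required"` // alias "name" plus the "required" tag option
	Admin     bool      `schema:"-"`             // never populated by the decoder
	Addresses []Address `schema:"addresses"`     // filled via dotted paths
}

func main() {
	// One Decoder can be shared: it caches struct metadata.
	d := schema.NewDecoder()
	d.IgnoreUnknownKeys(true)

	src := map[string][]string{
		"name":               {"Jane Doe"},
		"addresses.0.street": {"1 Main St"},
		"addresses.1.street": {"2 Side St"},
		"ignored":            {"no such field"},
	}

	var p Person
	if err := d.Decode(&p, src); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", p)
}
```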
+func (d *Decoder) Decode(dst interface{}, src map[string][]string) error { + v := reflect.ValueOf(dst) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return errors.New("schema: interface must be a pointer to struct") + } + v = v.Elem() + t := v.Type() + errors := MultiError{} + for path, values := range src { + if parts, err := d.cache.parsePath(path, t); err == nil { + if err = d.decode(v, path, parts, values); err != nil { + errors[path] = err + } + } else if !d.ignoreUnknownKeys { + errors[path] = fmt.Errorf("schema: invalid path %q", path) + } + } + if len(errors) > 0 { + return errors + } + return d.checkRequired(t, src, "") +} + +// checkRequired checks whether required fields are empty +// +// check type t recursively if t has struct fields, and prefix is same as parsePath: in dotted notation +// +// src is the source map for decoding, we use it here to see if those required fields are included in src +func (d *Decoder) checkRequired(t reflect.Type, src map[string][]string, prefix string) error { + struc := d.cache.get(t) + if struc == nil { + // unexpect, cache.get never return nil + return errors.New("cache fail") + } + + for _, f := range struc.fields { + if f.typ.Kind() == reflect.Struct { + err := d.checkRequired(f.typ, src, prefix+f.alias+".") + if err != nil { + if !f.anon { + return err + } + // check embedded parent field. + err2 := d.checkRequired(f.typ, src, prefix) + if err2 != nil { + return err + } + } + } + if f.required { + key := f.alias + if prefix != "" { + key = prefix + key + } + if isEmpty(f.typ, src[key]) { + return fmt.Errorf("%v is empty", key) + } + } + } + return nil +} + +// isEmpty returns true if value is empty for specific type +func isEmpty(t reflect.Type, value []string) bool { + if len(value) == 0 { + return true + } + switch t.Kind() { + case boolType, float32Type, float64Type, intType, int8Type, int32Type, int64Type, stringType, uint8Type, uint16Type, uint32Type, uint64Type: + return len(value[0]) == 0 + } + return false +} + +// decode fills a struct field using a parsed path. +func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values []string) error { + // Get the field walking the struct fields by index. + for _, name := range parts[0].path { + if v.Type().Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.FieldByName(name) + } + // Don't even bother for unexported fields. + if !v.CanSet() { + return nil + } + + // Dereference if needed. + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + if v.IsNil() { + v.Set(reflect.New(t)) + } + v = v.Elem() + } + + // Slice of structs. Let's go recursive. + if len(parts) > 1 { + idx := parts[0].index + if v.IsNil() || v.Len() < idx+1 { + value := reflect.MakeSlice(t, idx+1, idx+1) + if v.Len() < idx+1 { + // Resize it. + reflect.Copy(value, v) + } + v.Set(value) + } + return d.decode(v.Index(idx), path, parts[1:], values) + } + + // Get the converter early in case there is one for a slice type. + conv := d.cache.converter(t) + m := isTextUnmarshaler(v) + if conv == nil && t.Kind() == reflect.Slice && m.IsSlice { + var items []reflect.Value + elemT := t.Elem() + isPtrElem := elemT.Kind() == reflect.Ptr + if isPtrElem { + elemT = elemT.Elem() + } + + // Try to get a converter for the element type. 
+ conv := d.cache.converter(elemT) + if conv == nil { + conv = builtinConverters[elemT.Kind()] + if conv == nil { + // As we are not dealing with slice of structs here, we don't need to check if the type + // implements TextUnmarshaler interface + return fmt.Errorf("schema: converter not found for %v", elemT) + } + } + + for key, value := range values { + if value == "" { + if d.zeroEmpty { + items = append(items, reflect.Zero(elemT)) + } + } else if m.IsValid { + u := reflect.New(elemT) + if m.IsPtr { + u = reflect.New(reflect.PtrTo(elemT).Elem()) + } + if err := u.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil { + return ConversionError{ + Key: path, + Type: t, + Index: key, + Err: err, + } + } + if m.IsPtr { + items = append(items, u.Elem().Addr()) + } else if u.Kind() == reflect.Ptr { + items = append(items, u.Elem()) + } else { + items = append(items, u) + } + } else if item := conv(value); item.IsValid() { + if isPtrElem { + ptr := reflect.New(elemT) + ptr.Elem().Set(item) + item = ptr + } + if item.Type() != elemT && !isPtrElem { + item = item.Convert(elemT) + } + items = append(items, item) + } else { + if strings.Contains(value, ",") { + values := strings.Split(value, ",") + for _, value := range values { + if value == "" { + if d.zeroEmpty { + items = append(items, reflect.Zero(elemT)) + } + } else if item := conv(value); item.IsValid() { + if isPtrElem { + ptr := reflect.New(elemT) + ptr.Elem().Set(item) + item = ptr + } + if item.Type() != elemT && !isPtrElem { + item = item.Convert(elemT) + } + items = append(items, item) + } else { + return ConversionError{ + Key: path, + Type: elemT, + Index: key, + } + } + } + } else { + return ConversionError{ + Key: path, + Type: elemT, + Index: key, + } + } + } + } + value := reflect.Append(reflect.MakeSlice(t, 0, 0), items...) + v.Set(value) + } else { + val := "" + // Use the last value provided if any values were provided + if len(values) > 0 { + val = values[len(values)-1] + } + + if val == "" { + if d.zeroEmpty { + v.Set(reflect.Zero(t)) + } + } else if conv != nil { + if value := conv(val); value.IsValid() { + v.Set(value.Convert(t)) + } else { + return ConversionError{ + Key: path, + Type: t, + Index: -1, + } + } + } else if m.IsValid { + // If the value implements the encoding.TextUnmarshaler interface + // apply UnmarshalText as the converter + if err := m.Unmarshaler.UnmarshalText([]byte(val)); err != nil { + return ConversionError{ + Key: path, + Type: t, + Index: -1, + Err: err, + } + } + } else if conv := builtinConverters[t.Kind()]; conv != nil { + if value := conv(val); value.IsValid() { + v.Set(value.Convert(t)) + } else { + return ConversionError{ + Key: path, + Type: t, + Index: -1, + } + } + } else { + return fmt.Errorf("schema: converter not found for %v", t) + } + } + return nil +} + +func isTextUnmarshaler(v reflect.Value) unmarshaler { + + // Create a new unmarshaller instance + m := unmarshaler{} + + // As the UnmarshalText function should be applied + // to the pointer of the type, we convert the value to pointer. 
+ if v.CanAddr() { + v = v.Addr() + } + if m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler); m.IsValid { + return m + } + + // if v is []T or *[]T create new T + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Slice { + // if t is a pointer slice, check if it implements encoding.TextUnmarshaler + m.IsSlice = true + if t = t.Elem(); t.Kind() == reflect.Ptr { + t = reflect.PtrTo(t.Elem()) + v = reflect.Zero(t) + m.IsPtr = true + m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler) + return m + } + } + + v = reflect.New(t) + m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler) + return m +} + +// TextUnmarshaler helpers ---------------------------------------------------- +// unmarshaller contains information about a TextUnmarshaler type +type unmarshaler struct { + Unmarshaler encoding.TextUnmarshaler + IsSlice bool + IsPtr bool + IsValid bool +} + +// Errors --------------------------------------------------------------------- + +// ConversionError stores information about a failed conversion. +type ConversionError struct { + Key string // key from the source map. + Type reflect.Type // expected type of elem + Index int // index for multi-value fields; -1 for single-value fields. + Err error // low-level error (when it exists) +} + +func (e ConversionError) Error() string { + var output string + + if e.Index < 0 { + output = fmt.Sprintf("schema: error converting value for %q", e.Key) + } else { + output = fmt.Sprintf("schema: error converting value for index %d of %q", + e.Index, e.Key) + } + + if e.Err != nil { + output = fmt.Sprintf("%s. Details: %s", output, e.Err) + } + + return output +} + +// MultiError stores multiple decoding errors. +// +// Borrowed from the App Engine SDK. +type MultiError map[string]error + +func (e MultiError) Error() string { + s := "" + for _, err := range e { + s = err.Error() + break + } + switch len(e) { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, len(e)-1) +} diff --git a/vendor/github.com/gorilla/schema/decoder_test.go b/vendor/github.com/gorilla/schema/decoder_test.go new file mode 100644 index 000000000..18b3b3270 --- /dev/null +++ b/vendor/github.com/gorilla/schema/decoder_test.go @@ -0,0 +1,1693 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "encoding/hex" + "errors" + "reflect" + "strings" + "testing" + "time" +) + +type IntAlias int + +type rudeBool bool + +func (id *rudeBool) UnmarshalText(text []byte) error { + value := string(text) + switch { + case strings.EqualFold("Yup", value): + *id = true + case strings.EqualFold("Nope", value): + *id = false + default: + return errors.New("value must be yup or nope") + } + return nil +} + +// All cases we want to cover, in a nutshell. 
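The TextUnmarshaler branches above (exercised by the rudeBool helper in these tests) let custom scalar types decode themselves. A minimal hedged sketch with an illustrative yesNo type:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gorilla/schema"
)

// yesNo decodes "yes"/"no" style form values via encoding.TextUnmarshaler.
type yesNo bool

func (b *yesNo) UnmarshalText(text []byte) error {
	*b = yesNo(strings.EqualFold(string(text), "yes"))
	return nil
}

type prefs struct {
	Subscribed yesNo `schema:"subscribed"`
}

func main() {
	var p prefs
	err := schema.NewDecoder().Decode(&p, map[string][]string{
		"subscribed": {"YES"},
	})
	fmt.Println(p.Subscribed, err) // true <nil>
}
```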
+type S1 struct { + F01 int `schema:"f1"` + F02 *int `schema:"f2"` + F03 []int `schema:"f3"` + F04 []*int `schema:"f4"` + F05 *[]int `schema:"f5"` + F06 *[]*int `schema:"f6"` + F07 S2 `schema:"f7"` + F08 *S1 `schema:"f8"` + F09 int `schema:"-"` + F10 []S1 `schema:"f10"` + F11 []*S1 `schema:"f11"` + F12 *[]S1 `schema:"f12"` + F13 *[]*S1 `schema:"f13"` + F14 int `schema:"f14"` + F15 IntAlias `schema:"f15"` + F16 []IntAlias `schema:"f16"` + F17 S19 `schema:"f17"` + F18 rudeBool `schema:"f18"` + F19 *rudeBool `schema:"f19"` + F20 []rudeBool `schema:"f20"` + F21 []*rudeBool `schema:"f21"` +} + +type S2 struct { + F01 *[]*int `schema:"f1"` +} + +type S19 [2]byte + +func (id *S19) UnmarshalText(text []byte) error { + buf, err := hex.DecodeString(string(text)) + if err != nil { + return err + } + if len(buf) > len(*id) { + return errors.New("out of range") + } + for i := range buf { + (*id)[i] = buf[i] + } + return nil +} + +func TestAll(t *testing.T) { + v := map[string][]string{ + "f1": {"1"}, + "f2": {"2"}, + "f3": {"31", "32"}, + "f4": {"41", "42"}, + "f5": {"51", "52"}, + "f6": {"61", "62"}, + "f7.f1": {"71", "72"}, + "f8.f8.f7.f1": {"81", "82"}, + "f9": {"9"}, + "f10.0.f10.0.f6": {"101", "102"}, + "f10.0.f10.1.f6": {"103", "104"}, + "f11.0.f11.0.f6": {"111", "112"}, + "f11.0.f11.1.f6": {"113", "114"}, + "f12.0.f12.0.f6": {"121", "122"}, + "f12.0.f12.1.f6": {"123", "124"}, + "f13.0.f13.0.f6": {"131", "132"}, + "f13.0.f13.1.f6": {"133", "134"}, + "f14": {}, + "f15": {"151"}, + "f16": {"161", "162"}, + "f17": {"1a2b"}, + "f18": {"yup"}, + "f19": {"nope"}, + "f20": {"nope", "yup"}, + "f21": {"yup", "nope"}, + } + f2 := 2 + f41, f42 := 41, 42 + f61, f62 := 61, 62 + f71, f72 := 71, 72 + f81, f82 := 81, 82 + f101, f102, f103, f104 := 101, 102, 103, 104 + f111, f112, f113, f114 := 111, 112, 113, 114 + f121, f122, f123, f124 := 121, 122, 123, 124 + f131, f132, f133, f134 := 131, 132, 133, 134 + var f151 IntAlias = 151 + var f161, f162 IntAlias = 161, 162 + var f152, f153 rudeBool = true, false + e := S1{ + F01: 1, + F02: &f2, + F03: []int{31, 32}, + F04: []*int{&f41, &f42}, + F05: &[]int{51, 52}, + F06: &[]*int{&f61, &f62}, + F07: S2{ + F01: &[]*int{&f71, &f72}, + }, + F08: &S1{ + F08: &S1{ + F07: S2{ + F01: &[]*int{&f81, &f82}, + }, + }, + }, + F09: 0, + F10: []S1{ + S1{ + F10: []S1{ + S1{F06: &[]*int{&f101, &f102}}, + S1{F06: &[]*int{&f103, &f104}}, + }, + }, + }, + F11: []*S1{ + &S1{ + F11: []*S1{ + &S1{F06: &[]*int{&f111, &f112}}, + &S1{F06: &[]*int{&f113, &f114}}, + }, + }, + }, + F12: &[]S1{ + S1{ + F12: &[]S1{ + S1{F06: &[]*int{&f121, &f122}}, + S1{F06: &[]*int{&f123, &f124}}, + }, + }, + }, + F13: &[]*S1{ + &S1{ + F13: &[]*S1{ + &S1{F06: &[]*int{&f131, &f132}}, + &S1{F06: &[]*int{&f133, &f134}}, + }, + }, + }, + F14: 0, + F15: f151, + F16: []IntAlias{f161, f162}, + F17: S19{0x1a, 0x2b}, + F18: f152, + F19: &f153, + F20: []rudeBool{f153, f152}, + F21: []*rudeBool{&f152, &f153}, + } + + s := &S1{} + _ = NewDecoder().Decode(s, v) + + vals := func(values []*int) []int { + r := make([]int, len(values)) + for k, v := range values { + r[k] = *v + } + return r + } + + if s.F01 != e.F01 { + t.Errorf("f1: expected %v, got %v", e.F01, s.F01) + } + if s.F02 == nil { + t.Errorf("f2: expected %v, got nil", *e.F02) + } else if *s.F02 != *e.F02 { + t.Errorf("f2: expected %v, got %v", *e.F02, *s.F02) + } + if s.F03 == nil { + t.Errorf("f3: expected %v, got nil", e.F03) + } else if len(s.F03) != 2 || s.F03[0] != e.F03[0] || s.F03[1] != e.F03[1] { + t.Errorf("f3: expected %v, got %v", e.F03, s.F03) + } + if 
s.F04 == nil { + t.Errorf("f4: expected %v, got nil", e.F04) + } else { + if len(s.F04) != 2 || *(s.F04)[0] != *(e.F04)[0] || *(s.F04)[1] != *(e.F04)[1] { + t.Errorf("f4: expected %v, got %v", vals(e.F04), vals(s.F04)) + } + } + if s.F05 == nil { + t.Errorf("f5: expected %v, got nil", e.F05) + } else { + sF05, eF05 := *s.F05, *e.F05 + if len(sF05) != 2 || sF05[0] != eF05[0] || sF05[1] != eF05[1] { + t.Errorf("f5: expected %v, got %v", eF05, sF05) + } + } + if s.F06 == nil { + t.Errorf("f6: expected %v, got nil", vals(*e.F06)) + } else { + sF06, eF06 := *s.F06, *e.F06 + if len(sF06) != 2 || *(sF06)[0] != *(eF06)[0] || *(sF06)[1] != *(eF06)[1] { + t.Errorf("f6: expected %v, got %v", vals(eF06), vals(sF06)) + } + } + if s.F07.F01 == nil { + t.Errorf("f7.f1: expected %v, got nil", vals(*e.F07.F01)) + } else { + sF07, eF07 := *s.F07.F01, *e.F07.F01 + if len(sF07) != 2 || *(sF07)[0] != *(eF07)[0] || *(sF07)[1] != *(eF07)[1] { + t.Errorf("f7.f1: expected %v, got %v", vals(eF07), vals(sF07)) + } + } + if s.F08 == nil { + t.Errorf("f8: got nil") + } else if s.F08.F08 == nil { + t.Errorf("f8.f8: got nil") + } else if s.F08.F08.F07.F01 == nil { + t.Errorf("f8.f8.f7.f1: expected %v, got nil", vals(*e.F08.F08.F07.F01)) + } else { + sF08, eF08 := *s.F08.F08.F07.F01, *e.F08.F08.F07.F01 + if len(sF08) != 2 || *(sF08)[0] != *(eF08)[0] || *(sF08)[1] != *(eF08)[1] { + t.Errorf("f8.f8.f7.f1: expected %v, got %v", vals(eF08), vals(sF08)) + } + } + if s.F09 != e.F09 { + t.Errorf("f9: expected %v, got %v", e.F09, s.F09) + } + if s.F10 == nil { + t.Errorf("f10: got nil") + } else if len(s.F10) != 1 { + t.Errorf("f10: expected 1 element, got %v", s.F10) + } else { + if len(s.F10[0].F10) != 2 { + t.Errorf("f10.0.f10: expected 1 element, got %v", s.F10[0].F10) + } else { + sF10, eF10 := *s.F10[0].F10[0].F06, *e.F10[0].F10[0].F06 + if sF10 == nil { + t.Errorf("f10.0.f10.0.f6: expected %v, got nil", vals(eF10)) + } else { + if len(sF10) != 2 || *(sF10)[0] != *(eF10)[0] || *(sF10)[1] != *(eF10)[1] { + t.Errorf("f10.0.f10.0.f6: expected %v, got %v", vals(eF10), vals(sF10)) + } + } + sF10, eF10 = *s.F10[0].F10[1].F06, *e.F10[0].F10[1].F06 + if sF10 == nil { + t.Errorf("f10.0.f10.0.f6: expected %v, got nil", vals(eF10)) + } else { + if len(sF10) != 2 || *(sF10)[0] != *(eF10)[0] || *(sF10)[1] != *(eF10)[1] { + t.Errorf("f10.0.f10.0.f6: expected %v, got %v", vals(eF10), vals(sF10)) + } + } + } + } + if s.F11 == nil { + t.Errorf("f11: got nil") + } else if len(s.F11) != 1 { + t.Errorf("f11: expected 1 element, got %v", s.F11) + } else { + if len(s.F11[0].F11) != 2 { + t.Errorf("f11.0.f11: expected 1 element, got %v", s.F11[0].F11) + } else { + sF11, eF11 := *s.F11[0].F11[0].F06, *e.F11[0].F11[0].F06 + if sF11 == nil { + t.Errorf("f11.0.f11.0.f6: expected %v, got nil", vals(eF11)) + } else { + if len(sF11) != 2 || *(sF11)[0] != *(eF11)[0] || *(sF11)[1] != *(eF11)[1] { + t.Errorf("f11.0.f11.0.f6: expected %v, got %v", vals(eF11), vals(sF11)) + } + } + sF11, eF11 = *s.F11[0].F11[1].F06, *e.F11[0].F11[1].F06 + if sF11 == nil { + t.Errorf("f11.0.f11.0.f6: expected %v, got nil", vals(eF11)) + } else { + if len(sF11) != 2 || *(sF11)[0] != *(eF11)[0] || *(sF11)[1] != *(eF11)[1] { + t.Errorf("f11.0.f11.0.f6: expected %v, got %v", vals(eF11), vals(sF11)) + } + } + } + } + if s.F12 == nil { + t.Errorf("f12: got nil") + } else if len(*s.F12) != 1 { + t.Errorf("f12: expected 1 element, got %v", *s.F12) + } else { + sF12, eF12 := *(s.F12), *(e.F12) + if len(*sF12[0].F12) != 2 { + t.Errorf("f12.0.f12: expected 1 element, got %v", 
*sF12[0].F12) + } else { + sF122, eF122 := *(*sF12[0].F12)[0].F06, *(*eF12[0].F12)[0].F06 + if sF122 == nil { + t.Errorf("f12.0.f12.0.f6: expected %v, got nil", vals(eF122)) + } else { + if len(sF122) != 2 || *(sF122)[0] != *(eF122)[0] || *(sF122)[1] != *(eF122)[1] { + t.Errorf("f12.0.f12.0.f6: expected %v, got %v", vals(eF122), vals(sF122)) + } + } + sF122, eF122 = *(*sF12[0].F12)[1].F06, *(*eF12[0].F12)[1].F06 + if sF122 == nil { + t.Errorf("f12.0.f12.0.f6: expected %v, got nil", vals(eF122)) + } else { + if len(sF122) != 2 || *(sF122)[0] != *(eF122)[0] || *(sF122)[1] != *(eF122)[1] { + t.Errorf("f12.0.f12.0.f6: expected %v, got %v", vals(eF122), vals(sF122)) + } + } + } + } + if s.F13 == nil { + t.Errorf("f13: got nil") + } else if len(*s.F13) != 1 { + t.Errorf("f13: expected 1 element, got %v", *s.F13) + } else { + sF13, eF13 := *(s.F13), *(e.F13) + if len(*sF13[0].F13) != 2 { + t.Errorf("f13.0.f13: expected 1 element, got %v", *sF13[0].F13) + } else { + sF132, eF132 := *(*sF13[0].F13)[0].F06, *(*eF13[0].F13)[0].F06 + if sF132 == nil { + t.Errorf("f13.0.f13.0.f6: expected %v, got nil", vals(eF132)) + } else { + if len(sF132) != 2 || *(sF132)[0] != *(eF132)[0] || *(sF132)[1] != *(eF132)[1] { + t.Errorf("f13.0.f13.0.f6: expected %v, got %v", vals(eF132), vals(sF132)) + } + } + sF132, eF132 = *(*sF13[0].F13)[1].F06, *(*eF13[0].F13)[1].F06 + if sF132 == nil { + t.Errorf("f13.0.f13.0.f6: expected %v, got nil", vals(eF132)) + } else { + if len(sF132) != 2 || *(sF132)[0] != *(eF132)[0] || *(sF132)[1] != *(eF132)[1] { + t.Errorf("f13.0.f13.0.f6: expected %v, got %v", vals(eF132), vals(sF132)) + } + } + } + } + if s.F14 != e.F14 { + t.Errorf("f14: expected %v, got %v", e.F14, s.F14) + } + if s.F15 != e.F15 { + t.Errorf("f15: expected %v, got %v", e.F15, s.F15) + } + if s.F16 == nil { + t.Errorf("f16: nil") + } else if len(s.F16) != len(e.F16) { + t.Errorf("f16: expected len %d, got %d", len(e.F16), len(s.F16)) + } else if !reflect.DeepEqual(s.F16, e.F16) { + t.Errorf("f16: expected %v, got %v", e.F16, s.F16) + } + if s.F17 != e.F17 { + t.Errorf("f17: expected %v, got %v", e.F17, s.F17) + } + if s.F18 != e.F18 { + t.Errorf("f18: expected %v, got %v", e.F18, s.F18) + } + if *s.F19 != *e.F19 { + t.Errorf("f19: expected %v, got %v", *e.F19, *s.F19) + } + if s.F20 == nil { + t.Errorf("f20: nil") + } else if len(s.F20) != len(e.F20) { + t.Errorf("f20: expected %v, got %v", e.F20, s.F20) + } else if !reflect.DeepEqual(s.F20, e.F20) { + t.Errorf("f20: expected %v, got %v", e.F20, s.F20) + } + if s.F21 == nil { + t.Errorf("f21: nil") + } else if len(s.F21) != len(e.F21) { + t.Errorf("f21: expected length %d, got %d", len(e.F21), len(s.F21)) + } else if !reflect.DeepEqual(s.F21, e.F21) { + t.Errorf("f21: expected %v, got %v", e.F21, s.F21) + } +} + +func BenchmarkAll(b *testing.B) { + v := map[string][]string{ + "f1": {"1"}, + "f2": {"2"}, + "f3": {"31", "32"}, + "f4": {"41", "42"}, + "f5": {"51", "52"}, + "f6": {"61", "62"}, + "f7.f1": {"71", "72"}, + "f8.f8.f7.f1": {"81", "82"}, + "f9": {"9"}, + "f10.0.f10.0.f6": {"101", "102"}, + "f10.0.f10.1.f6": {"103", "104"}, + "f11.0.f11.0.f6": {"111", "112"}, + "f11.0.f11.1.f6": {"113", "114"}, + "f12.0.f12.0.f6": {"121", "122"}, + "f12.0.f12.1.f6": {"123", "124"}, + "f13.0.f13.0.f6": {"131", "132"}, + "f13.0.f13.1.f6": {"133", "134"}, + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + s := &S1{} + _ = NewDecoder().Decode(s, v) + } +} + +// ---------------------------------------------------------------------------- + +type S3 struct { + F01 bool + F02 
float32 + F03 float64 + F04 int + F05 int8 + F06 int16 + F07 int32 + F08 int64 + F09 string + F10 uint + F11 uint8 + F12 uint16 + F13 uint32 + F14 uint64 +} + +func TestDefaultConverters(t *testing.T) { + v := map[string][]string{ + "F01": {"true"}, + "F02": {"4.2"}, + "F03": {"4.3"}, + "F04": {"-42"}, + "F05": {"-43"}, + "F06": {"-44"}, + "F07": {"-45"}, + "F08": {"-46"}, + "F09": {"foo"}, + "F10": {"42"}, + "F11": {"43"}, + "F12": {"44"}, + "F13": {"45"}, + "F14": {"46"}, + } + e := S3{ + F01: true, + F02: 4.2, + F03: 4.3, + F04: -42, + F05: -43, + F06: -44, + F07: -45, + F08: -46, + F09: "foo", + F10: 42, + F11: 43, + F12: 44, + F13: 45, + F14: 46, + } + s := &S3{} + _ = NewDecoder().Decode(s, v) + if s.F01 != e.F01 { + t.Errorf("F01: expected %v, got %v", e.F01, s.F01) + } + if s.F02 != e.F02 { + t.Errorf("F02: expected %v, got %v", e.F02, s.F02) + } + if s.F03 != e.F03 { + t.Errorf("F03: expected %v, got %v", e.F03, s.F03) + } + if s.F04 != e.F04 { + t.Errorf("F04: expected %v, got %v", e.F04, s.F04) + } + if s.F05 != e.F05 { + t.Errorf("F05: expected %v, got %v", e.F05, s.F05) + } + if s.F06 != e.F06 { + t.Errorf("F06: expected %v, got %v", e.F06, s.F06) + } + if s.F07 != e.F07 { + t.Errorf("F07: expected %v, got %v", e.F07, s.F07) + } + if s.F08 != e.F08 { + t.Errorf("F08: expected %v, got %v", e.F08, s.F08) + } + if s.F09 != e.F09 { + t.Errorf("F09: expected %v, got %v", e.F09, s.F09) + } + if s.F10 != e.F10 { + t.Errorf("F10: expected %v, got %v", e.F10, s.F10) + } + if s.F11 != e.F11 { + t.Errorf("F11: expected %v, got %v", e.F11, s.F11) + } + if s.F12 != e.F12 { + t.Errorf("F12: expected %v, got %v", e.F12, s.F12) + } + if s.F13 != e.F13 { + t.Errorf("F13: expected %v, got %v", e.F13, s.F13) + } + if s.F14 != e.F14 { + t.Errorf("F14: expected %v, got %v", e.F14, s.F14) + } +} + +func TestOn(t *testing.T) { + v := map[string][]string{ + "F01": {"on"}, + } + s := S3{} + err := NewDecoder().Decode(&s, v) + if err != nil { + t.Fatal(err) + } + if !s.F01 { + t.Fatal("Value was not set to true") + } +} + +// ---------------------------------------------------------------------------- + +func TestInlineStruct(t *testing.T) { + s1 := &struct { + F01 bool + }{} + s2 := &struct { + F01 int + }{} + v1 := map[string][]string{ + "F01": {"true"}, + } + v2 := map[string][]string{ + "F01": {"42"}, + } + decoder := NewDecoder() + _ = decoder.Decode(s1, v1) + if s1.F01 != true { + t.Errorf("s1: expected %v, got %v", true, s1.F01) + } + _ = decoder.Decode(s2, v2) + if s2.F01 != 42 { + t.Errorf("s2: expected %v, got %v", 42, s2.F01) + } +} + +// ---------------------------------------------------------------------------- + +type Foo struct { + F01 int + F02 Bar + Bif []Baz +} + +type Bar struct { + F01 string + F02 string + F03 string + F14 string + S05 string + Str string +} + +type Baz struct { + F99 []string +} + +func TestSimpleExample(t *testing.T) { + data := map[string][]string{ + "F01": {"1"}, + "F02.F01": {"S1"}, + "F02.F02": {"S2"}, + "F02.F03": {"S3"}, + "F02.F14": {"S4"}, + "F02.S05": {"S5"}, + "F02.Str": {"Str"}, + "Bif.0.F99": {"A", "B", "C"}, + } + + e := &Foo{ + F01: 1, + F02: Bar{ + F01: "S1", + F02: "S2", + F03: "S3", + F14: "S4", + S05: "S5", + Str: "Str", + }, + Bif: []Baz{{ + F99: []string{"A", "B", "C"}}, + }, + } + + s := &Foo{} + _ = NewDecoder().Decode(s, data) + + if s.F01 != e.F01 { + t.Errorf("F01: expected %v, got %v", e.F01, s.F01) + } + if s.F02.F01 != e.F02.F01 { + t.Errorf("F02.F01: expected %v, got %v", e.F02.F01, s.F02.F01) + } + if s.F02.F02 != e.F02.F02 { + 
t.Errorf("F02.F02: expected %v, got %v", e.F02.F02, s.F02.F02) + } + if s.F02.F03 != e.F02.F03 { + t.Errorf("F02.F03: expected %v, got %v", e.F02.F03, s.F02.F03) + } + if s.F02.F14 != e.F02.F14 { + t.Errorf("F02.F14: expected %v, got %v", e.F02.F14, s.F02.F14) + } + if s.F02.S05 != e.F02.S05 { + t.Errorf("F02.S05: expected %v, got %v", e.F02.S05, s.F02.S05) + } + if s.F02.Str != e.F02.Str { + t.Errorf("F02.Str: expected %v, got %v", e.F02.Str, s.F02.Str) + } + if len(s.Bif) != len(e.Bif) { + t.Errorf("Bif len: expected %d, got %d", len(e.Bif), len(s.Bif)) + } else { + if len(s.Bif[0].F99) != len(e.Bif[0].F99) { + t.Errorf("Bif[0].F99 len: expected %d, got %d", len(e.Bif[0].F99), len(s.Bif[0].F99)) + } + } +} + +// ---------------------------------------------------------------------------- + +type S4 struct { + F01 int64 + F02 float64 + F03 bool + F04 rudeBool +} + +func TestConversionError(t *testing.T) { + data := map[string][]string{ + "F01": {"foo"}, + "F02": {"bar"}, + "F03": {"baz"}, + "F04": {"not-a-yes-or-nope"}, + } + s := &S4{} + e := NewDecoder().Decode(s, data) + + m := e.(MultiError) + if len(m) != 4 { + t.Errorf("Expected 3 errors, got %v", m) + } +} + +// ---------------------------------------------------------------------------- + +type S5 struct { + F01 []string +} + +func TestEmptyValue(t *testing.T) { + data := map[string][]string{ + "F01": {"", "foo"}, + } + s := &S5{} + NewDecoder().Decode(s, data) + if len(s.F01) != 1 { + t.Errorf("Expected 1 values in F01") + } +} + +func TestEmptyValueZeroEmpty(t *testing.T) { + data := map[string][]string{ + "F01": {"", "foo"}, + } + s := S5{} + d := NewDecoder() + d.ZeroEmpty(true) + err := d.Decode(&s, data) + if err != nil { + t.Fatal(err) + } + if len(s.F01) != 2 { + t.Errorf("Expected 1 values in F01") + } +} + +// ---------------------------------------------------------------------------- + +type S6 struct { + id string +} + +func TestUnexportedField(t *testing.T) { + data := map[string][]string{ + "id": {"identifier"}, + } + s := &S6{} + NewDecoder().Decode(s, data) + if s.id != "" { + t.Errorf("Unexported field expected to be ignored") + } +} + +// ---------------------------------------------------------------------------- + +type S7 struct { + ID string +} + +func TestMultipleValues(t *testing.T) { + data := map[string][]string{ + "ID": {"0", "1"}, + } + + s := S7{} + NewDecoder().Decode(&s, data) + if s.ID != "1" { + t.Errorf("Last defined value must be used when multiple values for same field are provided") + } +} + +type S8 struct { + ID string `json:"id"` +} + +func TestSetAliasTag(t *testing.T) { + data := map[string][]string{ + "id": {"foo"}, + } + + s := S8{} + dec := NewDecoder() + dec.SetAliasTag("json") + dec.Decode(&s, data) + if s.ID != "foo" { + t.Fatalf("Bad value: got %q, want %q", s.ID, "foo") + } +} + +func TestZeroEmpty(t *testing.T) { + data := map[string][]string{ + "F01": {""}, + "F03": {"true"}, + } + s := S4{1, 1, false, false} + d := NewDecoder() + d.ZeroEmpty(true) + + err := d.Decode(&s, data) + if err != nil { + t.Fatal(err) + } + if s.F01 != 0 { + t.Errorf("F01: got %v, want %v", s.F01, 0) + } + if s.F02 != 1 { + t.Errorf("F02: got %v, want %v", s.F02, 1) + } + if s.F03 != true { + t.Errorf("F03: got %v, want %v", s.F03, true) + } +} + +func TestNoZeroEmpty(t *testing.T) { + data := map[string][]string{ + "F01": {""}, + "F03": {"true"}, + } + s := S4{1, 1, false, false} + d := NewDecoder() + d.ZeroEmpty(false) + err := d.Decode(&s, data) + if err != nil { + t.Fatal(err) + } + if s.F01 != 1 { 
+ t.Errorf("F01: got %v, want %v", s.F01, 1) + } + if s.F02 != 1 { + t.Errorf("F02: got %v, want %v", s.F02, 1) + } + if s.F03 != true { + t.Errorf("F03: got %v, want %v", s.F03, true) + } + if s.F04 != false { + t.Errorf("F04: got %v, want %v", s.F04, false) + } +} + +// ---------------------------------------------------------------------------- + +type S9 struct { + Id string +} + +type S10 struct { + S9 +} + +func TestEmbeddedField(t *testing.T) { + data := map[string][]string{ + "Id": {"identifier"}, + } + s := &S10{} + NewDecoder().Decode(s, data) + if s.Id != "identifier" { + t.Errorf("Missing support for embedded fields") + } +} + +type S11 struct { + S10 +} + +func TestMultipleLevelEmbeddedField(t *testing.T) { + data := map[string][]string{ + "Id": {"identifier"}, + } + s := &S11{} + err := NewDecoder().Decode(s, data) + if s.Id != "identifier" { + t.Errorf("Missing support for multiple-level embedded fields (%v)", err) + } +} + +func TestInvalidPath(t *testing.T) { + data := map[string][]string{ + "Foo.Bar": {"baz"}, + } + s := S9{} + err := NewDecoder().Decode(&s, data) + expectedErr := `schema: invalid path "Foo.Bar"` + if err.Error() != expectedErr { + t.Fatalf("got %q, want %q", err, expectedErr) + } +} + +func TestInvalidPathIgnoreUnknownKeys(t *testing.T) { + data := map[string][]string{ + "Foo.Bar": {"baz"}, + } + s := S9{} + dec := NewDecoder() + dec.IgnoreUnknownKeys(true) + err := dec.Decode(&s, data) + if err != nil { + t.Fatal(err) + } +} + +// ---------------------------------------------------------------------------- + +type S1NT struct { + F1 int + F2 *int + F3 []int + F4 []*int + F5 *[]int + F6 *[]*int + F7 S2 + F8 *S1 + F9 int `schema:"-"` + F10 []S1 + F11 []*S1 + F12 *[]S1 + F13 *[]*S1 +} + +func TestAllNT(t *testing.T) { + v := map[string][]string{ + "f1": {"1"}, + "f2": {"2"}, + "f3": {"31", "32"}, + "f4": {"41", "42"}, + "f5": {"51", "52"}, + "f6": {"61", "62"}, + "f7.f1": {"71", "72"}, + "f8.f8.f7.f1": {"81", "82"}, + "f9": {"9"}, + "f10.0.f10.0.f6": {"101", "102"}, + "f10.0.f10.1.f6": {"103", "104"}, + "f11.0.f11.0.f6": {"111", "112"}, + "f11.0.f11.1.f6": {"113", "114"}, + "f12.0.f12.0.f6": {"121", "122"}, + "f12.0.f12.1.f6": {"123", "124"}, + "f13.0.f13.0.f6": {"131", "132"}, + "f13.0.f13.1.f6": {"133", "134"}, + } + f2 := 2 + f41, f42 := 41, 42 + f61, f62 := 61, 62 + f71, f72 := 71, 72 + f81, f82 := 81, 82 + f101, f102, f103, f104 := 101, 102, 103, 104 + f111, f112, f113, f114 := 111, 112, 113, 114 + f121, f122, f123, f124 := 121, 122, 123, 124 + f131, f132, f133, f134 := 131, 132, 133, 134 + e := S1NT{ + F1: 1, + F2: &f2, + F3: []int{31, 32}, + F4: []*int{&f41, &f42}, + F5: &[]int{51, 52}, + F6: &[]*int{&f61, &f62}, + F7: S2{ + F01: &[]*int{&f71, &f72}, + }, + F8: &S1{ + F08: &S1{ + F07: S2{ + F01: &[]*int{&f81, &f82}, + }, + }, + }, + F9: 0, + F10: []S1{ + S1{ + F10: []S1{ + S1{F06: &[]*int{&f101, &f102}}, + S1{F06: &[]*int{&f103, &f104}}, + }, + }, + }, + F11: []*S1{ + &S1{ + F11: []*S1{ + &S1{F06: &[]*int{&f111, &f112}}, + &S1{F06: &[]*int{&f113, &f114}}, + }, + }, + }, + F12: &[]S1{ + S1{ + F12: &[]S1{ + S1{F06: &[]*int{&f121, &f122}}, + S1{F06: &[]*int{&f123, &f124}}, + }, + }, + }, + F13: &[]*S1{ + &S1{ + F13: &[]*S1{ + &S1{F06: &[]*int{&f131, &f132}}, + &S1{F06: &[]*int{&f133, &f134}}, + }, + }, + }, + } + + s := &S1NT{} + _ = NewDecoder().Decode(s, v) + + vals := func(values []*int) []int { + r := make([]int, len(values)) + for k, v := range values { + r[k] = *v + } + return r + } + + if s.F1 != e.F1 { + t.Errorf("f1: expected %v, got %v", 
e.F1, s.F1) + } + if s.F2 == nil { + t.Errorf("f2: expected %v, got nil", *e.F2) + } else if *s.F2 != *e.F2 { + t.Errorf("f2: expected %v, got %v", *e.F2, *s.F2) + } + if s.F3 == nil { + t.Errorf("f3: expected %v, got nil", e.F3) + } else if len(s.F3) != 2 || s.F3[0] != e.F3[0] || s.F3[1] != e.F3[1] { + t.Errorf("f3: expected %v, got %v", e.F3, s.F3) + } + if s.F4 == nil { + t.Errorf("f4: expected %v, got nil", e.F4) + } else { + if len(s.F4) != 2 || *(s.F4)[0] != *(e.F4)[0] || *(s.F4)[1] != *(e.F4)[1] { + t.Errorf("f4: expected %v, got %v", vals(e.F4), vals(s.F4)) + } + } + if s.F5 == nil { + t.Errorf("f5: expected %v, got nil", e.F5) + } else { + sF5, eF5 := *s.F5, *e.F5 + if len(sF5) != 2 || sF5[0] != eF5[0] || sF5[1] != eF5[1] { + t.Errorf("f5: expected %v, got %v", eF5, sF5) + } + } + if s.F6 == nil { + t.Errorf("f6: expected %v, got nil", vals(*e.F6)) + } else { + sF6, eF6 := *s.F6, *e.F6 + if len(sF6) != 2 || *(sF6)[0] != *(eF6)[0] || *(sF6)[1] != *(eF6)[1] { + t.Errorf("f6: expected %v, got %v", vals(eF6), vals(sF6)) + } + } + if s.F7.F01 == nil { + t.Errorf("f7.f1: expected %v, got nil", vals(*e.F7.F01)) + } else { + sF7, eF7 := *s.F7.F01, *e.F7.F01 + if len(sF7) != 2 || *(sF7)[0] != *(eF7)[0] || *(sF7)[1] != *(eF7)[1] { + t.Errorf("f7.f1: expected %v, got %v", vals(eF7), vals(sF7)) + } + } + if s.F8 == nil { + t.Errorf("f8: got nil") + } else if s.F8.F08 == nil { + t.Errorf("f8.f8: got nil") + } else if s.F8.F08.F07.F01 == nil { + t.Errorf("f8.f8.f7.f1: expected %v, got nil", vals(*e.F8.F08.F07.F01)) + } else { + sF8, eF8 := *s.F8.F08.F07.F01, *e.F8.F08.F07.F01 + if len(sF8) != 2 || *(sF8)[0] != *(eF8)[0] || *(sF8)[1] != *(eF8)[1] { + t.Errorf("f8.f8.f7.f1: expected %v, got %v", vals(eF8), vals(sF8)) + } + } + if s.F9 != e.F9 { + t.Errorf("f9: expected %v, got %v", e.F9, s.F9) + } + if s.F10 == nil { + t.Errorf("f10: got nil") + } else if len(s.F10) != 1 { + t.Errorf("f10: expected 1 element, got %v", s.F10) + } else { + if len(s.F10[0].F10) != 2 { + t.Errorf("f10.0.f10: expected 1 element, got %v", s.F10[0].F10) + } else { + sF10, eF10 := *s.F10[0].F10[0].F06, *e.F10[0].F10[0].F06 + if sF10 == nil { + t.Errorf("f10.0.f10.0.f6: expected %v, got nil", vals(eF10)) + } else { + if len(sF10) != 2 || *(sF10)[0] != *(eF10)[0] || *(sF10)[1] != *(eF10)[1] { + t.Errorf("f10.0.f10.0.f6: expected %v, got %v", vals(eF10), vals(sF10)) + } + } + sF10, eF10 = *s.F10[0].F10[1].F06, *e.F10[0].F10[1].F06 + if sF10 == nil { + t.Errorf("f10.0.f10.0.f6: expected %v, got nil", vals(eF10)) + } else { + if len(sF10) != 2 || *(sF10)[0] != *(eF10)[0] || *(sF10)[1] != *(eF10)[1] { + t.Errorf("f10.0.f10.0.f6: expected %v, got %v", vals(eF10), vals(sF10)) + } + } + } + } + if s.F11 == nil { + t.Errorf("f11: got nil") + } else if len(s.F11) != 1 { + t.Errorf("f11: expected 1 element, got %v", s.F11) + } else { + if len(s.F11[0].F11) != 2 { + t.Errorf("f11.0.f11: expected 1 element, got %v", s.F11[0].F11) + } else { + sF11, eF11 := *s.F11[0].F11[0].F06, *e.F11[0].F11[0].F06 + if sF11 == nil { + t.Errorf("f11.0.f11.0.f6: expected %v, got nil", vals(eF11)) + } else { + if len(sF11) != 2 || *(sF11)[0] != *(eF11)[0] || *(sF11)[1] != *(eF11)[1] { + t.Errorf("f11.0.f11.0.f6: expected %v, got %v", vals(eF11), vals(sF11)) + } + } + sF11, eF11 = *s.F11[0].F11[1].F06, *e.F11[0].F11[1].F06 + if sF11 == nil { + t.Errorf("f11.0.f11.0.f6: expected %v, got nil", vals(eF11)) + } else { + if len(sF11) != 2 || *(sF11)[0] != *(eF11)[0] || *(sF11)[1] != *(eF11)[1] { + t.Errorf("f11.0.f11.0.f6: expected %v, got %v", vals(eF11), 
vals(sF11)) + } + } + } + } + if s.F12 == nil { + t.Errorf("f12: got nil") + } else if len(*s.F12) != 1 { + t.Errorf("f12: expected 1 element, got %v", *s.F12) + } else { + sF12, eF12 := *(s.F12), *(e.F12) + if len(*sF12[0].F12) != 2 { + t.Errorf("f12.0.f12: expected 1 element, got %v", *sF12[0].F12) + } else { + sF122, eF122 := *(*sF12[0].F12)[0].F06, *(*eF12[0].F12)[0].F06 + if sF122 == nil { + t.Errorf("f12.0.f12.0.f6: expected %v, got nil", vals(eF122)) + } else { + if len(sF122) != 2 || *(sF122)[0] != *(eF122)[0] || *(sF122)[1] != *(eF122)[1] { + t.Errorf("f12.0.f12.0.f6: expected %v, got %v", vals(eF122), vals(sF122)) + } + } + sF122, eF122 = *(*sF12[0].F12)[1].F06, *(*eF12[0].F12)[1].F06 + if sF122 == nil { + t.Errorf("f12.0.f12.0.f6: expected %v, got nil", vals(eF122)) + } else { + if len(sF122) != 2 || *(sF122)[0] != *(eF122)[0] || *(sF122)[1] != *(eF122)[1] { + t.Errorf("f12.0.f12.0.f6: expected %v, got %v", vals(eF122), vals(sF122)) + } + } + } + } + if s.F13 == nil { + t.Errorf("f13: got nil") + } else if len(*s.F13) != 1 { + t.Errorf("f13: expected 1 element, got %v", *s.F13) + } else { + sF13, eF13 := *(s.F13), *(e.F13) + if len(*sF13[0].F13) != 2 { + t.Errorf("f13.0.f13: expected 1 element, got %v", *sF13[0].F13) + } else { + sF132, eF132 := *(*sF13[0].F13)[0].F06, *(*eF13[0].F13)[0].F06 + if sF132 == nil { + t.Errorf("f13.0.f13.0.f6: expected %v, got nil", vals(eF132)) + } else { + if len(sF132) != 2 || *(sF132)[0] != *(eF132)[0] || *(sF132)[1] != *(eF132)[1] { + t.Errorf("f13.0.f13.0.f6: expected %v, got %v", vals(eF132), vals(sF132)) + } + } + sF132, eF132 = *(*sF13[0].F13)[1].F06, *(*eF13[0].F13)[1].F06 + if sF132 == nil { + t.Errorf("f13.0.f13.0.f6: expected %v, got nil", vals(eF132)) + } else { + if len(sF132) != 2 || *(sF132)[0] != *(eF132)[0] || *(sF132)[1] != *(eF132)[1] { + t.Errorf("f13.0.f13.0.f6: expected %v, got %v", vals(eF132), vals(sF132)) + } + } + } + } +} + +// ---------------------------------------------------------------------------- + +type S12A struct { + ID []int +} + +func TestCSVSlice(t *testing.T) { + data := map[string][]string{ + "ID": {"0,1"}, + } + + s := S12A{} + NewDecoder().Decode(&s, data) + if len(s.ID) != 2 { + t.Errorf("Expected two values in the result list, got %+v", s.ID) + } + if s.ID[0] != 0 || s.ID[1] != 1 { + t.Errorf("Expected []{0, 1} got %+v", s) + } +} + +type S12B struct { + ID []string +} + +//Decode should not split on , into a slice for string only +func TestCSVStringSlice(t *testing.T) { + data := map[string][]string{ + "ID": {"0,1"}, + } + + s := S12B{} + NewDecoder().Decode(&s, data) + if len(s.ID) != 1 { + t.Errorf("Expected one value in the result list, got %+v", s.ID) + } + if s.ID[0] != "0,1" { + t.Errorf("Expected []{0, 1} got %+v", s) + } +} + +//Invalid data provided by client should not panic (github issue 33) +func TestInvalidDataProvidedByClient(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Panicked calling decoder.Decode: %v", r) + } + }() + + type S struct { + f string + } + + data := map[string][]string{ + "f.f": {"v"}, + } + + err := NewDecoder().Decode(new(S), data) + if err == nil { + t.Errorf("invalid path in decoder.Decode should return an error.") + } +} + +// underlying cause of error in issue 33 +func TestInvalidPathInCacheParsePath(t *testing.T) { + type S struct { + f string + } + + typ := reflect.ValueOf(new(S)).Elem().Type() + c := newCache() + _, err := c.parsePath("f.f", typ) + if err == nil { + t.Errorf("invalid path in cache.parsePath should return an 
error.") + } +} + +// issue 32 +func TestDecodeToTypedField(t *testing.T) { + type Aa bool + s1 := &struct{ Aa }{} + v1 := map[string][]string{"Aa": {"true"}} + NewDecoder().Decode(s1, v1) + if s1.Aa != Aa(true) { + t.Errorf("s1: expected %v, got %v", true, s1.Aa) + } +} + +// issue 37 +func TestRegisterConverter(t *testing.T) { + type Aa int + type Bb int + s1 := &struct { + Aa + Bb + }{} + decoder := NewDecoder() + + decoder.RegisterConverter(s1.Aa, func(s string) reflect.Value { return reflect.ValueOf(1) }) + decoder.RegisterConverter(s1.Bb, func(s string) reflect.Value { return reflect.ValueOf(2) }) + + v1 := map[string][]string{"Aa": {"4"}, "Bb": {"5"}} + decoder.Decode(s1, v1) + + if s1.Aa != Aa(1) { + t.Errorf("s1.Aa: expected %v, got %v", 1, s1.Aa) + } + if s1.Bb != Bb(2) { + t.Errorf("s1.Bb: expected %v, got %v", 2, s1.Bb) + } +} + +// Issue #40 +func TestRegisterConverterSlice(t *testing.T) { + decoder := NewDecoder() + decoder.RegisterConverter([]string{}, func(input string) reflect.Value { + return reflect.ValueOf(strings.Split(input, ",")) + }) + + result := struct { + Multiple []string `schema:"multiple"` + }{} + + expected := []string{"one", "two", "three"} + decoder.Decode(&result, map[string][]string{ + "multiple": []string{"one,two,three"}, + }) + for i := range expected { + if got, want := expected[i], result.Multiple[i]; got != want { + t.Errorf("%d: got %s, want %s", i, got, want) + } + } +} + +func TestRegisterConverterMap(t *testing.T) { + decoder := NewDecoder() + decoder.IgnoreUnknownKeys(false) + decoder.RegisterConverter(map[string]string{}, func(input string) reflect.Value { + m := make(map[string]string) + for _, pair := range strings.Split(input, ",") { + parts := strings.Split(pair, ":") + switch len(parts) { + case 2: + m[parts[0]] = parts[1] + } + } + return reflect.ValueOf(m) + }) + + result := struct { + Multiple map[string]string `schema:"multiple"` + }{} + + err := decoder.Decode(&result, map[string][]string{ + "multiple": []string{"a:one,b:two"}, + }) + if err != nil { + t.Fatal(err) + } + expected := map[string]string{"a": "one", "b": "two"} + for k, v := range expected { + got, ok := result.Multiple[k] + if !ok { + t.Fatalf("got %v, want %v", result.Multiple, expected) + } + if got != v { + t.Errorf("got %s, want %s", got, v) + } + } +} + +type S13 struct { + Value []S14 +} + +type S14 struct { + F1 string + F2 string +} + +func (n *S14) UnmarshalText(text []byte) error { + textParts := strings.Split(string(text), " ") + if len(textParts) < 2 { + return errors.New("Not a valid name!") + } + + n.F1, n.F2 = textParts[0], textParts[len(textParts)-1] + return nil +} + +type S15 struct { + Value []S16 +} + +type S16 struct { + F1 string + F2 string +} + +func TestCustomTypeSlice(t *testing.T) { + data := map[string][]string{ + "Value.0": []string{"Louisa May Alcott"}, + "Value.1": []string{"Florence Nightingale"}, + "Value.2": []string{"Clara Barton"}, + } + + s := S13{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err != nil { + t.Fatal(err) + } + + if len(s.Value) != 3 { + t.Fatalf("Expected 3 values in the result list, got %+v", s.Value) + } + if s.Value[0].F1 != "Louisa" || s.Value[0].F2 != "Alcott" { + t.Errorf("Expected S14{'Louisa', 'Alcott'} got %+v", s.Value[0]) + } + if s.Value[1].F1 != "Florence" || s.Value[1].F2 != "Nightingale" { + t.Errorf("Expected S14{'Florence', 'Nightingale'} got %+v", s.Value[1]) + } + if s.Value[2].F1 != "Clara" || s.Value[2].F2 != "Barton" { + t.Errorf("Expected S14{'Clara', 'Barton'} got %+v", 
s.Value[2]) + } +} + +func TestCustomTypeSliceWithError(t *testing.T) { + data := map[string][]string{ + "Value.0": []string{"Louisa May Alcott"}, + "Value.1": []string{"Florence Nightingale"}, + "Value.2": []string{"Clara"}, + } + + s := S13{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err == nil { + t.Error("Not detecting error in conversion") + } +} + +func TestNoTextUnmarshalerTypeSlice(t *testing.T) { + data := map[string][]string{ + "Value.0": []string{"Louisa May Alcott"}, + "Value.1": []string{"Florence Nightingale"}, + "Value.2": []string{"Clara Barton"}, + } + + s := S15{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err == nil { + t.Error("Not detecting when there's no converter") + } +} + +// ---------------------------------------------------------------------------- + +type S17 struct { + Value S14 +} + +type S18 struct { + Value S16 +} + +func TestCustomType(t *testing.T) { + data := map[string][]string{ + "Value": []string{"Louisa May Alcott"}, + } + + s := S17{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err != nil { + t.Fatal(err) + } + + if s.Value.F1 != "Louisa" || s.Value.F2 != "Alcott" { + t.Errorf("Expected S14{'Louisa', 'Alcott'} got %+v", s.Value) + } +} + +func TestCustomTypeWithError(t *testing.T) { + data := map[string][]string{ + "Value": []string{"Louisa"}, + } + + s := S17{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err == nil { + t.Error("Not detecting error in conversion") + } +} + +func TestNoTextUnmarshalerType(t *testing.T) { + data := map[string][]string{ + "Value": []string{"Louisa May Alcott"}, + } + + s := S18{} + decoder := NewDecoder() + + if err := decoder.Decode(&s, data); err == nil { + t.Error("Not detecting when there's no converter") + } +} + +func TestExpectedType(t *testing.T) { + data := map[string][]string{ + "bools": []string{"1", "a"}, + "date": []string{"invalid"}, + "Foo.Bar": []string{"a", "b"}, + } + + type B struct { + Bar *int + } + type A struct { + Bools []bool `schema:"bools"` + Date time.Time `schema:"date"` + Foo B + } + + a := A{} + + err := NewDecoder().Decode(&a, data) + + e := err.(MultiError)["bools"].(ConversionError) + if e.Type != reflect.TypeOf(false) && e.Index == 1 { + t.Errorf("Expected bool, index: 1 got %+v, index: %d", e.Type, e.Index) + } + e = err.(MultiError)["date"].(ConversionError) + if e.Type != reflect.TypeOf(time.Time{}) { + t.Errorf("Expected time.Time got %+v", e.Type) + } + e = err.(MultiError)["Foo.Bar"].(ConversionError) + if e.Type != reflect.TypeOf(0) { + t.Errorf("Expected int got %+v", e.Type) + } +} + +type R1 struct { + A string `schema:"a,required"` + B struct { + C int `schema:"c,required"` + D float64 `schema:"d"` + E string `schema:"e,required"` + } `schema:"b"` + F []string `schema:"f,required"` + G []int `schema:"g,othertag"` + H bool `schema:"h,required"` +} + +func TestRequiredField(t *testing.T) { + var a R1 + v := map[string][]string{ + "a": []string{"bbb"}, + "b.c": []string{"88"}, + "b.d": []string{"9"}, + "f": []string{""}, + "h": []string{"true"}, + } + err := NewDecoder().Decode(&a, v) + if err == nil { + t.Errorf("error nil, b.e is empty expect") + return + } + // b.e empty + v["b.e"] = []string{""} // empty string + err = NewDecoder().Decode(&a, v) + if err == nil { + t.Errorf("error nil, b.e is empty expect") + return + } + + // all fields ok + v["b.e"] = []string{"nonempty"} + err = NewDecoder().Decode(&a, v) + if err != nil { + t.Errorf("error: %v", err) + return + } + + // set f 
empty + v["f"] = []string{} + err = NewDecoder().Decode(&a, v) + if err == nil { + t.Errorf("error nil, f is empty expect") + return + } + v["f"] = []string{"nonempty"} + + // b.c type int with empty string + v["b.c"] = []string{""} + err = NewDecoder().Decode(&a, v) + if err == nil { + t.Errorf("error nil, b.c is empty expect") + return + } + v["b.c"] = []string{"3"} + + // h type bool with empty string + v["h"] = []string{""} + err = NewDecoder().Decode(&a, v) + if err == nil { + t.Errorf("error nil, h is empty expect") + return + } +} + +type AS1 struct { + A int32 `schema:"a,required"` + E int32 `schema:"e,required"` +} +type AS2 struct { + AS1 + B string `schema:"b,required"` +} +type AS3 struct { + C int32 `schema:"c"` +} + +type AS4 struct { + AS3 + D string `schema:"d"` +} + +func TestAnonymousStructField(t *testing.T) { + patterns := []map[string][]string{ + { + "a": {"1"}, + "e": {"2"}, + "b": {"abc"}, + }, + { + "AS1.a": {"1"}, + "AS1.e": {"2"}, + "b": {"abc"}, + }, + } + for _, v := range patterns { + a := AS2{} + err := NewDecoder().Decode(&a, v) + if err != nil { + t.Errorf("Decode failed %s, %#v", err, v) + continue + } + if a.A != 1 { + t.Errorf("A: expected %v, got %v", 1, a.A) + } + if a.E != 2 { + t.Errorf("E: expected %v, got %v", 2, a.E) + } + if a.B != "abc" { + t.Errorf("B: expected %v, got %v", "abc", a.B) + } + if a.AS1.A != 1 { + t.Errorf("AS1.A: expected %v, got %v", 1, a.AS1.A) + } + if a.AS1.E != 2 { + t.Errorf("AS1.E: expected %v, got %v", 2, a.AS1.E) + } + } + a := AS2{} + err := NewDecoder().Decode(&a, map[string][]string{ + "e": {"2"}, + "b": {"abc"}, + }) + if err == nil { + t.Errorf("error nil, a is empty expect") + } + patterns = []map[string][]string{ + { + "c": {"1"}, + "d": {"abc"}, + }, + { + "AS3.c": {"1"}, + "d": {"abc"}, + }, + } + for _, v := range patterns { + a := AS4{} + err := NewDecoder().Decode(&a, v) + if err != nil { + t.Errorf("Decode failed %s, %#v", err, v) + continue + } + if a.C != 1 { + t.Errorf("C: expected %v, got %v", 1, a.C) + } + if a.D != "abc" { + t.Errorf("D: expected %v, got %v", "abc", a.D) + } + if a.AS3.C != 1 { + t.Errorf("AS3.C: expected %v, got %v", 1, a.AS3.C) + } + } +} + +// Test to ensure that a registered converter overrides the default text unmarshaler. +func TestRegisterConverterOverridesTextUnmarshaler(t *testing.T) { + type MyTime time.Time + s1 := &struct { + MyTime + }{} + decoder := NewDecoder() + + ts := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + decoder.RegisterConverter(s1.MyTime, func(s string) reflect.Value { return reflect.ValueOf(ts) }) + + v1 := map[string][]string{"MyTime": {"4"}, "Bb": {"5"}} + decoder.Decode(s1, v1) + + if s1.MyTime != MyTime(ts) { + t.Errorf("s1.Aa: expected %v, got %v", ts, s1.MyTime) + } +} + +type S20E string + +func (e *S20E) UnmarshalText(text []byte) error { + *e = S20E("x") + return nil +} + +type S20 []S20E + +func (s *S20) UnmarshalText(text []byte) error { + *s = S20{"a", "b", "c"} + return nil +} + +// Test to ensure that when a custom type based on a slice implements an +// encoding.TextUnmarshaler interface that it takes precedence over any +// implementations by its elements. 
+func TestTextUnmarshalerTypeSlice(t *testing.T) { + data := map[string][]string{ + "Value": []string{"a,b,c"}, + } + s := struct { + Value S20 + }{} + decoder := NewDecoder() + if err := decoder.Decode(&s, data); err != nil { + t.Error("Error while decoding:", err) + } + expected := S20{"a", "b", "c"} + if !reflect.DeepEqual(expected, s.Value) { + t.Errorf("Expected %v errors, got %v", expected, s.Value) + } +} diff --git a/vendor/github.com/gorilla/schema/doc.go b/vendor/github.com/gorilla/schema/doc.go new file mode 100644 index 000000000..aae9f33f9 --- /dev/null +++ b/vendor/github.com/gorilla/schema/doc.go @@ -0,0 +1,148 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/schema fills a struct with form values. + +The basic usage is really simple. Given this struct: + + type Person struct { + Name string + Phone string + } + +...we can fill it passing a map to the Decode() function: + + values := map[string][]string{ + "Name": {"John"}, + "Phone": {"999-999-999"}, + } + person := new(Person) + decoder := schema.NewDecoder() + decoder.Decode(person, values) + +This is just a simple example and it doesn't make a lot of sense to create +the map manually. Typically it will come from a http.Request object and +will be of type url.Values, http.Request.Form, or http.Request.MultipartForm: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + + if err != nil { + // Handle error + } + + decoder := schema.NewDecoder() + // r.PostForm is a map of our POST form values + err := decoder.Decode(person, r.PostForm) + + if err != nil { + // Handle error + } + + // Do something with person.Name or person.Phone + } + +Note: it is a good idea to set a Decoder instance as a package global, +because it caches meta-data about structs, and an instance can be shared safely: + + var decoder = schema.NewDecoder() + +To define custom names for fields, use a struct tag "schema". To not populate +certain fields, use a dash for the name and it will be ignored: + + type Person struct { + Name string `schema:"name"` // custom name + Phone string `schema:"phone"` // custom name + Admin bool `schema:"-"` // this field is never set + } + +The supported field types in the destination struct are: + + * bool + * float variants (float32, float64) + * int variants (int, int8, int16, int32, int64) + * string + * uint variants (uint, uint8, uint16, uint32, uint64) + * struct + * a pointer to one of the above types + * a slice or a pointer to a slice of one of the above types + +Non-supported types are simply ignored, however custom types can be registered +to be converted. + +To fill nested structs, keys must use a dotted notation as the "path" for the +field. So for example, to fill the struct Person below: + + type Phone struct { + Label string + Number string + } + + type Person struct { + Name string + Phone Phone + } + +...the source map must have the keys "Name", "Phone.Label" and "Phone.Number". +This means that an HTML form to fill a Person struct must look like this: + +
+	<form>
+		<input type="text" name="Name">
+		<input type="text" name="Phone.Label">
+		<input type="text" name="Phone.Number">
+	</form>
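For concreteness, a minimal runnable sketch of the same dotted-path decoding; the Person and Phone types mirror the ones above, and the literal values are only illustrative:

	package main

	import (
		"fmt"

		"github.com/gorilla/schema"
	)

	type Phone struct {
		Label  string
		Number string
	}

	type Person struct {
		Name  string
		Phone Phone
	}

	func main() {
		// Dotted keys walk into the nested Phone struct.
		values := map[string][]string{
			"Name":         {"John"},
			"Phone.Label":  {"home"},
			"Phone.Number": {"999-999-999"},
		}
		person := new(Person)
		decoder := schema.NewDecoder()
		if err := decoder.Decode(person, values); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Printf("%+v\n", person) // &{Name:John Phone:{Label:home Number:999-999-999}}
	}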
+ +Single values are filled using the first value for a key from the source map. +Slices are filled using all values for a key from the source map. So to fill +a Person with multiple Phone values, like: + + type Person struct { + Name string + Phones []Phone + } + +...an HTML form that accepts three Phone values would look like this: + +
+	<form>
+		<input type="text" name="Name">
+		<input type="text" name="Phones.0.Label">
+		<input type="text" name="Phones.0.Number">
+		<input type="text" name="Phones.1.Label">
+		<input type="text" name="Phones.1.Number">
+		<input type="text" name="Phones.2.Label">
+		<input type="text" name="Phones.2.Number">
+	</form>
+ +Notice that only for slices of structs the slice index is required. +This is needed for disambiguation: if the nested struct also had a slice +field, we could not translate multiple values to it if we did not use an +index for the parent struct. + +There's also the possibility to create a custom type that implements the +TextUnmarshaler interface, and in this case there's no need to register +a converter, like: + + type Person struct { + Emails []Email + } + + type Email struct { + *mail.Address + } + + func (e *Email) UnmarshalText(text []byte) (err error) { + e.Address, err = mail.ParseAddress(string(text)) + return + } + +...an HTML form that accepts three Email values would look like this: + +
+	<form>
+		<input type="email" name="Emails.0">
+		<input type="email" name="Emails.1">
+		<input type="email" name="Emails.2">
+	</form>
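For reference, a short self-contained sketch of that TextUnmarshaler path; the Email type is the one just shown, while the Person wrapper and the addresses are illustrative placeholders:

	package main

	import (
		"fmt"
		"net/mail"

		"github.com/gorilla/schema"
	)

	type Email struct {
		*mail.Address
	}

	func (e *Email) UnmarshalText(text []byte) (err error) {
		e.Address, err = mail.ParseAddress(string(text))
		return
	}

	type Person struct {
		Emails []Email
	}

	func main() {
		// Each indexed value is handed to Email.UnmarshalText; no converter is registered.
		values := map[string][]string{
			"Emails.0": {"alice@example.com"},
			"Emails.1": {"bob@example.com"},
		}
		person := new(Person)
		if err := schema.NewDecoder().Decode(person, values); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(person.Emails[0].Address.Address) // alice@example.com
	}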
+*/ +package schema diff --git a/vendor/github.com/gorilla/schema/encoder.go b/vendor/github.com/gorilla/schema/encoder.go new file mode 100644 index 000000000..bf1d511e6 --- /dev/null +++ b/vendor/github.com/gorilla/schema/encoder.go @@ -0,0 +1,195 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +type encoderFunc func(reflect.Value) string + +// Encoder encodes values from a struct into url.Values. +type Encoder struct { + cache *cache + regenc map[reflect.Type]encoderFunc +} + +// NewEncoder returns a new Encoder with defaults. +func NewEncoder() *Encoder { + return &Encoder{cache: newCache(), regenc: make(map[reflect.Type]encoderFunc)} +} + +// Encode encodes a struct into map[string][]string. +// +// Intended for use with url.Values. +func (e *Encoder) Encode(src interface{}, dst map[string][]string) error { + v := reflect.ValueOf(src) + + return e.encode(v, dst) +} + +// RegisterEncoder registers a converter for encoding a custom type. +func (e *Encoder) RegisterEncoder(value interface{}, encoder func(reflect.Value) string) { + e.regenc[reflect.TypeOf(value)] = encoder +} + +// SetAliasTag changes the tag used to locate custom field aliases. +// The default tag is "schema". +func (e *Encoder) SetAliasTag(tag string) { + e.cache.tag = tag +} + +// isValidStructPointer test if input value is a valid struct pointer. +func isValidStructPointer(v reflect.Value) bool { + return v.Type().Kind() == reflect.Ptr && v.Elem().IsValid() && v.Elem().Type().Kind() == reflect.Struct +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Func: + case reflect.Map, reflect.Slice: + return v.IsNil() || v.Len() == 0 + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + return v.Interface() == z.Interface() +} + +func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return errors.New("schema: interface must be a struct") + } + t := v.Type() + + errors := MultiError{} + + for i := 0; i < v.NumField(); i++ { + name, opts := fieldAlias(t.Field(i), e.cache.tag) + if name == "-" { + continue + } + + // Encode struct pointer types if the field is a valid pointer and a struct. + if isValidStructPointer(v.Field(i)) { + e.encode(v.Field(i).Elem(), dst) + continue + } + + encFunc := typeEncoder(v.Field(i).Type(), e.regenc) + + // Encode non-slice types and custom implementations immediately. + if encFunc != nil { + value := encFunc(v.Field(i)) + if opts.Contains("omitempty") && isZero(v.Field(i)) { + continue + } + + dst[name] = append(dst[name], value) + continue + } + + if v.Field(i).Type().Kind() == reflect.Struct { + e.encode(v.Field(i), dst) + continue + } + + if v.Field(i).Type().Kind() == reflect.Slice { + encFunc = typeEncoder(v.Field(i).Type().Elem(), e.regenc) + } + + if encFunc == nil { + errors[v.Field(i).Type().String()] = fmt.Errorf("schema: encoder not found for %v", v.Field(i)) + continue + } + + // Encode a slice. 
+ if v.Field(i).Len() == 0 && opts.Contains("omitempty") { + continue + } + + dst[name] = []string{} + for j := 0; j < v.Field(i).Len(); j++ { + dst[name] = append(dst[name], encFunc(v.Field(i).Index(j))) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func typeEncoder(t reflect.Type, reg map[reflect.Type]encoderFunc) encoderFunc { + if f, ok := reg[t]; ok { + return f + } + + switch t.Kind() { + case reflect.Bool: + return encodeBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint + case reflect.Float32: + return encodeFloat32 + case reflect.Float64: + return encodeFloat64 + case reflect.Ptr: + f := typeEncoder(t.Elem(), reg) + return func(v reflect.Value) string { + if v.IsNil() { + return "null" + } + return f(v.Elem()) + } + case reflect.String: + return encodeString + default: + return nil + } +} + +func encodeBool(v reflect.Value) string { + return strconv.FormatBool(v.Bool()) +} + +func encodeInt(v reflect.Value) string { + return strconv.FormatInt(int64(v.Int()), 10) +} + +func encodeUint(v reflect.Value) string { + return strconv.FormatUint(uint64(v.Uint()), 10) +} + +func encodeFloat(v reflect.Value, bits int) string { + return strconv.FormatFloat(v.Float(), 'f', 6, bits) +} + +func encodeFloat32(v reflect.Value) string { + return encodeFloat(v, 32) +} + +func encodeFloat64(v reflect.Value) string { + return encodeFloat(v, 64) +} + +func encodeString(v reflect.Value) string { + return v.String() +} diff --git a/vendor/github.com/gorilla/schema/encoder_test.go b/vendor/github.com/gorilla/schema/encoder_test.go new file mode 100644 index 000000000..ac4cd6137 --- /dev/null +++ b/vendor/github.com/gorilla/schema/encoder_test.go @@ -0,0 +1,420 @@ +package schema + +import ( + "fmt" + "reflect" + "testing" +) + +type E1 struct { + F01 int `schema:"f01"` + F02 int `schema:"-"` + F03 string `schema:"f03"` + F04 string `schema:"f04,omitempty"` + F05 bool `schema:"f05"` + F06 bool `schema:"f06"` + F07 *string `schema:"f07"` + F08 *int8 `schema:"f08"` + F09 float64 `schema:"f09"` + F10 func() `schema:"f10"` + F11 inner +} +type inner struct { + F12 int +} + +func TestFilled(t *testing.T) { + f07 := "seven" + var f08 int8 = 8 + s := &E1{ + F01: 1, + F02: 2, + F03: "three", + F04: "four", + F05: true, + F06: false, + F07: &f07, + F08: &f08, + F09: 1.618, + F10: func() {}, + F11: inner{12}, + } + + vals := make(map[string][]string) + errs := NewEncoder().Encode(s, vals) + + valExists(t, "f01", "1", vals) + valNotExists(t, "f02", vals) + valExists(t, "f03", "three", vals) + valExists(t, "f05", "true", vals) + valExists(t, "f06", "false", vals) + valExists(t, "f07", "seven", vals) + valExists(t, "f08", "8", vals) + valExists(t, "f09", "1.618000", vals) + valExists(t, "F12", "12", vals) + + emptyErr := MultiError{} + if errs.Error() == emptyErr.Error() { + t.Errorf("Expected error got %v", errs) + } +} + +type Aa int + +type E3 struct { + F01 bool `schema:"f01"` + F02 float32 `schema:"f02"` + F03 float64 `schema:"f03"` + F04 int `schema:"f04"` + F05 int8 `schema:"f05"` + F06 int16 `schema:"f06"` + F07 int32 `schema:"f07"` + F08 int64 `schema:"f08"` + F09 string `schema:"f09"` + F10 uint `schema:"f10"` + F11 uint8 `schema:"f11"` + F12 uint16 `schema:"f12"` + F13 uint32 `schema:"f13"` + F14 uint64 `schema:"f14"` + F15 Aa `schema:"f15"` +} + +// Test compatibility with default decoder types. 
+func TestCompat(t *testing.T) { + src := &E3{ + F01: true, + F02: 4.2, + F03: 4.3, + F04: -42, + F05: -43, + F06: -44, + F07: -45, + F08: -46, + F09: "foo", + F10: 42, + F11: 43, + F12: 44, + F13: 45, + F14: 46, + F15: 1, + } + dst := &E3{} + + vals := make(map[string][]string) + encoder := NewEncoder() + decoder := NewDecoder() + + encoder.RegisterEncoder(src.F15, func(reflect.Value) string { return "1" }) + decoder.RegisterConverter(src.F15, func(string) reflect.Value { return reflect.ValueOf(1) }) + + err := encoder.Encode(src, vals) + if err != nil { + t.Errorf("Encoder has non-nil error: %v", err) + } + err = decoder.Decode(dst, vals) + if err != nil { + t.Errorf("Decoder has non-nil error: %v", err) + } + + if *src != *dst { + t.Errorf("Decoder-Encoder compatibility: expected %v, got %v\n", src, dst) + } +} + +func TestEmpty(t *testing.T) { + s := &E1{ + F01: 1, + F02: 2, + F03: "three", + } + + estr := "schema: encoder not found for " + vals := make(map[string][]string) + err := NewEncoder().Encode(s, vals) + if err.Error() != estr { + t.Errorf("Expected: %s, got %v", estr, err) + } + + valExists(t, "f03", "three", vals) + valNotExists(t, "f04", vals) +} + +func TestStruct(t *testing.T) { + estr := "schema: interface must be a struct" + vals := make(map[string][]string) + err := NewEncoder().Encode("hello world", vals) + + if err.Error() != estr { + t.Errorf("Expected: %s, got %v", estr, err) + } +} + +func TestSlices(t *testing.T) { + type oneAsWord int + ones := []oneAsWord{1, 2} + s1 := &struct { + ones []oneAsWord `schema:"ones"` + ints []int `schema:"ints"` + nonempty []int `schema:"nonempty"` + empty []int `schema:"empty,omitempty"` + }{ones, []int{1, 1}, []int{}, []int{}} + vals := make(map[string][]string) + + encoder := NewEncoder() + encoder.RegisterEncoder(ones[0], func(v reflect.Value) string { return "one" }) + err := encoder.Encode(s1, vals) + if err != nil { + t.Errorf("Encoder has non-nil error: %v", err) + } + + valsExist(t, "ones", []string{"one", "one"}, vals) + valsExist(t, "ints", []string{"1", "1"}, vals) + valsExist(t, "nonempty", []string{}, vals) + valNotExists(t, "empty", vals) +} + +func TestCompatSlices(t *testing.T) { + type oneAsWord int + type s1 struct { + Ones []oneAsWord `schema:"ones"` + Ints []int `schema:"ints"` + } + ones := []oneAsWord{1, 1} + src := &s1{ones, []int{1, 1}} + vals := make(map[string][]string) + dst := &s1{} + + encoder := NewEncoder() + encoder.RegisterEncoder(ones[0], func(v reflect.Value) string { return "one" }) + + decoder := NewDecoder() + decoder.RegisterConverter(ones[0], func(s string) reflect.Value { + if s == "one" { + return reflect.ValueOf(1) + } + return reflect.ValueOf(2) + }) + + err := encoder.Encode(src, vals) + if err != nil { + t.Errorf("Encoder has non-nil error: %v", err) + } + err = decoder.Decode(dst, vals) + if err != nil { + t.Errorf("Dncoder has non-nil error: %v", err) + } + + if len(src.Ints) != len(dst.Ints) || len(src.Ones) != len(src.Ones) { + t.Fatalf("Expected %v, got %v", src, dst) + } + + for i, v := range src.Ones { + if dst.Ones[i] != v { + t.Fatalf("Expected %v, got %v", v, dst.Ones[i]) + } + } + + for i, v := range src.Ints { + if dst.Ints[i] != v { + t.Fatalf("Expected %v, got %v", v, dst.Ints[i]) + } + } +} + +func TestRegisterEncoder(t *testing.T) { + type oneAsWord int + type twoAsWord int + type oneSliceAsWord []int + + s1 := &struct { + oneAsWord + twoAsWord + oneSliceAsWord + }{1, 2, []int{1, 1}} + v1 := make(map[string][]string) + + encoder := NewEncoder() + 
encoder.RegisterEncoder(s1.oneAsWord, func(v reflect.Value) string { return "one" }) + encoder.RegisterEncoder(s1.twoAsWord, func(v reflect.Value) string { return "two" }) + encoder.RegisterEncoder(s1.oneSliceAsWord, func(v reflect.Value) string { return "one" }) + + err := encoder.Encode(s1, v1) + if err != nil { + t.Errorf("Encoder has non-nil error: %v", err) + } + + valExists(t, "oneAsWord", "one", v1) + valExists(t, "twoAsWord", "two", v1) + valExists(t, "oneSliceAsWord", "one", v1) +} + +func TestEncoderOrder(t *testing.T) { + type builtinEncoderSimple int + type builtinEncoderSimpleOverridden int + type builtinEncoderSlice []int + type builtinEncoderSliceOverridden []int + type builtinEncoderStruct struct{ nr int } + type builtinEncoderStructOverridden struct{ nr int } + + s1 := &struct { + builtinEncoderSimple `schema:"simple"` + builtinEncoderSimpleOverridden `schema:"simple_overridden"` + builtinEncoderSlice `schema:"slice"` + builtinEncoderSliceOverridden `schema:"slice_overridden"` + builtinEncoderStruct `schema:"struct"` + builtinEncoderStructOverridden `schema:"struct_overridden"` + }{ + 1, + 1, + []int{2}, + []int{2}, + builtinEncoderStruct{3}, + builtinEncoderStructOverridden{3}, + } + v1 := make(map[string][]string) + + encoder := NewEncoder() + encoder.RegisterEncoder(s1.builtinEncoderSimpleOverridden, func(v reflect.Value) string { return "one" }) + encoder.RegisterEncoder(s1.builtinEncoderSliceOverridden, func(v reflect.Value) string { return "two" }) + encoder.RegisterEncoder(s1.builtinEncoderStructOverridden, func(v reflect.Value) string { return "three" }) + + err := encoder.Encode(s1, v1) + if err != nil { + t.Errorf("Encoder has non-nil error: %v", err) + } + + valExists(t, "simple", "1", v1) + valExists(t, "simple_overridden", "one", v1) + valExists(t, "slice", "2", v1) + valExists(t, "slice_overridden", "two", v1) + valExists(t, "nr", "3", v1) + valExists(t, "struct_overridden", "three", v1) +} + +func valExists(t *testing.T, key string, expect string, result map[string][]string) { + valsExist(t, key, []string{expect}, result) +} + +func valsExist(t *testing.T, key string, expect []string, result map[string][]string) { + vals, ok := result[key] + if !ok { + t.Fatalf("Key not found. Expected: %s", key) + } + + if len(expect) != len(vals) { + t.Fatalf("Expected: %v, got: %v", expect, vals) + } + + for i, v := range expect { + if vals[i] != v { + t.Fatalf("Unexpected value. Expected: %v, got %v", v, vals[i]) + } + } +} + +func valNotExists(t *testing.T, key string, result map[string][]string) { + if val, ok := result[key]; ok { + t.Error("Key not ommited. 
Expected: empty; got: " + val[0] + ".") + } +} + +type E4 struct { + ID string `json:"id"` +} + +func TestEncoderSetAliasTag(t *testing.T) { + data := map[string][]string{} + + s := E4{ + ID: "foo", + } + encoder := NewEncoder() + encoder.SetAliasTag("json") + encoder.Encode(&s, data) + valExists(t, "id", "foo", data) +} + +type E5 struct { + F01 int `schema:"f01,omitempty"` + F02 string `schema:"f02,omitempty"` + F03 *string `schema:"f03,omitempty"` + F04 *int8 `schema:"f04,omitempty"` + F05 float64 `schema:"f05,omitempty"` + F06 E5F06 `schema:"f06,omitempty"` + F07 E5F06 `schema:"f07,omitempty"` + F08 []string `schema:"f08,omitempty"` + F09 []string `schema:"f09,omitempty"` +} + +type E5F06 struct { + F0601 string `schema:"f0601,omitempty"` +} + +func TestEncoderWithOmitempty(t *testing.T) { + vals := map[string][]string{} + + s := E5{ + F02: "test", + F07: E5F06{ + F0601: "test", + }, + F09: []string{"test"}, + } + + encoder := NewEncoder() + encoder.Encode(&s, vals) + + valNotExists(t, "f01", vals) + valExists(t, "f02", "test", vals) + valNotExists(t, "f03", vals) + valNotExists(t, "f04", vals) + valNotExists(t, "f05", vals) + valNotExists(t, "f06", vals) + valExists(t, "f0601", "test", vals) + valNotExists(t, "f08", vals) + valsExist(t, "f09", []string{"test"}, vals) +} + +type E6 struct { + F01 *inner + F02 *inner + F03 *inner `schema:",omitempty"` +} + +func TestStructPointer(t *testing.T) { + vals := map[string][]string{} + s := E6{ + F01: &inner{2}, + } + + encoder := NewEncoder() + encoder.Encode(&s, vals) + valExists(t, "F12", "2", vals) + valExists(t, "F02", "null", vals) + valNotExists(t, "F03", vals) +} + +func TestRegisterEncoderCustomArrayType(t *testing.T) { + type CustomInt []int + type S1 struct { + SomeInts CustomInt `schema:",omitempty"` + } + + ss := []S1{ + {}, + {CustomInt{}}, + {CustomInt{1, 2, 3}}, + } + + for s := range ss { + vals := map[string][]string{} + + encoder := NewEncoder() + encoder.RegisterEncoder(CustomInt{}, func(value reflect.Value) string { + return fmt.Sprint(value.Interface()) + }) + + encoder.Encode(s, vals) + t.Log(vals) + } +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index cd3569d53..b051e9591 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -1051,8 +1051,9 @@ func (c *Conn) CloseHandler() func(code int, text string) error { // if the close message is empty. The default close handler sends a close // message back to the peer. // -// The application must read the connection to process close messages as -// described in the section on Control Messages above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. // // The connection read methods return a CloseError when a close message is // received. Most applications should handle close messages as part of their @@ -1079,8 +1080,9 @@ func (c *Conn) PingHandler() func(appData string) error { // The appData argument to h is the PING message application data. The default // ping handler sends a pong to the peer. // -// The application must read the connection to process ping messages as -// described in the section on Control Messages above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. 
The application must read the connection to process +// ping messages as described in the section on Control Messages above. func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { @@ -1105,8 +1107,9 @@ func (c *Conn) PongHandler() func(appData string) error { // The appData argument to h is the PONG message application data. The default // pong handler does nothing. // -// The application must read the connection to process ping messages as -// described in the section on Control Messages above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go index c7172c406..e5e6e57f2 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -338,6 +338,11 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { if !n.isLeaf() { return nil, nil } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf // Remove the leaf node nc := t.writeNode(n, true) @@ -347,7 +352,7 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { if n != t.root && len(nc.edges) == 1 { t.mergeChild(nc) } - return nc, n.leaf + return nc, oldLeaf } // Look for an edge diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go index bc9c77c20..326299de8 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go @@ -173,7 +173,7 @@ func TestRoot(t *testing.T) { } val, ok := r.Get(nil) if !ok || val != true { - t.Fatalf("bad: %v %#v", val) + t.Fatalf("bad: %#v", val) } r, val, ok = r.Delete(nil) if !ok || val != true { @@ -1494,3 +1494,38 @@ func TestTrackMutate_cachedNodeChange(t *testing.T) { } } } + +func TestLenTxn(t *testing.T) { + r := New() + + if r.Len() != 0 { + t.Fatalf("not starting with empty tree") + } + + txn := r.Txn() + keys := []string{ + "foo/bar/baz", + "foo/baz/bar", + "foo/zip/zap", + "foobar", + "nochange", + } + for _, k := range keys { + txn.Insert([]byte(k), nil) + } + r = txn.Commit() + + if r.Len() != len(keys) { + t.Fatalf("bad: expected %d, got %d", len(keys), r.Len()) + } + + txn = r.Txn() + for _, k := range keys { + txn.Delete([]byte(k)) + } + r = txn.Commit() + + if r.Len() != 0 { + t.Fatalf("tree len should be zero, got %d", r.Len()) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go index 4b4e63808..90b6576f2 100644 --- a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go @@ -16,7 +16,7 @@ var ( // Centralize all regexps and regexp.Copy() where necessary. 
signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`) whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`) - ifNameRE *regexp.Regexp = regexp.MustCompile(`^Ethernet adapter ([^\s:]+):`) + ifNameRE *regexp.Regexp = regexp.MustCompile(`^Ethernet adapter ([^:]+):`) ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`) ) diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go index 337d96329..e474cd075 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -30,9 +30,9 @@ type TwoQueueCache struct { size int recentSize int - recent *simplelru.LRU - frequent *simplelru.LRU - recentEvict *simplelru.LRU + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache lock sync.RWMutex } @@ -84,7 +84,8 @@ func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCa return c, nil } -func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -105,6 +106,7 @@ func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { return nil, false } +// Add adds a value to the cache. func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -160,12 +162,15 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { c.frequent.RemoveOldest() } +// Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() @@ -174,6 +179,7 @@ func (c *TwoQueueCache) Keys() []interface{} { return append(k1, k2...) } +// Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -188,6 +194,7 @@ func (c *TwoQueueCache) Remove(key interface{}) { } } +// Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() @@ -196,13 +203,17 @@ func (c *TwoQueueCache) Purge() { c.recentEvict.Purge() } +// Contains is used to check if the cache contains a key +// without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } -func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
+func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go index a2a252817..555225a21 100644 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -18,11 +18,11 @@ type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 - t1 *simplelru.LRU // T1 is the LRU for recently accessed items - b1 *simplelru.LRU // B1 is the LRU for evictions from t1 + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - t2 *simplelru.LRU // T2 is the LRU for frequently accessed items - b2 *simplelru.LRU // B2 is the LRU for evictions from t2 + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } @@ -60,11 +60,11 @@ func NewARC(size int) (*ARCCache, error) { } // Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (interface{}, bool) { +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() - // Ff the value is contained in T1 (recent), then + // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) @@ -153,7 +153,7 @@ func (c *ARCCache) Add(key, value interface{}) { // Remove from B2 c.b2.Remove(key) - // Add the key to the frequntly used list + // Add the key to the frequently used list c.t2.Add(key, value) return } @@ -247,7 +247,7 @@ func (c *ARCCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. -func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. 
+package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index a6285f989..c8d9b0a23 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -1,6 +1,3 @@ -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru package lru import ( @@ -11,11 +8,11 @@ import ( // Cache is a thread-safe fixed size LRU cache. type Cache struct { - lru *simplelru.LRU + lru simplelru.LRUCache lock sync.RWMutex } -// New creates an LRU of the given size +// New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } @@ -33,7 +30,7 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() @@ -41,30 +38,30 @@ func (c *Cache) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) bool { +func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() defer c.lock.Unlock() return c.lru.Add(key, value) } // Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (interface{}, bool) { +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() return c.lru.Get(key) } -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.lru.Contains(key) } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (interface{}, bool) { +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() return c.lru.Peek(key) @@ -73,16 +70,15 @@ func (c *Cache) Peek(key interface{}) (interface{}, bool) { // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false - } else { - evict := c.lru.Add(key, value) - return false, evict } + evicted = c.lru.Add(key, value) + return false, evicted } // Remove removes the provided key from the cache. 
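For context on how the updated golang-lru API reads after this change, here is a minimal sketch (not part of the vendored patch; the capacity of 128 and the key names are illustrative) exercising the thread-safe `Cache` wrapper and the methods documented above:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New creates an LRU of the given size.
	cache, err := lru.New(128)
	if err != nil {
		panic(err)
	}

	// Add reports whether an eviction occurred; with capacity 128 and
	// only two entries it stays false here.
	evicted := cache.Add("alpha", 1)
	cache.Add("beta", 2)

	// Get updates recency; Contains and Peek deliberately do not.
	if v, ok := cache.Get("alpha"); ok {
		fmt.Println("alpha =", v, "evicted:", evicted)
	}

	// ContainsOrAdd reports whether the key was already present and
	// whether adding it evicted an older entry.
	found, evictedOnAdd := cache.ContainsOrAdd("beta", 3)
	fmt.Println("beta found:", found, "evicted on add:", evictedOnAdd)
}
```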
diff --git a/vendor/github.com/hashicorp/golang-lru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/lru_test.go index 2b31218b0..e7e23505e 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru_test.go +++ b/vendor/github.com/hashicorp/golang-lru/lru_test.go @@ -72,7 +72,7 @@ func TestLRU(t *testing.T) { if k != v { t.Fatalf("Evict values not equal (%v!=%v)", k, v) } - evictCounter += 1 + evictCounter++ } l, err := NewWithEvict(128, onEvicted) if err != nil { @@ -136,7 +136,7 @@ func TestLRU(t *testing.T) { func TestLRUAdd(t *testing.T) { evictCounter := 0 onEvicted := func(k interface{}, v interface{}) { - evictCounter += 1 + evictCounter++ } l, err := NewWithEvict(1, onEvicted) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index cb416b394..5673773b2 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -36,7 +36,7 @@ func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { @@ -48,7 +48,7 @@ func (c *LRU) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) bool { +func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) @@ -78,17 +78,18 @@ func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { return } -// Check if a key is in the cache, without updating the recent-ness +// Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { + var ent *list.Element + if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok @@ -96,7 +97,7 @@ func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { // Remove removes the provided key from the cache, returning if the // key was contained. -func (c *LRU) Remove(key interface{}) bool { +func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true @@ -105,7 +106,7 @@ func (c *LRU) Remove(key interface{}) bool { } // RemoveOldest removes the oldest item from the cache. 
-func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) @@ -116,7 +117,7 @@ func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (interface{}, interface{}, bool) { +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..744cac01c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,37 @@ +package simplelru + + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go index a958934f6..ca5676e1e 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go @@ -8,7 +8,7 @@ func TestLRU(t *testing.T) { if k != v { t.Fatalf("Evict values not equal (%v!=%v)", k, v) } - evictCounter += 1 + evictCounter++ } l, err := NewLRU(128, onEvicted) if err != nil { @@ -112,7 +112,7 @@ func TestLRU_GetOldest_RemoveOldest(t *testing.T) { func TestLRU_Add(t *testing.T) { evictCounter := 0 onEvicted := func(k interface{}, v interface{}) { - evictCounter += 1 + evictCounter++ } l, err := NewLRU(1, onEvicted) diff --git a/vendor/github.com/hashicorp/memberlist/README.md b/vendor/github.com/hashicorp/memberlist/README.md index 0adc075e8..e6fed4e64 100644 --- a/vendor/github.com/hashicorp/memberlist/README.md +++ b/vendor/github.com/hashicorp/memberlist/README.md @@ -65,7 +65,7 @@ For complete documentation, see the associated [Godoc](http://godoc.org/github.c ## Protocol -memberlist is based on ["SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol"](http://www.cs.cornell.edu/~asdas/research/dsn02-swim.pdf). However, we extend the protocol in a number of ways: +memberlist is based on ["SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol"](http://ieeexplore.ieee.org/document/1028914/). 
However, we extend the protocol in a number of ways: * Several extensions are made to increase propagation speed and convergence rate. diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml index 4e34e8857..79c59a81d 100644 --- a/vendor/github.com/lib/pq/.travis.yml +++ b/vendor/github.com/lib/pq/.travis.yml @@ -1,7 +1,6 @@ language: go go: - - 1.5.x - 1.6.x - 1.7.x - 1.8.x diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index b0d98d81d..adefa17e8 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -1,10 +1,29 @@ ## Changelog +### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 + + * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. + + See PR for an example. + + Thanks to [@yobert](https://github.com/yobert) for the fix. + +### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 + + * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value + + Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail + with a `circular reference error`. + + Thanks to [@yobert](https://github.com/yobert) for the fix. + ### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces + * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - Thanks to @mgurov for the fix. + + Thanks to [@mgurov](https://github.com/mgurov) for the fix. ### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE index 7eab43b6b..b387087c5 100644 --- a/vendor/github.com/magiconair/properties/LICENSE +++ b/vendor/github.com/magiconair/properties/LICENSE @@ -1,6 +1,6 @@ goproperties - properties file decoder for Go -Copyright (c) 2013-2014 - Frank Schroeder +Copyright (c) 2013-2018 - Frank Schroeder All rights reserved. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 258fbb2b5..15dfde5cb 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,7 +1,11 @@ -Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties) -======== +[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) +[![Build Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square)](https://travis-ci.org/magiconair/properties) +[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) +[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) -#### Current version: 1.7.4 +# Overview + +#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. properties is a Go library for reading and writing properties files. 
@@ -27,8 +31,7 @@ details. Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties) -Getting Started ---------------- +## Getting Started ```go import ( @@ -83,18 +86,43 @@ func main() { ``` -Installation and Upgrade ------------------------- +## Installation and Upgrade ``` $ go get -u github.com/magiconair/properties ``` -License -------- +## License 2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. -ToDo ----- +## ToDo + * Dump contents with passwords and secrets obscured + +## Updated Git tags + +#### 13 Feb 2018 + +I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags +and I've only recently learned that this doesn't play well with `git describe` 😞 + +I have replaced all lightweight tags with signed tags using this script which should +retain the commit date, name and email address. Please run `git pull --tags` to update them. + +Worst case you have to reclone the repo. + +```shell +#!/bin/bash +tag=$1 +echo "Updating $tag" +date=$(git show ${tag}^0 --format=%aD | head -1) +email=$(git show ${tag}^0 --format=%aE | head -1) +name=$(git show ${tag}^0 --format=%aN | head -1) +GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} +``` + +I apologize for the inconvenience. + +Frank + diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index 278cc2ea0..9c83fd638 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -218,7 +218,7 @@ func must(p *Properties, err error) *Properties { // with an empty string. Malformed expressions like "${ENV_VAR" will // be reported as error. func expandName(name string) (string, error) { - return expand(name, make(map[string]bool), "${", "}", make(map[string]string)) + return expand(name, []string{}, "${", "}", make(map[string]string)) } // Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index 85bb18618..53f5b2ff0 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -19,6 +19,8 @@ import ( "unicode/utf8" ) +const maxExpansionDepth = 64 + // ErrorHandlerFunc defines the type of function which handles failures // of the MustXXX() functions. An error handler function must exit // the application after handling the error. @@ -92,7 +94,7 @@ func (p *Properties) Get(key string) (value string, ok bool) { return "", false } - expanded, err := p.expand(v) + expanded, err := p.expand(key, v) // we guarantee that the expanded value is free of // circular references and malformed expressions @@ -525,7 +527,7 @@ func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { p.m[key] = value // now check for a circular reference - _, err = p.expand(value) + _, err = p.expand(key, value) if err != nil { // revert to the previous state @@ -696,56 +698,65 @@ outer: // check expands all values and returns an error if a circular reference or // a malformed expression was found. 
func (p *Properties) check() error { - for _, value := range p.m { - if _, err := p.expand(value); err != nil { + for key, value := range p.m { + if _, err := p.expand(key, value); err != nil { return err } } return nil } -func (p *Properties) expand(input string) (string, error) { +func (p *Properties) expand(key, input string) (string, error) { // no pre/postfix -> nothing to expand if p.Prefix == "" && p.Postfix == "" { return input, nil } - return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m) + return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) } // expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. // The function keeps track of the keys that were already expanded and stops if it // detects a circular reference or a malformed expression of the form '(prefix)key'. -func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") +func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { + if len(keys) > maxExpansionDepth { + return "", fmt.Errorf("expansion too deep") } - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] + for { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } - if _, ok := keys[key]; ok { - return "", fmt.Errorf("circular reference") - } + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - // remember that we've seen the key - keys[key] = true + for _, k := range keys { + if key == k { + return "", fmt.Errorf("circular reference") + } + } - return expand(s[:start]+val+s[end+1:], keys, prefix, postfix, values) + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + new_val, err := expand(val, append(keys, key), prefix, postfix, values) + if err != nil { + return "", err + } + s = s[:start] + new_val + s[end+1:] + } + return s, nil } // encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
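To make the effect of the reworked expansion loop above concrete, a small illustrative sketch (not part of the patch; key names are made up): a value may now reference the same key several times, while a genuine self-reference is still rejected with a circular-reference error and deep chains are capped by `maxExpansionDepth`.

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// Duplicate expansions of the same key (key=${a} ${a}) no longer
	// trip the circular-reference check, because expand now tracks the
	// chain of keys instead of a flat seen-set.
	p, err := properties.Load([]byte("a=hello\nkey=${a} ${a}"), properties.ISO_8859_1)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.MustGet("key")) // "hello hello"

	// A genuine cycle is still detected when the value is set, and the
	// previous state is restored.
	if _, _, err := p.Set("loop", "${loop}"); err != nil {
		fmt.Println("circular reference detected:", err)
	}
}
```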
diff --git a/vendor/github.com/magiconair/properties/properties_test.go b/vendor/github.com/magiconair/properties/properties_test.go index a22f48df2..6401c77e2 100644 --- a/vendor/github.com/magiconair/properties/properties_test.go +++ b/vendor/github.com/magiconair/properties/properties_test.go @@ -90,6 +90,10 @@ var complexTests = [][]string{ {"key=value\nkey2=${key}bb", "key", "value", "key2", "valuebb"}, {"key=value\nkey2=aa${key}bb", "key", "value", "key2", "aavaluebb"}, {"key=value\nkey2=${key}\nkey3=${key2}", "key", "value", "key2", "value", "key3", "value"}, + {"key=value\nkey2=${key}${key}", "key", "value", "key2", "valuevalue"}, + {"key=value\nkey2=${key}${key}${key}${key}", "key", "value", "key2", "valuevaluevaluevalue"}, + {"key=value\nkey2=${key}${key3}\nkey3=${key}", "key", "value", "key2", "valuevalue", "key3", "value"}, + {"key=value\nkey2=${key3}${key}${key4}\nkey3=${key}\nkey4=${key}", "key", "value", "key2", "valuevaluevalue", "key3", "value", "key4", "value"}, {"key=${USER}", "key", os.Getenv("USER")}, {"key=${USER}\nUSER=value", "key", "value", "USER", "value"}, } @@ -446,6 +450,30 @@ func TestErrors(t *testing.T) { } } +func TestVeryDeep(t *testing.T) { + input := "key0=value\n" + prefix := "${" + postfix := "}" + i := 0 + for i = 0; i < maxExpansionDepth-1; i++ { + input += fmt.Sprintf("key%d=%skey%d%s\n", i+1, prefix, i, postfix) + } + + p, err := Load([]byte(input), ISO_8859_1) + assert.Equal(t, err, nil) + p.Prefix = prefix + p.Postfix = postfix + + assert.Equal(t, p.MustGet(fmt.Sprintf("key%d", i)), "value") + + // Nudge input over the edge + input += fmt.Sprintf("key%d=%skey%d%s\n", i+1, prefix, i, postfix) + + _, err = Load([]byte(input), ISO_8859_1) + assert.Equal(t, err != nil, true, "want error") + assert.Equal(t, strings.Contains(err.Error(), "expansion too deep"), true) +} + func TestDisableExpansion(t *testing.T) { input := "key=value\nkey2=${key}" p := mustParse(t, input) diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go index 81314e3b4..88b60d604 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -476,8 +476,8 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { // Single source object case (i.e. when only one source is // involved, it is being copied wholly and at most 5GiB in - // size). - if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize { + // size, emptyfiles are also supported). + if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { h := srcs[0].Headers // Add destination encryption headers for k, v := range dst.encryption.getSSEHeaders(false) { diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 681853849..fa8595bcd 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -81,12 +81,25 @@ type Client struct { // Random seed. random *rand.Rand + + // lookup indicates type of url lookup supported by server. If not specified, + // default to Auto. + lookup BucketLookupType +} + +// Options for New method +type Options struct { + Creds *credentials.Credentials + Secure bool + Region string + BucketLookup BucketLookupType + // Add future fields here } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "4.0.6" + libraryVersion = "4.0.7" ) // User Agent should always following the below style. 
@@ -98,11 +111,21 @@ const ( libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion ) +// BucketLookupType is type of url lookup supported by server. +type BucketLookupType int + +// Different types of url lookup supported by the server.Initialized to BucketLookupAuto +const ( + BucketLookupAuto BucketLookupType = iota + BucketLookupDNS + BucketLookupPath +) + // NewV2 - instantiate minio client with Amazon S3 signature version // '2' compatibility. func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -114,7 +137,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (* // '4' compatibility. func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -125,7 +148,7 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (* // New - instantiate minio client, adds automatic verification of signature. func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -144,7 +167,7 @@ func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, e // for retrieving credentials from various credentials provider such as // IAM, File, Env etc. func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { - return privateNew(endpoint, creds, secure, region) + return privateNew(endpoint, creds, secure, region, BucketLookupAuto) } // NewWithRegion - instantiate minio client, with region configured. Unlike New(), @@ -152,7 +175,12 @@ func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure // Use this function when if your application deals with single region. func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - return privateNew(endpoint, creds, secure, region) + return privateNew(endpoint, creds, secure, region, BucketLookupAuto) +} + +// NewWithOptions - instantiate minio client with options +func NewWithOptions(endpoint string, opts *Options) (*Client, error) { + return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup) } // lockedRandSource provides protected rand source, implements rand.Source interface. @@ -239,7 +267,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { return nil } -func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { +func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { // construct endpoint. 
endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { @@ -276,6 +304,9 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Introduce a new locked random seed. clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = lookup // Return. return clnt, nil } @@ -824,8 +855,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // endpoint URL. if bucketName != "" { // Save if target url will have buckets which suppport virtual host. - isVirtualHostStyle := s3utils.IsVirtualHostSupported(*c.endpointURL, bucketName) - + isVirtualHostStyle := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support // virtual host style. @@ -850,3 +880,16 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que return url.Parse(urlStr) } + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md index 33b8c5891..cc59375f6 100644 --- a/vendor/github.com/minio/minio-go/docs/API.md +++ b/vendor/github.com/minio/minio-go/docs/API.md @@ -86,16 +86,27 @@ __Parameters__ ### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error) Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when your application deals with a single region. +### NewWithOptions(endpoint string, options *Options) (*Client, error) +Initializes minio client with options configured. + __Parameters__ |Param |Type |Description | |:---|:---| :---| |`endpoint` | _string_ |S3 compatible object storage endpoint | -|`accessKeyID` |_string_ |Access key for the object storage | -|`secretAccessKey` | _string_ |Secret key for the object storage | -|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | -|`region`| _string_ | Region for the object storage | +|`opts` |_minio.Options_ | Options for constructing a new client| +__minio.Options__ + +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.Creds` | _*credentials.Credentials_ | Access Credentials| +| `opts.Secure` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | +| `opts.Region` | _string_ | region | +| `opts.BucketLookup` | _BucketLookupType_ | Bucket lookup type can be one of the following values | +| | | _minio.BucketLookupDNS_ | +| | | _minio.BucketLookupPath_ | +| | | _minio.BucketLookupAuto_ | ## 2. 
Bucket operations diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md index 7ecc785e4..0018dc7d9 100644 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -1,4 +1,4 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchell/mapstructure?status.svg)](https://godoc.org/github.com/mitchell/mapstructure) +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) mapstructure is a Go library for decoding generic map values to structures and vice versa, while providing helpful error handling. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index afcfd5eed..2a727575a 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -115,6 +115,25 @@ func StringToTimeDurationHookFunc() DecodeHookFunc { } } +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + // WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to // the decoder. // diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go index 53289afcf..a81728e74 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -153,6 +153,36 @@ func TestStringToTimeDurationHookFunc(t *testing.T) { } } +func TestStringToTimeHookFunc(t *testing.T) { + strType := reflect.TypeOf("") + timeType := reflect.TypeOf(time.Time{}) + cases := []struct { + f, t reflect.Type + layout string + data interface{} + result interface{} + err bool + }{ + {strType, timeType, time.RFC3339, "2006-01-02T15:04:05Z", + time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), false}, + {strType, timeType, time.RFC3339, "5", time.Time{}, true}, + {strType, strType, time.RFC3339, "5", "5", false}, + } + + for i, tc := range cases { + f := StringToTimeHookFunc(tc.layout) + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + func TestWeaklyTypedHook(t *testing.T) { var f DecodeHookFunc = WeaklyTypedHook diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 39ec1e943..65977b654 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -114,12 +114,12 @@ type Metadata struct { Unused []string } -// Decode takes a map and uses reflection to convert it into the -// given Go native structure. val must be a pointer to a struct. -func Decode(m interface{}, rawVal interface{}) error { +// Decode takes an input structure and uses reflection to translate it to +// the output structure. 
output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { config := &DecoderConfig{ Metadata: nil, - Result: rawVal, + Result: output, } decoder, err := NewDecoder(config) @@ -127,7 +127,7 @@ func Decode(m interface{}, rawVal interface{}) error { return err } - return decoder.Decode(m) + return decoder.Decode(input) } // WeakDecode is the same as Decode but is shorthand to enable @@ -147,6 +147,40 @@ func WeakDecode(input, output interface{}) error { return decoder.Decode(input) } +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + // NewDecoder returns a new decoder for the given configuration. Once // a decoder has been returned, the same configuration must not be used // again. @@ -184,70 +218,70 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. -func (d *Decoder) Decode(raw interface{}) error { - return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) } // Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { - if data == nil { - // If the data is nil, then we don't set anything. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + if input == nil { + // If the input is nil, then we don't set anything. return nil } - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - // If the data value is invalid, then we just set the value + inputVal := reflect.ValueOf(input) + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value // to be the zero value. - val.Set(reflect.Zero(val.Type())) + outVal.Set(reflect.Zero(outVal.Type())) return nil } if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the data. + // We have a DecodeHook, so let's pre-process the input. 
var err error - data, err = DecodeHookExec( + input, err = DecodeHookExec( d.config.DecodeHook, - dataVal.Type(), val.Type(), data) + inputVal.Type(), outVal.Type(), input) if err != nil { return fmt.Errorf("error decoding '%s': %s", name, err) } } var err error - dataKind := getKind(val) - switch dataKind { + inputKind := getKind(outVal) + switch inputKind { case reflect.Bool: - err = d.decodeBool(name, data, val) + err = d.decodeBool(name, input, outVal) case reflect.Interface: - err = d.decodeBasic(name, data, val) + err = d.decodeBasic(name, input, outVal) case reflect.String: - err = d.decodeString(name, data, val) + err = d.decodeString(name, input, outVal) case reflect.Int: - err = d.decodeInt(name, data, val) + err = d.decodeInt(name, input, outVal) case reflect.Uint: - err = d.decodeUint(name, data, val) + err = d.decodeUint(name, input, outVal) case reflect.Float32: - err = d.decodeFloat(name, data, val) + err = d.decodeFloat(name, input, outVal) case reflect.Struct: - err = d.decodeStruct(name, data, val) + err = d.decodeStruct(name, input, outVal) case reflect.Map: - err = d.decodeMap(name, data, val) + err = d.decodeMap(name, input, outVal) case reflect.Ptr: - err = d.decodePtr(name, data, val) + err = d.decodePtr(name, input, outVal) case reflect.Slice: - err = d.decodeSlice(name, data, val) + err = d.decodeSlice(name, input, outVal) case reflect.Array: - err = d.decodeArray(name, data, val) + err = d.decodeArray(name, input, outVal) case reflect.Func: - err = d.decodeFunc(name, data, val) + err = d.decodeFunc(name, input, outVal) default: // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + return fmt.Errorf("%s: unsupported type: %s", name, inputKind) } // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. + // mark the key as used if we're tracking metainput. if d.config.Metadata != nil && name != "" { d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } @@ -258,6 +292,9 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error // This decodes a basic type (bool, int, string, etc.) and sets the // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + return d.decode(name, data, val.Elem()) + } dataVal := reflect.ValueOf(data) if !dataVal.IsValid() { dataVal = reflect.Zero(val.Type()) @@ -499,34 +536,50 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er valMap = reflect.MakeMap(mapType) } - // Check input type + // Check input type and based on the input type jump to the proper func dataVal := reflect.Indirect(reflect.ValueOf(data)) - if dataVal.Kind() != reflect.Map { - // In weak mode, we accept a slice of maps as an input... 
- if d.config.WeaklyTypedInput { - switch dataVal.Kind() { - case reflect.Array, reflect.Slice: - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - fmt.Sprintf("%s[%d]", name, i), - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) - return nil - } + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) } + fallthrough + + default: return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() // Accumulate errors errors := make([]string, 0) @@ -563,22 +616,88 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er return nil } +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + // Determine the name of the key in the map + keyName := f.Name + tagValue := f.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + if tagValue == "-" { + continue + } + + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + err := d.decode(keyName, x.Interface(), vMap) + if err != nil { + return err + } + + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
valType := val.Type() valElemType := valType.Elem() - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } - val.Set(realVal) + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return err + } + } return nil } @@ -614,7 +733,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) val.Set(reflect.MakeSlice(sliceType, 0, 0)) return nil } - + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) // All other types we try to convert to the slice type // and "lift" it into it. i.e. a string becomes a string slice. default: @@ -622,7 +742,6 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) return d.decodeSlice(name, []interface{}{data}, val) } } - return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go index 89861edda..64b122a9d 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -128,6 +128,7 @@ type TypeConversionResult struct { FloatToBool bool FloatToString string SliceUint8ToString string + StringToSliceUint8 []byte ArrayUint8ToString string StringToInt int StringToUint uint @@ -248,6 +249,32 @@ func TestBasic_Merge(t *testing.T) { } } +// Test for issue #46. +func TestBasic_Struct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vdata": map[string]interface{}{ + "vstring": "foo", + }, + } + + var result, inner Basic + result.Vdata = &inner + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + expected := Basic{ + Vdata: &Basic{ + Vstring: "foo", + }, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + func TestDecode_BasicSquash(t *testing.T) { t.Parallel() @@ -626,6 +653,7 @@ func TestDecode_TypeConversion(t *testing.T) { "FloatToBool": 42.42, "FloatToString": 42.42, "SliceUint8ToString": []uint8("foo"), + "StringToSliceUint8": "foo", "ArrayUint8ToString": [3]uint8{'f', 'o', 'o'}, "StringToInt": "42", "StringToUint": "42", @@ -671,6 +699,7 @@ func TestDecode_TypeConversion(t *testing.T) { FloatToBool: true, FloatToString: "42.42", SliceUint8ToString: "foo", + StringToSliceUint8: []byte("foo"), ArrayUint8ToString: "foo", StringToInt: 42, StringToUint: 42, @@ -926,6 +955,56 @@ func TestNestedTypePointer(t *testing.T) { } } +// Test for issue #46. 
+func TestNestedTypeInterface(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + + "vdata": map[string]interface{}{ + "vstring": "bar", + }, + }, + } + + var result NestedPointer + result.Vbar = new(Basic) + result.Vbar.Vdata = new(Basic) + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + if result.Vbar.Vdata.(*Basic).Vstring != "bar" { + t.Errorf("vstring value should be 'bar': %#v", result.Vbar.Vdata.(*Basic).Vstring) + } +} + func TestSlice(t *testing.T) { t.Parallel() @@ -1112,6 +1191,197 @@ func TestArrayToMap(t *testing.T) { } } +func TestMapOutputForStructuredInputs(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in interface{} + target interface{} + out interface{} + wantErr bool + }{ + { + "basic struct input", + &Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vstring": "vstring", + "Vint": 2, + "Vuint": uint(3), + "Vbool": true, + "Vfloat": 4.56, + "Vextra": "vextra", + "Vdata": []byte("data"), + "VjsonInt": 0, + "VjsonFloat": 0.0, + "VjsonNumber": json.Number(""), + }, + false, + }, + { + "embedded struct input", + &Embedded{ + Vunique: "vunique", + Basic: Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vunique": "vunique", + "Basic": map[string]interface{}{ + "Vstring": "vstring", + "Vint": 2, + "Vuint": uint(3), + "Vbool": true, + "Vfloat": 4.56, + "Vextra": "vextra", + "Vdata": []byte("data"), + "VjsonInt": 0, + "VjsonFloat": 0.0, + "VjsonNumber": json.Number(""), + }, + }, + false, + }, + { + "slice input - should error", + []string{"foo", "bar"}, + &map[string]interface{}{}, + &map[string]interface{}{}, + true, + }, + { + "struct with slice property", + &Slice{ + Vfoo: "vfoo", + Vbar: []string{"foo", "bar"}, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vfoo": "vfoo", + "Vbar": []string{"foo", "bar"}, + }, + false, + }, + { + "struct with slice of struct property", + &SliceOfStruct{ + Value: []Basic{ + Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Value": []Basic{ + Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + }, + false, + }, + { + "struct with map property", + &Map{ + Vfoo: "vfoo", + Vother: map[string]string{"vother": "vother"}, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vfoo": "vfoo", + "Vother": map[string]string{ + "vother": "vother", + 
}}, + false, + }, + { + "tagged struct", + &Tagged{ + Extra: "extra", + Value: "value", + }, + &map[string]string{}, + &map[string]string{ + "bar": "extra", + "foo": "value", + }, + false, + }, + { + "omit tag struct", + &struct { + Value string `mapstructure:"value"` + Omit string `mapstructure:"-"` + }{ + Value: "value", + Omit: "omit", + }, + &map[string]string{}, + &map[string]string{ + "value": "value", + }, + false, + }, + { + "decode to wrong map type", + &struct { + Value string + }{ + Value: "string", + }, + &map[string]int{}, + &map[string]int{}, + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Decode(tt.in, tt.target); (err != nil) != tt.wantErr { + t.Fatalf("%q: TestMapOutputForStructuredInputs() unexpected error: %s", tt.name, err) + } + + if !reflect.DeepEqual(tt.out, tt.target) { + t.Fatalf("%q: TestMapOutputForStructuredInputs() expected: %#v, got: %#v", tt.name, tt.out, tt.target) + } + }) + } +} + func TestInvalidType(t *testing.T) { t.Parallel() @@ -1171,6 +1441,39 @@ func TestInvalidType(t *testing.T) { } } +func TestDecodeMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + + err := DecodeMetadata(input, &result, &md) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + func TestMetadata(t *testing.T) { t.Parallel() @@ -1311,6 +1614,43 @@ func TestWeakDecode(t *testing.T) { } } +func TestWeakDecodeMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + "unused": "value", + } + + var md Metadata + var result struct { + Foo int + Bar string + } + + if err := WeakDecodeMetadata(input, &result, &md); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } + + expectedKeys := []string{"Bar", "Foo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"unused"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { var result Slice err := Decode(input, &result) diff --git a/vendor/github.com/olivere/elastic/.travis.yml b/vendor/github.com/olivere/elastic/.travis.yml index b4322c13c..9658f873a 100644 --- a/vendor/github.com/olivere/elastic/.travis.yml +++ b/vendor/github.com/olivere/elastic/.travis.yml @@ -12,4 +12,4 @@ services: - docker before_install: - sudo sysctl -w vm.max_map_count=262144 - - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.1.2 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ + - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" 
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.1 elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ diff --git a/vendor/github.com/olivere/elastic/CONTRIBUTORS b/vendor/github.com/olivere/elastic/CONTRIBUTORS index d7f7f780f..ba06dac29 100644 --- a/vendor/github.com/olivere/elastic/CONTRIBUTORS +++ b/vendor/github.com/olivere/elastic/CONTRIBUTORS @@ -68,9 +68,11 @@ Joe Buck [@four2five](https://github.com/four2five) John Barker [@j16r](https://github.com/j16r) John Goodall [@jgoodall](https://github.com/jgoodall) John Stanford [@jxstanford](https://github.com/jxstanford) +Jonas Groenaas Drange [@semafor](https://github.com/semafor) Josh Chorlton [@jchorl](https://github.com/jchorl) jun [@coseyo](https://github.com/coseyo) Junpei Tsuji [@jun06t](https://github.com/jun06t) +kartlee [@kartlee](https://github.com/kartlee) Keith Hatton [@khatton-ft](https://github.com/khatton-ft) kel [@liketic](https://github.com/liketic) Kenta SUZUKI [@suzuken](https://github.com/suzuken) @@ -98,10 +100,13 @@ Orne Brocaar [@brocaar](https://github.com/brocaar) Paul [@eyeamera](https://github.com/eyeamera) Pete C [@peteclark-ft](https://github.com/peteclark-ft) Radoslaw Wesolowski [r--w](https://github.com/r--w) +Roman Colohanin [@zuzmic](https://github.com/zuzmic) Ryan Schmukler [@rschmukler](https://github.com/rschmukler) +Ryan Wynn [@rwynn](https://github.com/rwynn) Sacheendra talluri [@sacheendra](https://github.com/sacheendra) Sean DuBois [@Sean-Der](https://github.com/Sean-Der) Shalin LK [@shalinlk](https://github.com/shalinlk) +singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic) Stuart Warren [@Woz](https://github.com/stuart-warren) Sulaiman [@salajlan](https://github.com/salajlan) @@ -111,13 +116,13 @@ Take [ww24](https://github.com/ww24) Tetsuya Morimoto [@t2y](https://github.com/t2y) TimeEmit [@TimeEmit](https://github.com/timeemit) TusharM [@tusharm](https://github.com/tusharm) -zhangxin [@visaxin](https://github.com/visaxin) wangtuo [@wangtuo](https://github.com/wangtuo) Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri) wolfkdy [@wolfkdy](https://github.com/wolfkdy) Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb) Yarden Bar [@ayashjorden](https://github.com/ayashjorden) zakthomas [@zakthomas](https://github.com/zakthomas) -singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) +Yuya Kusakabe [@higebu](https://github.com/higebu) +Zach [@snowzach](https://github.com/snowzach) +zhangxin [@visaxin](https://github.com/visaxin) @æž— [@zplzpl](https://github.com/zplzpl) -Roman Colohanin [@zuzmic](https://github.com/zuzmic) diff --git a/vendor/github.com/olivere/elastic/README.md b/vendor/github.com/olivere/elastic/README.md index f452b664d..d0cdd7821 100644 --- a/vendor/github.com/olivere/elastic/README.md +++ b/vendor/github.com/olivere/elastic/README.md @@ -199,6 +199,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Significant Terms - [x] Significant Text - [x] Terms + - [x] Composite - Pipeline Aggregations - [x] Avg Bucket - [x] Derivative @@ -212,6 +213,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Cumulative Sum - [x] Bucket Script - [x] Bucket Selector + - [ ] Bucket Sort - [x] Serial Differencing - [x] Matrix Aggregations - [x] Matrix Stats @@ -234,17 +236,17 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. 
- [x] Update Indices Settings - [x] Get Settings - [x] Analyze + - [x] Explain Analyze - [x] Index Templates -- [ ] Shadow Replica Indices - [x] Indices Stats - [x] Indices Segments - [ ] Indices Recovery - [ ] Indices Shard Stores - [ ] Clear Cache - [x] Flush + - [x] Synced Flush - [x] Refresh - [x] Force Merge -- [ ] Upgrade ### cat APIs @@ -267,6 +269,7 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] cat shards - [ ] cat segments - [ ] cat snapshots +- [ ] cat templates ### Cluster APIs @@ -278,6 +281,8 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] Cluster Update Settings - [x] Nodes Stats - [x] Nodes Info +- [ ] Nodes Feature Usage +- [ ] Remote Cluster Info - [x] Task Management API - [ ] Nodes hot_threads - [ ] Cluster Allocation Explain API @@ -297,6 +302,7 @@ The cat APIs are not implemented as of now. We think they are better suited for - Term level queries - [x] Term Query - [x] Terms Query + - [x] Terms Set Query - [x] Range Query - [x] Exists Query - [x] Prefix Query @@ -311,7 +317,6 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Dis Max Query - [x] Function Score Query - [x] Boosting Query - - [x] Indices Query - Joining queries - [x] Nested Query - [x] Has Child Query @@ -321,12 +326,9 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] GeoShape Query - [x] Geo Bounding Box Query - [x] Geo Distance Query - - [ ] Geo Distance Range Query - [x] Geo Polygon Query - - [ ] Geohash Cell Query - Specialized queries - [x] More Like This Query - - [x] Template Query - [x] Script Query - [x] Percolate Query - Span queries @@ -346,7 +348,7 @@ The cat APIs are not implemented as of now. We think they are better suited for - Snapshot and Restore - [x] Repositories - - [ ] Snapshot + - [x] Snapshot - [ ] Restore - [ ] Snapshot status - [ ] Monitoring snapshot/restore status diff --git a/vendor/github.com/olivere/elastic/bulk_processor.go b/vendor/github.com/olivere/elastic/bulk_processor.go index b2709a880..6ee8a3dee 100644 --- a/vendor/github.com/olivere/elastic/bulk_processor.go +++ b/vendor/github.com/olivere/elastic/bulk_processor.go @@ -6,6 +6,7 @@ package elastic import ( "context" + "net" "sync" "sync/atomic" "time" @@ -121,7 +122,7 @@ func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { return s } -// Set the backoff strategy to use for errors +// Backoff sets the backoff strategy to use for errors. func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService { s.backoff = backoff return s @@ -248,6 +249,8 @@ type BulkProcessor struct { statsMu sync.Mutex // guards the following block stats *BulkProcessorStats + + stopReconnC chan struct{} // channel to signal stop reconnection attempts } func newBulkProcessor( @@ -293,6 +296,7 @@ func (p *BulkProcessor) Start(ctx context.Context) error { p.requestsC = make(chan BulkableRequest) p.executionId = 0 p.stats = newBulkProcessorStats(p.numWorkers) + p.stopReconnC = make(chan struct{}) // Create and start up workers. 
p.workers = make([]*bulkWorker, p.numWorkers) @@ -331,6 +335,12 @@ func (p *BulkProcessor) Close() error { return nil } + // Tell connection checkers to stop + if p.stopReconnC != nil { + close(p.stopReconnC) + p.stopReconnC = nil + } + // Stop flusher (if enabled) if p.flusherStopC != nil { p.flusherStopC <- struct{}{} @@ -436,29 +446,43 @@ func (w *bulkWorker) work(ctx context.Context) { var stop bool for !stop { + var err error select { case req, open := <-w.p.requestsC: if open { // Received a new request w.service.Add(req) if w.commitRequired() { - w.commit(ctx) // TODO swallow errors here? + err = w.commit(ctx) } } else { // Channel closed: Stop. stop = true if w.service.NumberOfActions() > 0 { - w.commit(ctx) // TODO swallow errors here? + err = w.commit(ctx) } } case <-w.flushC: // Commit outstanding requests if w.service.NumberOfActions() > 0 { - w.commit(ctx) // TODO swallow errors here? + err = w.commit(ctx) } w.flushAckC <- struct{}{} } + if !stop && err != nil { + waitForActive := func() { + // Add back pressure to prevent Add calls from filling up the request queue + ready := make(chan struct{}) + go w.waitForActiveConnection(ready) + <-ready + } + if _, ok := err.(net.Error); ok { + waitForActive() + } else if IsConnErr(err) { + waitForActive() + } + } } } @@ -511,6 +535,35 @@ func (w *bulkWorker) commit(ctx context.Context) error { return err } +func (w *bulkWorker) waitForActiveConnection(ready chan<- struct{}) { + defer close(ready) + + t := time.NewTicker(5 * time.Second) + defer t.Stop() + + client := w.p.c + stopReconnC := w.p.stopReconnC + w.p.c.errorf("elastic: bulk processor %q is waiting for an active connection", w.p.name) + + // loop until a health check finds at least 1 active connection or the reconnection channel is closed + for { + select { + case _, ok := <-stopReconnC: + if !ok { + w.p.c.errorf("elastic: bulk processor %q active connection check interrupted", w.p.name) + return + } + case <-t.C: + client.healthcheck(time.Duration(3)*time.Second, true) + if client.mustActiveConn() == nil { + // found an active connection + // exit and signal done to the WaitGroup + return + } + } + } +} + func (w *bulkWorker) updateStats(res *BulkResponse) { // Update stats if res != nil { diff --git a/vendor/github.com/olivere/elastic/client.go b/vendor/github.com/olivere/elastic/client.go index 1eb0ec54f..165a30526 100644 --- a/vendor/github.com/olivere/elastic/client.go +++ b/vendor/github.com/olivere/elastic/client.go @@ -26,7 +26,7 @@ import ( const ( // Version is the current version of Elastic. - Version = "6.1.4" + Version = "6.1.7" // DefaultURL is the default endpoint of Elasticsearch on the local machine. // It is used e.g. when initializing a new Client without a specific URL. @@ -1778,9 +1778,3 @@ func (c *Client) WaitForGreenStatus(timeout string) error { func (c *Client) WaitForYellowStatus(timeout string) error { return c.WaitForStatus("yellow", timeout) } - -// IsConnError unwraps the given error value and checks if it is equal to -// elastic.ErrNoClient. 
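To put the new reconnect handling in context, here is a minimal, illustrative sketch of how a caller might configure a bulk processor so that commit errors are retried with a backoff while the worker waits for an active connection. The builder methods are the ones used elsewhere in this patch; NewExponentialBackoff is assumed to be one of the library's stock Backoff implementations, and the processor name and sizes are placeholders.

package example

import (
	"context"
	"log"
	"time"

	"github.com/olivere/elastic"
)

func startBulkProcessor(client *elastic.Client) *elastic.BulkProcessor {
	p, err := elastic.NewBulkProcessorService(client).
		Name("indexer").
		Workers(2).
		BulkActions(1000).              // commit every 1000 queued requests
		FlushInterval(5 * time.Second). // or every 5 seconds, whichever comes first
		Backoff(elastic.NewExponentialBackoff(10*time.Millisecond, 8*time.Second)). // assumed helper
		Stats(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// On a net.Error or a connection error (see IsConnErr below), a worker now
	// blocks in waitForActiveConnection until a healthcheck finds a live node,
	// instead of silently dropping the failed commit.
	return p
}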
-func IsConnErr(err error) bool { - return errors.Cause(err) == ErrNoClient -} diff --git a/vendor/github.com/olivere/elastic/errors.go b/vendor/github.com/olivere/elastic/errors.go index 00a936621..e40cda845 100644 --- a/vendor/github.com/olivere/elastic/errors.go +++ b/vendor/github.com/olivere/elastic/errors.go @@ -9,6 +9,8 @@ import ( "fmt" "io/ioutil" "net/http" + + "github.com/pkg/errors" ) // checkResponse will return an error if the request/response indicates @@ -89,6 +91,12 @@ func (e *Error) Error() string { } } +// IsConnErr returns true if the error indicates that Elastic could not +// find an Elasticsearch host to connect to. +func IsConnErr(err error) bool { + return err == ErrNoClient || errors.Cause(err) == ErrNoClient +} + // IsNotFound returns true if the given error indicates that Elasticsearch // returned HTTP status 404. The err parameter can be of type *elastic.Error, // elastic.Error, *http.Response or int (indicating the HTTP status code). diff --git a/vendor/github.com/olivere/elastic/msearch.go b/vendor/github.com/olivere/elastic/msearch.go index ed54d3c2f..c1a589a97 100644 --- a/vendor/github.com/olivere/elastic/msearch.go +++ b/vendor/github.com/olivere/elastic/msearch.go @@ -14,19 +14,17 @@ import ( // MultiSearch executes one or more searches in one roundtrip. type MultiSearchService struct { - client *Client - requests []*SearchRequest - indices []string - pretty bool - routing string - preference string + client *Client + requests []*SearchRequest + indices []string + pretty bool + maxConcurrentRequests *int + preFilterShardSize *int } func NewMultiSearchService(client *Client) *MultiSearchService { builder := &MultiSearchService{ - client: client, - requests: make([]*SearchRequest, 0), - indices: make([]string, 0), + client: client, } return builder } @@ -46,6 +44,16 @@ func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { return s } +func (s *MultiSearchService) MaxConcurrentSearches(max int) *MultiSearchService { + s.maxConcurrentRequests = &max + return s +} + +func (s *MultiSearchService) PreFilterShardSize(size int) *MultiSearchService { + s.preFilterShardSize = &size + return s +} + func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { // Build url path := "/_msearch" @@ -55,6 +63,12 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } + if v := s.maxConcurrentRequests; v != nil { + params.Set("max_concurrent_searches", fmt.Sprintf("%v", *v)) + } + if v := s.preFilterShardSize; v != nil { + params.Set("pre_filter_shard_size", fmt.Sprintf("%v", *v)) + } // Set body var lines []string @@ -68,14 +82,14 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) if err != nil { return nil, err } - body, err := json.Marshal(sr.Body()) + body, err := sr.Body() if err != nil { return nil, err } lines = append(lines, string(header)) - lines = append(lines, string(body)) + lines = append(lines, body) } - body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + body := strings.Join(lines, "\n") + "\n" // add trailing \n // Get response res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ @@ -96,6 +110,7 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) return ret, nil } +// MultiSearchResult is the outcome of running a multi-search operation. 
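As a usage sketch for the additions above: the new MaxConcurrentSearches and PreFilterShardSize options map directly onto the max_concurrent_searches and pre_filter_shard_size query parameters, and the relocated IsConnErr helper (now in errors.go, below) distinguishes "no node available" from other failures. Index and field names here are made up for illustration.

package example

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func multiSearch(ctx context.Context, client *elastic.Client) {
	sreq1 := elastic.NewSearchRequest().Index("tweets").
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()))
	sreq2 := elastic.NewSearchRequest().Index("tweets").
		Source(`{"query":{"term":{"tags":"golang"}}}`) // raw JSON bodies are accepted too

	res, err := client.MultiSearch().
		Add(sreq1, sreq2).
		MaxConcurrentSearches(2). // max_concurrent_searches
		PreFilterShardSize(128).  // pre_filter_shard_size
		Do(ctx)
	if err != nil {
		if elastic.IsConnErr(err) {
			log.Fatal("no Elasticsearch node reachable")
		}
		log.Fatal(err)
	}
	for i, r := range res.Responses {
		fmt.Printf("response %d: %d total hits\n", i, r.Hits.TotalHits)
	}
}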
type MultiSearchResult struct { Responses []*SearchResult `json:"responses,omitempty"` } diff --git a/vendor/github.com/olivere/elastic/msearch_test.go b/vendor/github.com/olivere/elastic/msearch_test.go index 79f2047e6..d25e2cc28 100644 --- a/vendor/github.com/olivere/elastic/msearch_test.go +++ b/vendor/github.com/olivere/elastic/msearch_test.go @@ -13,6 +13,7 @@ import ( func TestMultiSearch(t *testing.T) { client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndexAndLog(t) tweet1 := tweet{ User: "olivere", @@ -60,6 +61,110 @@ func TestMultiSearch(t *testing.T) { sreq2 := NewSearchRequest().Index(testIndexName).Type("doc"). Source(NewSearchSource().Query(q2)) + searchResult, err := client.MultiSearch(). + Add(sreq1, sreq2). + Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 2 { + t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + + sres = searchResult.Responses[1] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 2 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 2 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMultiSearchWithStrings(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndexAndLog(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2). 
+ Source(`{"query":{"match_all":{}}}`) + sreq2 := NewSearchRequest().Index(testIndexName).Type("doc"). + Source(`{"query":{"term":{"tags":"golang"}}}`) + searchResult, err := client.MultiSearch(). Add(sreq1, sreq2). Do(context.TODO()) diff --git a/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go b/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go new file mode 100644 index 000000000..f13243297 --- /dev/null +++ b/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go @@ -0,0 +1,149 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +// BulkProcessor runs a bulk processing job that fills an index +// given certain criteria like flush interval etc. +// +// Example +// +// bulk_processor -url=http://127.0.0.1:9200/bulk-processor-test?sniff=false -n=100000 -flush-interval=1s +// +package main + +import ( + "context" + "flag" + "fmt" + "log" + "math/rand" + "os" + "os/signal" + "sync/atomic" + "syscall" + "time" + + "github.com/google/uuid" + + "github.com/olivere/elastic" + "github.com/olivere/elastic/config" +) + +func main() { + var ( + url = flag.String("url", "http://localhost:9200/bulk-processor-test", "Elasticsearch URL") + numWorkers = flag.Int("num-workers", 4, "Number of workers") + n = flag.Int64("n", -1, "Number of documents to process (-1 for unlimited)") + flushInterval = flag.Duration("flush-interval", 1*time.Second, "Flush interval") + bulkActions = flag.Int("bulk-actions", 0, "Number of bulk actions before committing") + bulkSize = flag.Int("bulk-size", 0, "Size of bulk requests before committing") + ) + flag.Parse() + log.SetFlags(0) + + rand.Seed(time.Now().UnixNano()) + + // Parse configuration from URL + cfg, err := config.Parse(*url) + if err != nil { + log.Fatal(err) + } + + // Create an Elasticsearch client from the parsed config + client, err := elastic.NewClientFromConfig(cfg) + if err != nil { + log.Fatal(err) + } + + // Drop old index + exists, err := client.IndexExists(cfg.Index).Do(context.Background()) + if err != nil { + log.Fatal(err) + } + if exists { + _, err = client.DeleteIndex(cfg.Index).Do(context.Background()) + if err != nil { + log.Fatal(err) + } + } + + // Create processor + bulkp := elastic.NewBulkProcessorService(client). + Name("bulk-test-processor"). + Stats(true). + Backoff(elastic.StopBackoff{}). + FlushInterval(*flushInterval). + Workers(*numWorkers) + if *bulkActions > 0 { + bulkp = bulkp.BulkActions(*bulkActions) + } + if *bulkSize > 0 { + bulkp = bulkp.BulkSize(*bulkSize) + } + p, err := bulkp.Do(context.Background()) + if err != nil { + log.Fatal(err) + } + + var created int64 + errc := make(chan error, 1) + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + <-c + errc <- nil + }() + + go func() { + defer func() { + if err := p.Close(); err != nil { + errc <- err + } + }() + + type Doc struct { + Timestamp time.Time `json:"@timestamp"` + } + + for { + current := atomic.AddInt64(&created, 1) + if *n > 0 && current >= *n { + errc <- nil + return + } + r := elastic.NewBulkIndexRequest(). + Index(cfg.Index). + Type("doc"). + Id(uuid.New().String()). 
+ Doc(Doc{Timestamp: time.Now()}) + p.Add(r) + + time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond) + } + }() + + go func() { + t := time.NewTicker(1 * time.Second) + defer t.Stop() + for range t.C { + stats := p.Stats() + written := atomic.LoadInt64(&created) + var queued int64 + for _, w := range stats.Workers { + queued += w.Queued + } + fmt.Printf("Queued=%5d Written=%8d Succeeded=%8d Failed=%8d Comitted=%6d Flushed=%6d\n", + queued, + written, + stats.Succeeded, + stats.Failed, + stats.Committed, + stats.Flushed, + ) + } + }() + + if err := <-errc; err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/olivere/elastic/reindex.go b/vendor/github.com/olivere/elastic/reindex.go index 35440fa80..9cdd50a68 100644 --- a/vendor/github.com/olivere/elastic/reindex.go +++ b/vendor/github.com/olivere/elastic/reindex.go @@ -20,6 +20,7 @@ type ReindexService struct { waitForActiveShards string waitForCompletion *bool requestsPerSecond *int + slices *int body interface{} source *ReindexSource destination *ReindexDestination @@ -51,6 +52,12 @@ func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexServic return s } +// Slices specifies the number of slices this task should be divided into. Defaults to 1. +func (s *ReindexService) Slices(slices int) *ReindexService { + s.slices = &slices + return s +} + // Refresh indicates whether Elasticsearch should refresh the effected indexes // immediately. func (s *ReindexService) Refresh(refresh string) *ReindexService { @@ -179,6 +186,9 @@ func (s *ReindexService) buildURL() (string, url.Values, error) { if s.requestsPerSecond != nil { params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) } + if s.slices != nil { + params.Set("slices", fmt.Sprintf("%v", *s.slices)) + } if s.waitForActiveShards != "" { params.Set("wait_for_active_shards", s.waitForActiveShards) } diff --git a/vendor/github.com/olivere/elastic/run-es.sh b/vendor/github.com/olivere/elastic/run-es.sh index 1f4a851d4..624a864ed 100755 --- a/vendor/github.com/olivere/elastic/run-es.sh +++ b/vendor/github.com/olivere/elastic/run-es.sh @@ -1,3 +1,3 @@ #!/bin/sh -VERSION=${VERSION:=6.1.2} -docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:$VERSION elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ +VERSION=${VERSION:=6.2.1} +docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:$VERSION elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ diff --git a/vendor/github.com/olivere/elastic/search.go b/vendor/github.com/olivere/elastic/search.go index 12d51bf1f..034b12096 100644 --- a/vendor/github.com/olivere/elastic/search.go +++ b/vendor/github.com/olivere/elastic/search.go @@ -111,8 +111,7 @@ func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { } // SearchType sets the search operation type. Valid values are: -// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", -// "dfs_query_and_fetch", "count", "scan". +// "dfs_query_then_fetch" and "query_then_fetch". // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-type.html // for details. 
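The Slices option added to ReindexService above only sets the slices query parameter, which lets Elasticsearch run the reindex as parallel sub-tasks. A rough sketch of combining it with the existing throttling option; SourceIndex and DestinationIndex are assumed to be the service's usual convenience setters, and the index names are placeholders.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

func reindexSliced(ctx context.Context, client *elastic.Client) error {
	// slices=4 splits the reindex into four parallel sub-tasks;
	// requests_per_second=500 throttles each of them.
	_, err := client.Reindex().
		SourceIndex("twitter").         // assumed convenience setter
		DestinationIndex("twitter-v2"). // assumed convenience setter
		Slices(4).
		RequestsPerSecond(500).
		WaitForCompletion(true).
		Do(ctx)
	return err
}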
func (s *SearchService) SearchType(searchType string) *SearchService { diff --git a/vendor/github.com/olivere/elastic/search_aggs.go b/vendor/github.com/olivere/elastic/search_aggs.go index c5082b2b1..6359611b1 100644 --- a/vendor/github.com/olivere/elastic/search_aggs.go +++ b/vendor/github.com/olivere/elastic/search_aggs.go @@ -653,6 +653,23 @@ func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, return nil, false } +// Composite returns composite bucket aggregation results. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html +// for details. +func (a Aggregations) Composite(name string) (*AggregationBucketCompositeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketCompositeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + // -- Single value metric -- // AggregationValueMetric is a single-value metric, returned e.g. by a @@ -1448,3 +1465,56 @@ func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error a.Aggregations = aggs return nil } + +// -- Composite key items -- + +// AggregationBucketCompositeItems implements the response structure +// for a bucket aggregation of type composite. +type AggregationBucketCompositeItems struct { + Aggregations + + Buckets []*AggregationBucketCompositeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItems structure. +func (a *AggregationBucketCompositeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketCompositeItem is a single bucket of an AggregationBucketCompositeItems structure. +type AggregationBucketCompositeItem struct { + Aggregations + + Key map[string]interface{} //`json:"key"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItem structure. +func (a *AggregationBucketCompositeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go new file mode 100644 index 000000000..1d9132d2d --- /dev/null +++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go @@ -0,0 +1,498 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CompositeAggregation is a multi-bucket values source based aggregation +// that can be used to calculate unique composite values from source documents. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html +// for details. +type CompositeAggregation struct { + after map[string]interface{} + size *int + sources []CompositeAggregationValuesSource + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewCompositeAggregation creates a new CompositeAggregation. +func NewCompositeAggregation() *CompositeAggregation { + return &CompositeAggregation{ + sources: make([]CompositeAggregationValuesSource, 0), + subAggregations: make(map[string]Aggregation), + } +} + +// Size represents the number of composite buckets to return. +// Defaults to 10 as of Elasticsearch 6.1. +func (a *CompositeAggregation) Size(size int) *CompositeAggregation { + a.size = &size + return a +} + +// AggregateAfter sets the values that indicate which composite bucket this +// request should "aggregate after". +func (a *CompositeAggregation) AggregateAfter(after map[string]interface{}) *CompositeAggregation { + a.after = after + return a +} + +// Sources specifies the list of CompositeAggregationValuesSource instances to +// use in the aggregation. +func (a *CompositeAggregation) Sources(sources ...CompositeAggregationValuesSource) *CompositeAggregation { + a.sources = append(a.sources, sources...) + return a +} + +// SubAggregations of this aggregation. +func (a *CompositeAggregation) SubAggregation(name string, subAggregation Aggregation) *CompositeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CompositeAggregation) Meta(metaData map[string]interface{}) *CompositeAggregation { + a.meta = metaData + return a +} + +// Source returns the serializable JSON for this aggregation. +func (a *CompositeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "my_composite_agg" : { + // "composite" : { + // "sources": [ + // {"my_term": { "terms": { "field": "product" }}}, + // {"my_histo": { "histogram": { "field": "price", "interval": 5 }}}, + // {"my_date": { "date_histogram": { "field": "timestamp", "interval": "1d" }}}, + // ], + // "size" : 10, + // "after" : ["a", 2, "c"] + // } + // } + // } + // } + // + // This method returns only the { "histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["composite"] = opts + + sources := make([]interface{}, len(a.sources)) + for i, s := range a.sources { + src, err := s.Source() + if err != nil { + return nil, err + } + sources[i] = src + } + opts["sources"] = sources + + if a.size != nil { + opts["size"] = *a.size + } + + if a.after != nil { + opts["after"] = a.after + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Generic interface for CompositeAggregationValues -- + +// CompositeAggregationValuesSource specifies the interface that +// all implementations for CompositeAggregation's Sources method +// need to implement. 
+// +// The different implementations are described in +// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_values_source_2. +type CompositeAggregationValuesSource interface { + Source() (interface{}, error) +} + +// -- CompositeAggregationTermsValuesSource -- + +// CompositeAggregationTermsValuesSource is a source for the CompositeAggregation that handles terms +// it works very similar to a terms aggregation with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_terms +// for details. +type CompositeAggregationTermsValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string +} + +// NewCompositeAggregationTermsValuesSource creates and initializes +// a new CompositeAggregationTermsValuesSource. +func NewCompositeAggregationTermsValuesSource(name string) *CompositeAggregationTermsValuesSource { + return &CompositeAggregationTermsValuesSource{ + name: name, + } +} + +// Field to use for this source. +func (a *CompositeAggregationTermsValuesSource) Field(field string) *CompositeAggregationTermsValuesSource { + a.field = field + return a +} + +// Script to use for this source. +func (a *CompositeAggregationTermsValuesSource) Script(script *Script) *CompositeAggregationTermsValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationTermsValuesSource) ValueType(valueType string) *CompositeAggregationTermsValuesSource { + a.valueType = valueType + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationTermsValuesSource) Order(order string) *CompositeAggregationTermsValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationTermsValuesSource) Asc() *CompositeAggregationTermsValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationTermsValuesSource) Desc() *CompositeAggregationTermsValuesSource { + a.order = "desc" + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationTermsValuesSource) Missing(missing interface{}) *CompositeAggregationTermsValuesSource { + a.missing = missing + return a +} + +// Source returns the serializable JSON for this values source. 
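A sketch of how the composite aggregation above might be wired into a search and read back through the new Aggregations.Composite accessor; the index and field names are illustrative only.

package example

import (
	"context"
	"errors"

	"github.com/olivere/elastic"
)

func searchComposite(ctx context.Context, client *elastic.Client) (*elastic.AggregationBucketCompositeItems, error) {
	// Two value sources: a terms source over "user" and a date_histogram over "created".
	comp := elastic.NewCompositeAggregation().
		Sources(
			elastic.NewCompositeAggregationTermsValuesSource("by_user").Field("user").Asc(),
			elastic.NewCompositeAggregationDateHistogramValuesSource("by_day", "1d").Field("created"),
		).
		Size(100)

	res, err := client.Search().
		Index("tweets").
		Size(0). // hits are not needed, only the aggregation
		Aggregation("comp", comp).
		Do(ctx)
	if err != nil {
		return nil, err
	}
	agg, found := res.Aggregations.Composite("comp")
	if !found {
		return nil, errors.New("composite aggregation missing from response")
	}
	return agg, nil
}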
+func (a *CompositeAggregationTermsValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["terms"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + return source, nil + +} + +// -- CompositeAggregationHistogramValuesSource -- + +// CompositeAggregationHistogramValuesSource is a source for the CompositeAggregation that handles histograms +// it works very similar to a terms histogram with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_histogram +// for details. +type CompositeAggregationHistogramValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string + interval float64 +} + +// NewCompositeAggregationHistogramValuesSource creates and initializes +// a new CompositeAggregationHistogramValuesSource. +func NewCompositeAggregationHistogramValuesSource(name string, interval float64) *CompositeAggregationHistogramValuesSource { + return &CompositeAggregationHistogramValuesSource{ + name: name, + interval: interval, + } +} + +// Field to use for this source. +func (a *CompositeAggregationHistogramValuesSource) Field(field string) *CompositeAggregationHistogramValuesSource { + a.field = field + return a +} + +// Script to use for this source. +func (a *CompositeAggregationHistogramValuesSource) Script(script *Script) *CompositeAggregationHistogramValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationHistogramValuesSource) ValueType(valueType string) *CompositeAggregationHistogramValuesSource { + a.valueType = valueType + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationHistogramValuesSource { + a.missing = missing + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationHistogramValuesSource) Order(order string) *CompositeAggregationHistogramValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationHistogramValuesSource) Asc() *CompositeAggregationHistogramValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationHistogramValuesSource) Desc() *CompositeAggregationHistogramValuesSource { + a.order = "desc" + return a +} + +// Interval specifies the interval to use. +func (a *CompositeAggregationHistogramValuesSource) Interval(interval float64) *CompositeAggregationHistogramValuesSource { + a.interval = interval + return a +} + +// Source returns the serializable JSON for this values source. 
+func (a *CompositeAggregationHistogramValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["histogram"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + // Histogram-related properties + values["interval"] = a.interval + + return source, nil + +} + +// -- CompositeAggregationDateHistogramValuesSource -- + +// CompositeAggregationDateHistogramValuesSource is a source for the CompositeAggregation that handles date histograms +// it works very similar to a date histogram aggregation with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_date_histogram +// for details. +type CompositeAggregationDateHistogramValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string + interval interface{} + timeZone string +} + +// NewCompositeAggregationDateHistogramValuesSource creates and initializes +// a new CompositeAggregationDateHistogramValuesSource. +func NewCompositeAggregationDateHistogramValuesSource(name string, interval interface{}) *CompositeAggregationDateHistogramValuesSource { + return &CompositeAggregationDateHistogramValuesSource{ + name: name, + interval: interval, + } +} + +// Field to use for this source. +func (a *CompositeAggregationDateHistogramValuesSource) Field(field string) *CompositeAggregationDateHistogramValuesSource { + a.field = field + return a +} + +// Script to use for this source. +func (a *CompositeAggregationDateHistogramValuesSource) Script(script *Script) *CompositeAggregationDateHistogramValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationDateHistogramValuesSource) ValueType(valueType string) *CompositeAggregationDateHistogramValuesSource { + a.valueType = valueType + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationDateHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationDateHistogramValuesSource { + a.missing = missing + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationDateHistogramValuesSource) Order(order string) *CompositeAggregationDateHistogramValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationDateHistogramValuesSource) Asc() *CompositeAggregationDateHistogramValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationDateHistogramValuesSource) Desc() *CompositeAggregationDateHistogramValuesSource { + a.order = "desc" + return a +} + +// Interval to use for the date histogram, e.g. "1d" or a numeric value like "60". 
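Because the AggregationBucketCompositeItems response type above exposes only the per-bucket keys, paging works by feeding the key of the last bucket of one page into AggregateAfter for the next request. A sketch under the same assumptions as the previous example (made-up index and field names):

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func pageComposite(ctx context.Context, client *elastic.Client) error {
	var after map[string]interface{}
	for {
		comp := elastic.NewCompositeAggregation().
			Sources(elastic.NewCompositeAggregationTermsValuesSource("by_user").Field("user")).
			Size(500)
		if after != nil {
			comp = comp.AggregateAfter(after) // resume after the last key seen
		}
		res, err := client.Search().Index("tweets").Size(0).
			Aggregation("comp", comp).Do(ctx)
		if err != nil {
			return err
		}
		agg, found := res.Aggregations.Composite("comp")
		if !found || len(agg.Buckets) == 0 {
			return nil // no more buckets
		}
		for _, b := range agg.Buckets {
			fmt.Printf("%v: %d docs\n", b.Key["by_user"], b.DocCount)
		}
		after = agg.Buckets[len(agg.Buckets)-1].Key
	}
}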
+func (a *CompositeAggregationDateHistogramValuesSource) Interval(interval interface{}) *CompositeAggregationDateHistogramValuesSource { + a.interval = interval + return a +} + +// TimeZone to use for the dates. +func (a *CompositeAggregationDateHistogramValuesSource) TimeZone(timeZone string) *CompositeAggregationDateHistogramValuesSource { + a.timeZone = timeZone + return a +} + +// Source returns the serializable JSON for this values source. +func (a *CompositeAggregationDateHistogramValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["date_histogram"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + // DateHistogram-related properties + values["interval"] = a.interval + + // timeZone + if a.timeZone != "" { + values["time_zone"] = a.timeZone + } + + return source, nil +} diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go new file mode 100644 index 000000000..91d84dbdb --- /dev/null +++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCompositeAggregation(t *testing.T) { + agg := NewCompositeAggregation(). + Sources( + NewCompositeAggregationTermsValuesSource("my_terms").Field("a_term").Missing("N/A").Order("asc"), + NewCompositeAggregationHistogramValuesSource("my_histogram", 5).Field("price").Asc(), + NewCompositeAggregationDateHistogramValuesSource("my_date_histogram", "1d").Field("purchase_date").Desc(), + ). + Size(10). + AggregateAfter(map[string]interface{}{ + "my_terms": "1", + "my_histogram": 2, + "my_date_histogram": "3", + }) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"composite":{"after":{"my_date_histogram":"3","my_histogram":2,"my_terms":"1"},"size":10,"sources":[{"my_terms":{"terms":{"field":"a_term","missing":"N/A","order":"asc"}}},{"my_histogram":{"histogram":{"field":"price","interval":5,"order":"asc"}}},{"my_date_histogram":{"date_histogram":{"field":"purchase_date","interval":"1d","order":"desc"}}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationTermsValuesSource(t *testing.T) { + in := NewCompositeAggregationTermsValuesSource("products"). 
+ Script(NewScript("doc['product'].value").Lang("painless")) + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"products":{"terms":{"script":{"lang":"painless","source":"doc['product'].value"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationHistogramValuesSource(t *testing.T) { + in := NewCompositeAggregationHistogramValuesSource("histo", 5). + Field("price") + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histo":{"histogram":{"field":"price","interval":5}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationDateHistogramValuesSource(t *testing.T) { + in := NewCompositeAggregationDateHistogramValuesSource("date", "1d"). + Field("timestamp") + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date":{"date_histogram":{"field":"timestamp","interval":"1d"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go index 5407dadb8..714fd3e11 100644 --- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go +++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go @@ -23,6 +23,7 @@ type DateRangeAggregation struct { meta map[string]interface{} keyed *bool unmapped *bool + timeZone string format string entries []DateRangeAggregationEntry } @@ -71,6 +72,11 @@ func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { return a } +func (a *DateRangeAggregation) TimeZone(timeZone string) *DateRangeAggregation { + a.timeZone = timeZone + return a +} + func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { a.format = format return a @@ -178,6 +184,9 @@ func (a *DateRangeAggregation) Source() (interface{}, error) { if a.unmapped != nil { opts["unmapped"] = *a.unmapped } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } if a.format != "" { opts["format"] = a.format } diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go index d1c909f3e..89ed495f3 100644 --- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go +++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go @@ -10,7 +10,7 @@ import ( ) func TestDateRangeAggregation(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at") + agg := NewDateRangeAggregation().Field("created_at").TimeZone("UTC") agg = agg.AddRange(nil, "2012-12-31") agg = agg.AddRange("2013-01-01", "2013-12-31") agg = agg.AddRange("2014-01-01", nil) @@ -23,7 +23,7 @@ func TestDateRangeAggregation(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + expected := 
`{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}],"time_zone":"UTC"}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/github.com/olivere/elastic/search_aggs_test.go b/vendor/github.com/olivere/elastic/search_aggs_test.go index 9d6fa8d27..f1b6347b3 100644 --- a/vendor/github.com/olivere/elastic/search_aggs_test.go +++ b/vendor/github.com/olivere/elastic/search_aggs_test.go @@ -13,13 +13,15 @@ import ( ) func TestAggs(t *testing.T) { - // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) client := setupTestClientAndCreateIndex(t) - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } + /* + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + */ tweet1 := tweet{ User: "olivere", @@ -48,7 +50,7 @@ func TestAggs(t *testing.T) { } // Add all documents - _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -102,6 +104,11 @@ func TestAggs(t *testing.T) { topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) geoBoundsAgg := NewGeoBoundsAggregation().Field("location") geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5) + composite := NewCompositeAggregation().Sources( + NewCompositeAggregationTermsValuesSource("composite_users").Field("user"), + NewCompositeAggregationHistogramValuesSource("composite_retweets", 1).Field("retweets"), + NewCompositeAggregationDateHistogramValuesSource("composite_created", "1m").Field("created"), + ) // Run query builder := client.Search().Index(testIndexName).Query(all).Pretty(true) @@ -109,9 +116,7 @@ func TestAggs(t *testing.T) { builder = builder.Aggregation("users", usersAgg) builder = builder.Aggregation("retweets", retweetsAgg) builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) - if esversion >= "2.0" { - builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) - } + builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) builder = builder.Aggregation("minRetweets", minRetweetsAgg) builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) @@ -134,44 +139,41 @@ func TestAggs(t *testing.T) { builder = builder.Aggregation("top-tags", topTagsAgg) builder = builder.Aggregation("viewport", geoBoundsAgg) builder = builder.Aggregation("geohashed", geoHashAgg) - if esversion >= "1.4" { - // Unnamed filters - countByUserAgg := NewFiltersAggregation(). - Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) - builder = builder.Aggregation("countByUser", countByUserAgg) - // Named filters - countByUserAgg2 := NewFiltersAggregation(). - FilterWithName("olivere", NewTermQuery("user", "olivere")). 
- FilterWithName("sandrae", NewTermQuery("user", "sandrae")) - builder = builder.Aggregation("countByUser2", countByUserAgg2) - } - if esversion >= "2.0" { - // AvgBucket - dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("avgBucketDateHisto", dateHisto) - builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) - // MinBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("minBucketDateHisto", dateHisto) - builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) - // MaxBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("maxBucketDateHisto", dateHisto) - builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) - // SumBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("sumBucketDateHisto", dateHisto) - builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) - // MovAvg - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) - builder = builder.Aggregation("movingAvgDateHisto", dateHisto) - } + // Unnamed filters + countByUserAgg := NewFiltersAggregation(). + Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser", countByUserAgg) + // Named filters + countByUserAgg2 := NewFiltersAggregation(). + FilterWithName("olivere", NewTermQuery("user", "olivere")). 
+ FilterWithName("sandrae", NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser2", countByUserAgg2) + // AvgBucket + dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("avgBucketDateHisto", dateHisto) + builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) + // MinBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("minBucketDateHisto", dateHisto) + builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) + // MaxBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("maxBucketDateHisto", dateHisto) + builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) + // SumBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("sumBucketDateHisto", dateHisto) + builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) + // MovAvg + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) + builder = builder.Aggregation("movingAvgDateHisto", dateHisto) + builder = builder.Aggregation("composite", composite) searchResult, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) @@ -308,26 +310,24 @@ func TestAggs(t *testing.T) { } // avgRetweetsWithMeta - if esversion >= "2.0" { - avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if avgMetaAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if avgMetaAggRes.Meta == nil { - t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) - } - metaDataValue, found := avgMetaAggRes.Meta["meta"] - if !found { - t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) - } - if flag, ok := metaDataValue.(bool); !ok { - t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) - } else if flag != true { - t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) - } + avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgMetaAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgMetaAggRes.Meta == nil { + t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) + } + metaDataValue, found := avgMetaAggRes.Meta["meta"] + if !found { + t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) + } + if flag, ok := metaDataValue.(bool); !ok { + t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) + } else if flag != true { + t.Fatalf("expected to return 
meta data key value %v; got: %v", true, flag) } // minRetweets @@ -817,13 +817,11 @@ func TestAggs(t *testing.T) { if topTags == nil { t.Fatalf("expected != nil; got: nil") } - if esversion >= "1.4.0" { - if topTags.DocCountErrorUpperBound != 0 { - t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) - } - if topTags.SumOfOtherDocCount != 1 { - t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) - } + if topTags.DocCountErrorUpperBound != 0 { + t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) + } + if topTags.SumOfOtherDocCount != 1 { + t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) } if len(topTags.Buckets) != 3 { t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) @@ -924,62 +922,71 @@ func TestAggs(t *testing.T) { t.Fatalf("expected != nil; got: nil") } - if esversion >= "1.4" { - // Filters agg "countByUser" (unnamed) - countByUserAggRes, found := agg.Filters("countByUser") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if countByUserAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(countByUserAggRes.Buckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) - } - if len(countByUserAggRes.NamedBuckets) != 0 { - t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets)) - } - if countByUserAggRes.Buckets[0].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) - } - if countByUserAggRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) - } + // Filters agg "countByUser" (unnamed) + countByUserAggRes, found := agg.Filters("countByUser") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUserAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUserAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) + } + if len(countByUserAggRes.NamedBuckets) != 0 { + t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets)) + } + if countByUserAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) + } + if countByUserAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) + } - // Filters agg "countByUser2" (named) - countByUser2AggRes, found := agg.Filters("countByUser2") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if countByUser2AggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(countByUser2AggRes.Buckets) != 0 { - t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets)) - } - if len(countByUser2AggRes.NamedBuckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets)) - } - b, found := countByUser2AggRes.NamedBuckets["olivere"] - if !found { - t.Fatalf("expected bucket %q; got: %v", "olivere", found) - } - if b == nil { - t.Fatalf("expected bucket %q; got: %v", "olivere", b) - } - if b.DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, b.DocCount) - } - b, found = countByUser2AggRes.NamedBuckets["sandrae"] - if !found { - t.Fatalf("expected bucket %q; got: %v", "sandrae", found) - } - if b == nil { - t.Fatalf("expected bucket %q; got: %v", "sandrae", b) - } - if b.DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, b.DocCount) - } + // Filters agg "countByUser2" (named) + countByUser2AggRes, found := agg.Filters("countByUser2") + if !found { + t.Errorf("expected 
%v; got: %v", true, found) + } + if countByUser2AggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUser2AggRes.Buckets) != 0 { + t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets)) + } + if len(countByUser2AggRes.NamedBuckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets)) + } + b, found := countByUser2AggRes.NamedBuckets["olivere"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "olivere", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "olivere", b) + } + if b.DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, b.DocCount) + } + b, found = countByUser2AggRes.NamedBuckets["sandrae"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "sandrae", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "sandrae", b) + } + if b.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, b.DocCount) + } + + compositeAggRes, found := agg.Composite("composite") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if compositeAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if want, have := 3, len(compositeAggRes.Buckets); want != have { + t.Fatalf("expected %d; got: %d", want, have) } } @@ -3231,3 +3238,179 @@ func TestAggsPipelineSerialDiff(t *testing.T) { t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) } } + +func TestAggsComposite(t *testing.T) { + s := `{ + "the_composite" : { + "buckets" : [ + { + "key" : { + "composite_users" : "olivere", + "composite_retweets" : 0.0, + "composite_created" : 1349856720000 + }, + "doc_count" : 1 + }, + { + "key" : { + "composite_users" : "olivere", + "composite_retweets" : 108.0, + "composite_created" : 1355333880000 + }, + "doc_count" : 1 + }, + { + "key" : { + "composite_users" : "sandrae", + "composite_retweets" : 12.0, + "composite_created" : 1321009080000 + }, + "doc_count" : 1 + } + ] + } + }` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Composite("the_composite") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if want, have := 3, len(agg.Buckets); want != have { + t.Fatalf("expected aggregation buckets length = %v; got: %v", want, have) + } + + // 1st bucket + bucket := agg.Buckets[0] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have) + } + v, found := bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok := v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "olivere", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok := v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 0.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to 
find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1349856720000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + + // 2nd bucket + bucket = agg.Buckets[1] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have) + } + v, found = bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok = v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "olivere", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 108.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1355333880000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + + // 3rd bucket + bucket = agg.Buckets[2] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have) + } + v, found = bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok = v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "sandrae", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 12.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1321009080000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } +} diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_set.go b/vendor/github.com/olivere/elastic/search_queries_terms_set.go new file mode 100644 index 000000000..be410a1a7 --- /dev/null +++ b/vendor/github.com/olivere/elastic/search_queries_terms_set.go @@ -0,0 +1,96 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsSetQuery returns any documents that match with at least +// one or more of the provided terms. The terms are not analyzed +// and thus must match exactly. The number of terms that must +// match varies per document and is either controlled by a +// minimum should match field or computed per document in a +// minimum should match script. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-terms-set-query.html +type TermsSetQuery struct { + name string + values []interface{} + minimumShouldMatchField string + minimumShouldMatchScript *Script + queryName string + boost *float64 +} + +// NewTermsSetQuery creates and initializes a new TermsSetQuery. +func NewTermsSetQuery(name string, values ...interface{}) *TermsSetQuery { + q := &TermsSetQuery{ + name: name, + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +// MinimumShouldMatchField specifies the field to match. +func (q *TermsSetQuery) MinimumShouldMatchField(minimumShouldMatchField string) *TermsSetQuery { + q.minimumShouldMatchField = minimumShouldMatchField + return q +} + +// MinimumShouldMatchScript specifies the script to match. +func (q *TermsSetQuery) MinimumShouldMatchScript(minimumShouldMatchScript *Script) *TermsSetQuery { + q.minimumShouldMatchScript = minimumShouldMatchScript + return q +} + +// Boost sets the boost for this query. +func (q *TermsSetQuery) Boost(boost float64) *TermsSetQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermsSetQuery) QueryName(queryName string) *TermsSetQuery { + q.queryName = queryName + return q +} + +// Source creates the query source for the term query. +func (q *TermsSetQuery) Source() (interface{}, error) { + // {"terms_set":{"codes":{"terms":["abc","def"],"minimum_should_match_field":"required_matches"}}} + source := make(map[string]interface{}) + inner := make(map[string]interface{}) + params := make(map[string]interface{}) + inner[q.name] = params + source["terms_set"] = inner + + // terms + params["terms"] = q.values + + // minimum_should_match_field + if match := q.minimumShouldMatchField; match != "" { + params["minimum_should_match_field"] = match + } + + // minimum_should_match_script + if match := q.minimumShouldMatchScript; match != nil { + src, err := match.Source() + if err != nil { + return nil, err + } + params["minimum_should_match_script"] = src + } + + // Common parameters for all queries + if q.boost != nil { + params["boost"] = *q.boost + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go b/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go new file mode 100644 index 000000000..e13fbfb2f --- /dev/null +++ b/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go @@ -0,0 +1,75 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "context" + "encoding/json" + "testing" +) + +func TestTermsSetQueryWithField(t *testing.T) { + q := NewTermsSetQuery("codes", "abc", "def", "ghi").MinimumShouldMatchField("required_matches") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms_set":{"codes":{"minimum_should_match_field":"required_matches","terms":["abc","def","ghi"]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsSetQueryWithScript(t *testing.T) { + q := NewTermsSetQuery("codes", "abc", "def", "ghi"). + MinimumShouldMatchScript( + NewScript(`Math.min(params.num_terms, doc['required_matches'].value)`), + ) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms_set":{"codes":{"minimum_should_match_script":{"source":"Math.min(params.num_terms, doc['required_matches'].value)"},"terms":["abc","def","ghi"]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchTermsSetQuery(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // Match all should return all documents + searchResult, err := client.Search(). + Index(testIndexName). + Query( + NewTermsSetQuery("user", "olivere", "sandrae"). + MinimumShouldMatchField("retweets"), + ). + Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if got, want := searchResult.Hits.TotalHits, int64(3); got != want { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) + } + if got, want := len(searchResult.Hits.Hits), 3; got != want { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) + } +} diff --git a/vendor/github.com/olivere/elastic/search_request.go b/vendor/github.com/olivere/elastic/search_request.go index 6f40ff028..8f08e73ff 100644 --- a/vendor/github.com/olivere/elastic/search_request.go +++ b/vendor/github.com/olivere/elastic/search_request.go @@ -4,13 +4,16 @@ package elastic -import "strings" +import ( + "encoding/json" + "strings" +) // SearchRequest combines a search request and its // query details (see SearchSource). // It is used in combination with MultiSearch. type SearchRequest struct { - searchType string // default in ES is "query_then_fetch" + searchType string indices []string types []string routing *string @@ -28,38 +31,23 @@ func NewSearchRequest() *SearchRequest { return &SearchRequest{} } -// SearchRequest must be one of "query_then_fetch", "query_and_fetch", -// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch". -// Use one of the constants defined via SearchType. +// SearchRequest must be one of "dfs_query_then_fetch" or +// "query_then_fetch". func (r *SearchRequest) SearchType(searchType string) *SearchRequest { r.searchType = searchType return r } +// SearchTypeDfsQueryThenFetch sets search type to dfs_query_then_fetch. 
func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest { return r.SearchType("dfs_query_then_fetch") } -func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest { - return r.SearchType("dfs_query_and_fetch") -} - +// SearchTypeQueryThenFetch sets search type to query_then_fetch. func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest { return r.SearchType("query_then_fetch") } -func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest { - return r.SearchType("query_and_fetch") -} - -func (r *SearchRequest) SearchTypeScan() *SearchRequest { - return r.SearchType("scan") -} - -func (r *SearchRequest) SearchTypeCount() *SearchRequest { - return r.SearchType("count") -} - func (r *SearchRequest) Index(indices ...string) *SearchRequest { r.indices = append(r.indices, indices...) return r @@ -130,17 +118,7 @@ func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest } func (r *SearchRequest) Source(source interface{}) *SearchRequest { - switch v := source.(type) { - case *SearchSource: - src, err := v.Source() - if err != nil { - // Do not do anything in case of an error - return r - } - r.source = src - default: - r.source = source - } + r.source = source return r } @@ -200,6 +178,34 @@ func (r *SearchRequest) header() interface{} { // Body is used e.g. by MultiSearch to get information about the search body // of one SearchRequest. // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html -func (r *SearchRequest) Body() interface{} { - return r.source +func (r *SearchRequest) Body() (string, error) { + switch t := r.source.(type) { + default: + body, err := json.Marshal(r.source) + if err != nil { + return "", err + } + return string(body), nil + case *SearchSource: + src, err := t.Source() + if err != nil { + return "", err + } + body, err := json.Marshal(src) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + if t != nil { + return *t, nil + } + return "{}", nil + } } diff --git a/vendor/github.com/olivere/elastic/search_test.go b/vendor/github.com/olivere/elastic/search_test.go index 097c26525..586089aaa 100644 --- a/vendor/github.com/olivere/elastic/search_test.go +++ b/vendor/github.com/olivere/elastic/search_test.go @@ -607,6 +607,61 @@ func TestSearchSource(t *testing.T) { } } +func TestSearchSourceWithString(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = 
client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + searchResult, err := client.Search(). + Index(testIndexName). + Source(`{"query":{"match_all":{}}}`). // sets the JSON request + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + func TestSearchRawString(t *testing.T) { // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) client := setupTestClientAndCreateIndex(t) diff --git a/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md b/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..a456acf5b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md @@ -0,0 +1,8 @@ + diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md index 0eb0df1df..17d46ecaf 100644 --- a/vendor/github.com/prometheus/client_golang/README.md +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -2,6 +2,7 @@ [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) +[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang) This is the [Go](http://golang.org) client library for [Prometheus](http://prometheus.io). It has two separate parts, one for @@ -10,6 +11,50 @@ Prometheus HTTP API. __This library requires Go1.7 or later.__ +## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool + +While our goal is to follow [Semantic Versioning](https://semver.org/), this +repository is still pre-1.0.0. To quote the +[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may +change at any time. The public API should not be considered stable.†We know +that this is at odds with the widespread use of this library. However, just +declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working +towards a 1.0.0 release that actually deserves its major version number. + +Having said that, we aim for always keeping the tip of master in a workable +state and for only introducing â€mildly†breaking changes up to and including +[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). After that, +a number of â€hard†breaking changes are planned, see the +[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2), +which should get the library much closer to 1.0.0 state. + +Dependency management in Go projects is still in flux, and there are many tools +floating around. While [dep](https://golang.github.io/dep/) might develop into +the de-facto standard tool, it is still officially experimental. The roadmap +for this library has been laid out with a lot of sometimes painful experience +in mind. We really cannot adjust it every other month to the needs of the +currently most popular or most promising Go dependency management tool. 
The +recommended course of action with dependency management tools is the following: + +- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0 + release. If your dependency management tool expects strict post-1.0.0 semver + semantics, you have to wait. Sorry. +- If you want absolute certainty, please lock to a specific commit. You can + also lock to tags, but please don't ask for more tagging. This would suggest + some release or stability testing procedure that simply is not in place. As + said above, we are aiming for stability of the tip of master, but if we + tagged every single commit, locking to tags would be the same as locking to + commits. +- If you want to get the newer features and improvements and are willing to + take the minor risk of newly introduced bugs and “mild†breakage, just always + update to the tip of master (which is essentially the original idea of Go + dependency management). We recommend to not use features marked as + _deprecated_ in this case. +- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is + out, you could lock to v0.9.x to get bugfixes (and perhaps minor new + features) while avoiding the “hard†breakage that will come with post-0.9 + features. + ## Instrumenting applications [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) @@ -26,7 +71,7 @@ contains simple examples of instrumented code. ## Client for the Prometheus HTTP API -[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api) The [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 36ef15567..f727c991d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,10 +11,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless // specified otherwise. 
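The revised package comment above frames package prometheus as the core instrumentation package, with promhttp and push as the exposition paths. A minimal sketch of that split, assuming an illustrative counter name and listen address not taken from this patch:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    var opsProcessed = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "myapp_ops_processed_total", // illustrative name, not from this patch
        Help: "Total number of processed operations.",
    })

    func main() {
        // Register the metric with the default registry and expose it via promhttp.
        prometheus.MustRegister(opsProcessed)
        opsProcessed.Inc()
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
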
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index bfee5c6eb..dd0f8197f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -61,9 +61,8 @@ func giveBuf(buf *bytes.Buffer) { // name). // // Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is not instrumented, but can be instrumented with the tooling provided -// in package promhttp). +// InstrumentHandler. You might want to consider using +// promhttp.InstrumentedHandler instead. func Handler() http.Handler { return InstrumentHandler("prometheus", UninstrumentedHandler()) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 94b2553e1..32ac74a7f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -26,8 +26,11 @@ type processCollector struct { } // NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time for the given process ID under the given namespace. +// +// Currently, the collector depends on a Linux-style proc filesystem and +// therefore only exports metrics for Linux. func NewProcessCollector(pid int, namespace string) Collector { return NewProcessCollectorPIDFn( func() (int, error) { return pid, nil }, @@ -35,11 +38,8 @@ func NewProcessCollector(pid int, namespace string) Collector { ) } -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. +// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is +// determined on each collect anew by calling the given pidFn function. 
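The (pid, namespace) signature documented in the process_collector hunk above lends itself to a short sketch; the namespace and listen address are illustrative assumptions, and a custom registry is used because the default registry already registers a process collector:

    package main

    import (
        "log"
        "net/http"
        "os"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // A custom registry avoids clashing with the process collector the
        // default registry already carries.
        reg := prometheus.NewRegistry()
        // (pid, namespace) matches the signature shown above; "myapp" is illustrative.
        reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
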
func NewProcessCollectorPIDFn( pidFn func() (int, error), namespace string, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 5ee095b09..9c1c66dcc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -102,10 +102,10 @@ func init() { return d } pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} + return &closeNotifierDelegator{d} } pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} + return &flusherDelegator{d} } pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 return struct { @@ -115,7 +115,7 @@ func init() { }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} } pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} + return &hijackerDelegator{d} } pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 return struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go index f4d386f7a..75a905e2f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -28,7 +28,7 @@ func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { func init() { pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} + return &pusherDelegator{d} } pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 return struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 2d67f2496..8dc260355 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -39,6 +39,7 @@ import ( "net/http" "strings" "sync" + "time" "github.com/prometheus/common/expfmt" @@ -67,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) { bufPool.Put(buf) } -// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The -// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP -// error, no error logging, and compression if requested by the client. +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. // -// If you want to create a Handler for the DefaultGatherer with different -// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and -// your desired HandlerOpts. +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. 
If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. func Handler() http.Handler { - return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) } -// HandlerFor returns an http.Handler for the provided Gatherer. The behavior -// of the Handler is defined by the provided HandlerOpts. +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var inFlightSem chan struct{} + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + + h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(w, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { @@ -137,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if encoding != "" { header.Set(contentEncodingHeader, encoding) } - w.Write(buf.Bytes()) + if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil { + opts.ErrorLog.Println("error while sending encoded metrics:", err) + } // TODO(beorn7): Consider streaming serving of metrics. }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. 
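The new MaxRequestsInFlight and Timeout options and InstrumentMetricHandler compose as sketched below against a custom registry; the limits, collector and listen address are illustrative choices, not taken from this patch:

    package main

    import (
        "log"
        "net/http"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())

        // HandlerFor returns an uninstrumented handler honouring the new options;
        // InstrumentMetricHandler layers on the same scrape counter and in-flight
        // gauge that Handler() now applies by default.
        h := promhttp.InstrumentMetricHandler(reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            MaxRequestsInFlight: 3,               // additional concurrent scrapes get 503
            Timeout:             5 * time.Second, // overlong gathers get 503
        }))
        http.Handle("/metrics", h)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
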
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) } // HandlerErrorHandling defines how a Handler serving metrics will handle @@ -183,6 +275,21 @@ type HandlerOpts struct { // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration } // decorateWriter wraps a writer to handle gzip compression if requested. It diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go index 413ff7baa..aeaa0b4d7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go @@ -19,7 +19,9 @@ import ( "log" "net/http" "net/http/httptest" + "strings" "testing" + "time" "github.com/prometheus/client_golang/prometheus" ) @@ -37,6 +39,23 @@ func (e errorCollector) Collect(ch chan<- prometheus.Metric) { ) } +type blockingCollector struct { + CollectStarted, Block chan struct{} +} + +func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil) +} + +func (b blockingCollector) Collect(ch chan<- prometheus.Metric) { + select { + case b.CollectStarted <- struct{}{}: + default: + } + // Collects nothing, just waits for a channel receive. 
+ <-b.Block +} + func TestHandlerErrorHandling(t *testing.T) { // Create a registry that collects a MetricFamily with two elements, @@ -129,3 +148,103 @@ the_count 0 }() panicHandler.ServeHTTP(writer, request) } + +func TestInstrumentMetricHandler(t *testing.T) { + reg := prometheus.NewRegistry() + handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + // Do it again to test idempotency. + InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want := "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + + writer.Body.Reset() + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want = "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } +} + +func TestHandlerMaxRequestsInFlight(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1}) + w1 := httptest.NewRecorder() + w2 := httptest.NewRecorder() + w3 := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + rq1Done := make(chan struct{}) + go func() { + handler.ServeHTTP(w1, request) + close(rq1Done) + }() + <-c.CollectStarted + + handler.ServeHTTP(w2, request) + + if got, want := w2.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) + <-rq1Done + + handler.ServeHTTP(w3, request) + + if got, want := w3.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } +} + +func TestHandlerTimeout(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond}) + w := httptest.NewRecorder() + + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + handler.ServeHTTP(w, request) + + if got, want := w.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) // To not leak 
a goroutine. +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go index e9af63e04..716c6f45e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go @@ -281,6 +281,16 @@ func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) { return 0, nil } +// testFlusher is an http.ResponseWriter that also implements http.Flusher. +type testFlusher struct { + flushCalled bool +} + +func (t *testFlusher) Header() http.Header { return nil } +func (t *testFlusher) Write([]byte) (int, error) { return 0, nil } +func (t *testFlusher) WriteHeader(int) {} +func (t *testFlusher) Flush() { t.flushCalled = true } + func TestInterfaceUpgrade(t *testing.T) { w := &testResponseWriter{} d := newDelegator(w, nil) @@ -299,6 +309,22 @@ func TestInterfaceUpgrade(t *testing.T) { if _, ok := d.(http.Hijacker); ok { t.Error("delegator unexpectedly implements http.Hijacker") } + + f := &testFlusher{} + d = newDelegator(f, nil) + if _, ok := d.(http.CloseNotifier); ok { + t.Error("delegator unexpectedly implements http.CloseNotifier") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + if _, ok := d.(io.ReaderFrom); ok { + t.Error("delegator unexpectedly implements io.ReaderFrom") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } } func ExampleInstrumentHandlerDuration() { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go b/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go new file mode 100644 index 000000000..3d62b5725 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push + +// This file contains only deprecated code. Remove after v0.9 is released. + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +// FromGatherer triggers a metric collection by the provided Gatherer (which is +// usually implemented by a prometheus.Registry) and pushes all gathered metrics +// to the Pushgateway specified by url, using the provided job name and the +// (optional) further grouping labels (the grouping map may be nil). See the +// Pushgateway documentation for detailed implications of the job and other +// grouping labels. Neither the job name nor any grouping label value may +// contain a "/". The metrics pushed must not contain a job label of their own +// nor any of the grouping labels. 
+// +// You can use just host:port or ip:port as url, in which case 'http://' is +// added automatically. You can also include the schema in the URL. However, do +// not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and other grouping +// labels will be replaced with the metrics pushed by this call. (It uses HTTP +// method 'PUT' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. +func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "PUT") +} + +// AddFromGatherer works like FromGatherer, but only previously pushed metrics +// with the same name (and the same job and other grouping labels) will be +// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. +func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "POST") +} + +func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + + if strings.Contains(job, "/") { + return fmt.Errorf("job contains '/': %s", job) + } + urlComponents := []string{url.QueryEscape(job)} + for ln, lv := range grouping { + if !model.LabelName(ln).IsValid() { + return fmt.Errorf("grouping label has invalid name: %s", ln) + } + if strings.Contains(lv, "/") { + return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) + } + urlComponents = append(urlComponents, ln, lv) + } + pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + + mfs, err := g.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} + +// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, +// it collects from the provided collectors directly. It is a convenient way to +// push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "PUT", collectors...) +} + +// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. 
+// Instead, it collects from the provided collectors directly. It is a +// convenient way to push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "POST", collectors...) +} + +func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { + r := prometheus.NewRegistry() + for _, collector := range collectors { + if err := r.Register(collector); err != nil { + return err + } + } + return push(job, grouping, url, r, method) +} + +// HostnameGroupingKey returns a label map with the only entry +// {instance=""}. This can be conveniently used as the grouping +// parameter if metrics should be pushed with the hostname as label. The +// returned map is created upon each call so that the caller is free to add more +// labels to the map. +// +// Deprecated: Usually, metrics pushed to the Pushgateway should not be +// host-centric. (You would use https://github.com/prometheus/node_exporter in +// that case.) If you have the need to add the hostname to the grouping key, you +// are probably doing something wrong. See +// https://prometheus.io/docs/practices/pushing/ for details. +func HostnameGroupingKey() map[string]string { + hostname, err := os.Hostname() + if err != nil { + return map[string]string{"instance": "unknown"} + } + return map[string]string{"instance": hostname} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go index 5180c0745..dd22b526a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go @@ -11,12 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - package push_test import ( @@ -53,10 +47,14 @@ func performBackup() (int, error) { return 42, nil } -func ExampleAddFromGatherer() { +func ExamplePusher_Add() { + // We use a registry here to benefit from the consistency checks that + // happen during registration. registry := prometheus.NewRegistry() registry.MustRegister(completionTime, duration, records) - // Note that successTime is not registered at this time. + // Note that successTime is not registered. + + pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry) start := time.Now() n, err := performBackup() @@ -67,18 +65,16 @@ func ExampleAddFromGatherer() { if err != nil { fmt.Println("DB backup failed:", err) } else { - // Only now register successTime. - registry.MustRegister(successTime) + // Add successTime to pusher only in case of success. + // We could as well register it with the registry. + // This example, however, demonstrates that you can + // mix Gatherers and Collectors when handling a Pusher. + pusher.Collector(successTime) successTime.SetToCurrentTime() } - // AddFromGatherer is used here rather than FromGatherer to not delete a - // previously pushed success timestamp in case of a failure of this - // backup. 
- if err := push.AddFromGatherer( - "db_backup", nil, - "http://pushgateway:9091", - registry, - ); err != nil { + // Add is used here rather than Push to not delete a previously pushed + // success timestamp in case of a failure of this backup. + if err := pusher.Add(); err != nil { fmt.Println("Could not push to Pushgateway:", err) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go index 7e0ac66a5..fa5549a9e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go @@ -20,17 +20,16 @@ import ( "github.com/prometheus/client_golang/prometheus/push" ) -func ExampleCollectors() { +func ExamplePusher_Push() { completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_last_completion_timestamp_seconds", Help: "The timestamp of the last successful completion of a DB backup.", }) completionTime.SetToCurrentTime() - if err := push.Collectors( - "db_backup", push.HostnameGroupingKey(), - "http://pushgateway:9091", - completionTime, - ); err != nil { + if err := push.New("http://pushgateway:9091", "db_backup"). + Collector(completionTime). + Grouping("db", "customers"). + Push(); err != nil { fmt.Println("Could not push completion time to Pushgateway:", err) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index 8fb6f5f17..02be52c38 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -11,20 +11,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. +// Package push provides functions to push metrics to a Pushgateway. It uses a +// builder approach. Create a Pusher with New and then add the various options +// by using its methods, finally calling Add or Push, like this: // -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package push provides functions to push metrics to a Pushgateway. The metrics -// to push are either collected from a provided registry, or from explicitly -// listed collectors. +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). +// BasicAuth("top", "secret"). +// Add() +// +// See the examples section for more detailed examples. 
 //
-// See the documentation of the Pushgateway to understand the meaning of the
-// grouping parameters and the differences between push.Registry and
-// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors
-// on the other hand: https://github.com/prometheus/pushgateway
+// See the documentation of the Pushgateway to understand the meaning of
+// the grouping key and the differences between Push and Add:
+// https://github.com/prometheus/pushgateway
 package push
 
 import (
@@ -33,7 +40,6 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/url"
-	"os"
 	"strings"
 
 	"github.com/prometheus/common/expfmt"
@@ -44,57 +50,149 @@ import (
 
 const contentTypeHeader = "Content-Type"
 
-// FromGatherer triggers a metric collection by the provided Gatherer (which is
-// usually implemented by a prometheus.Registry) and pushes all gathered metrics
-// to the Pushgateway specified by url, using the provided job name and the
-// (optional) further grouping labels (the grouping map may be nil). See the
-// Pushgateway documentation for detailed implications of the job and other
-// grouping labels. Neither the job name nor any grouping label value may
-// contain a "/". The metrics pushed must not contain a job label of their own
-// nor any of the grouping labels.
+// Pusher manages a push to the Pushgateway. Use New to create one, configure it
+// with its methods, and finally use the Add or Push method to push.
+type Pusher struct {
+	error error
+
+	url, job string
+	grouping map[string]string
+
+	gatherers  prometheus.Gatherers
+	registerer prometheus.Registerer
+
+	client             *http.Client
+	useBasicAuth       bool
+	username, password string
+}
+
+// New creates a new Pusher to push to the provided URL with the provided job
+// name. You can use just host:port or ip:port as url, in which case “http://”
+// is added automatically. Alternatively, include the schema in the
+// URL. However, do not include the “/metrics/jobs/…” part.
 //
-// You can use just host:port or ip:port as url, in which case 'http://' is
-// added automatically. You can also include the schema in the URL. However, do
-// not include the '/metrics/jobs/...' part.
+// Note that until https://github.com/prometheus/pushgateway/issues/97 is
+// resolved, a “/” character in the job name is prohibited.
+func New(url, job string) *Pusher {
+	var (
+		reg = prometheus.NewRegistry()
+		err error
+	)
+	if !strings.Contains(url, "://") {
+		url = "http://" + url
+	}
+	if strings.HasSuffix(url, "/") {
+		url = url[:len(url)-1]
+	}
+	if strings.Contains(job, "/") {
+		err = fmt.Errorf("job contains '/': %s", job)
+	}
+
+	return &Pusher{
+		error:      err,
+		url:        url,
+		job:        job,
+		grouping:   map[string]string{},
+		gatherers:  prometheus.Gatherers{reg},
+		registerer: reg,
+		client:     &http.Client{},
+	}
+}
+
+// Push collects/gathers all metrics from all Collectors and Gatherers added to
+// this Pusher. Then, it pushes them to the Pushgateway configured while
+// creating this Pusher, using the configured job name and any added grouping
+// labels as grouping key. All previously pushed metrics with the same job and
+// other grouping labels will be replaced with the metrics pushed by this
+// call. (It uses HTTP method “PUT” to push to the Pushgateway.)
 //
-// Note that all previously pushed metrics with the same job and other grouping
-// labels will be replaced with the metrics pushed by this call. (It uses HTTP
-// method 'PUT' to push to the Pushgateway.)
-func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
-	return push(job, grouping, url, g, "PUT")
+// Push returns the first error encountered by any method call (including this
+// one) in the lifetime of the Pusher.
+func (p *Pusher) Push() error {
+	return p.push("PUT")
 }
 
-// AddFromGatherer works like FromGatherer, but only previously pushed metrics
-// with the same name (and the same job and other grouping labels) will be
-// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
-func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
-	return push(job, grouping, url, g, "POST")
+// Add works like push, but only previously pushed metrics with the same name
+// (and the same job and other grouping labels) will be replaced. (It uses HTTP
+// method “POST” to push to the Pushgateway.)
+func (p *Pusher) Add() error {
+	return p.push("POST")
 }
 
-func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
-	if !strings.Contains(pushURL, "://") {
-		pushURL = "http://" + pushURL
-	}
-	if strings.HasSuffix(pushURL, "/") {
-		pushURL = pushURL[:len(pushURL)-1]
-	}
+// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered
+// to push them to the Pushgateway. The gathered metrics must not contain a job
+// label of their own.
+//
+// For convenience, this method returns a pointer to the Pusher itself.
+func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher {
+	p.gatherers = append(p.gatherers, g)
+	return p
+}
 
-	if strings.Contains(job, "/") {
-		return fmt.Errorf("job contains '/': %s", job)
+// Collector adds a Collector to the Pusher, from which metrics will be
+// collected to push them to the Pushgateway. The collected metrics must not
+// contain a job label of their own.
+//
+// For convenience, this method returns a pointer to the Pusher itself.
+func (p *Pusher) Collector(c prometheus.Collector) *Pusher {
+	if p.error == nil {
+		p.error = p.registerer.Register(c)
 	}
-	urlComponents := []string{url.QueryEscape(job)}
-	for ln, lv := range grouping {
-		if !model.LabelName(ln).IsValid() {
-			return fmt.Errorf("grouping label has invalid name: %s", ln)
+	return p
+}
+
+// Grouping adds a label pair to the grouping key of the Pusher, replacing any
+// previously added label pair with the same label name. Note that setting any
+// labels in the grouping key that are already contained in the metrics to push
+// will lead to an error.
+//
+// For convenience, this method returns a pointer to the Pusher itself.
+//
+// Note that until https://github.com/prometheus/pushgateway/issues/97 is
+// resolved, this method does not allow a “/” character in the label value.
+func (p *Pusher) Grouping(name, value string) *Pusher {
+	if p.error == nil {
+		if !model.LabelName(name).IsValid() {
+			p.error = fmt.Errorf("grouping label has invalid name: %s", name)
+			return p
 		}
-		if strings.Contains(lv, "/") {
-			return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
+		if strings.Contains(value, "/") {
+			p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value)
+			return p
 		}
+		p.grouping[name] = value
+	}
+	return p
+}
+
+// Client sets a custom HTTP client for the Pusher. For convenience, this method
+// returns a pointer to the Pusher itself.
+func (p *Pusher) Client(c *http.Client) *Pusher { + p.client = c + return p +} + +// BasicAuth configures the Pusher to use HTTP Basic Authentication with the +// provided username and password. For convenience, this method returns a +// pointer to the Pusher itself. +func (p *Pusher) BasicAuth(username, password string) *Pusher { + p.useBasicAuth = true + p.username = username + p.password = password + return p +} + +func (p *Pusher) push(method string) error { + if p.error != nil { + return p.error + } + urlComponents := []string{url.QueryEscape(p.job)} + for ln, lv := range p.grouping { urlComponents = append(urlComponents, ln, lv) } - pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/")) - mfs, err := g.Gather() + mfs, err := p.gatherers.Gather() if err != nil { return err } @@ -107,7 +205,7 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G if l.GetName() == "job" { return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) } - if _, ok := grouping[l.GetName()]; ok { + if _, ok := p.grouping[l.GetName()]; ok { return fmt.Errorf( "pushed metric %s (%s) already contains grouping label %s", mf.GetName(), m, l.GetName(), @@ -121,8 +219,11 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G if err != nil { return err } + if p.useBasicAuth { + req.SetBasicAuth(p.username, p.password) + } req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) - resp, err := http.DefaultClient.Do(req) + resp, err := p.client.Do(req) if err != nil { return err } @@ -133,40 +234,3 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G } return nil } - -// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, -// it collects from the provided collectors directly. It is a convenient way to -// push only a few metrics. -func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { - return pushCollectors(job, grouping, url, "PUT", collectors...) -} - -// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. -// Instead, it collects from the provided collectors directly. It is a -// convenient way to push only a few metrics. -func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { - return pushCollectors(job, grouping, url, "POST", collectors...) -} - -func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { - r := prometheus.NewRegistry() - for _, collector := range collectors { - if err := r.Register(collector); err != nil { - return err - } - } - return push(job, grouping, url, r, method) -} - -// HostnameGroupingKey returns a label map with the only entry -// {instance=""}. This can be conveniently used as the grouping -// parameter if metrics should be pushed with the hostname as label. The -// returned map is created upon each call so that the caller is free to add more -// labels to the map. 
-func HostnameGroupingKey() map[string]string { - hostname, err := os.Hostname() - if err != nil { - return map[string]string{"instance": "unknown"} - } - return map[string]string{"instance": hostname} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go index 28ed9b74b..34ec334bb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go @@ -11,12 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - package push import ( @@ -24,7 +18,6 @@ import ( "io/ioutil" "net/http" "net/http/httptest" - "os" "testing" "github.com/prometheus/common/expfmt" @@ -40,11 +33,6 @@ func TestPush(t *testing.T) { lastPath string ) - host, err := os.Hostname() - if err != nil { - t.Error(err) - } - // Fake a Pushgateway that always responds with 202. pgwOK := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -98,26 +86,32 @@ func TestPush(t *testing.T) { } wantBody := buf.Bytes() - // PushCollectors, all good. - if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil { + // Push some Collectors, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err != nil { t.Fatal(err) } if lastMethod != "PUT" { - t.Error("want method PUT for PushCollectors, got", lastMethod) + t.Error("want method PUT for Push, got", lastMethod) } if bytes.Compare(lastBody, wantBody) != 0 { t.Errorf("got body %v, want %v", lastBody, wantBody) } - if lastPath != "/metrics/job/testjob/instance/"+host { + if lastPath != "/metrics/job/testjob" { t.Error("unexpected path:", lastPath) } - // PushAddCollectors, with nil grouping, all good. - if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil { + // Add some Collectors, with nil grouping, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Add(); err != nil { t.Fatal(err) } if lastMethod != "POST" { - t.Error("want method POST for PushAddCollectors, got", lastMethod) + t.Error("want method POST for Add, got", lastMethod) } if bytes.Compare(lastBody, wantBody) != 0 { t.Errorf("got body %v, want %v", lastBody, wantBody) @@ -126,8 +120,11 @@ func TestPush(t *testing.T) { t.Error("unexpected path:", lastPath) } - // PushCollectors with a broken PGW. - if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil { + // Push some Collectors with a broken PGW. + if err := New(pgwErr.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { t.Error("push to broken Pushgateway succeeded") } else { if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { @@ -135,22 +132,39 @@ func TestPush(t *testing.T) { } } - // PushCollectors with invalid grouping or job. - if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + // Push some Collectors with invalid grouping or job. + if err := New(pgwOK.URL, "testjob"). + Grouping("foo", "bums"). + Collector(metric1). + Collector(metric2). 
+ Push(); err == nil { t.Error("push with grouping contained in metrics succeeded") } - if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil { + if err := New(pgwOK.URL, "test/job"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { t.Error("push with invalid job value succeeded") } - if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + if err := New(pgwOK.URL, "testjob"). + Grouping("foobar", "bu/ms"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { t.Error("push with invalid grouping succeeded") } - if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + if err := New(pgwOK.URL, "testjob"). + Grouping("foo-bar", "bums"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { t.Error("push with invalid grouping succeeded") } // Push registry, all good. - if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil { + if err := New(pgwOK.URL, "testjob"). + Gatherer(reg). + Push(); err != nil { t.Fatal(err) } if lastMethod != "PUT" { @@ -160,12 +174,16 @@ func TestPush(t *testing.T) { t.Errorf("got body %v, want %v", lastBody, wantBody) } - // PushAdd registry, all good. - if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil { + // Add registry, all good. + if err := New(pgwOK.URL, "testjob"). + Grouping("a", "x"). + Grouping("b", "y"). + Gatherer(reg). + Add(); err != nil { t.Fatal(err) } if lastMethod != "POST" { - t.Error("want method POSTT for PushAdd, got", lastMethod) + t.Error("want method POST for Add, got", lastMethod) } if bytes.Compare(lastBody, wantBody) != 0 { t.Errorf("got body %v, want %v", lastBody, wantBody) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index c84a4420e..bee370364 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "os" + "runtime" "sort" "sync" "unicode/utf8" @@ -36,13 +37,13 @@ const ( // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which -// has a process collector (see NewProcessCollector) and a Go collector (see -// NewGoCollector) already registered. This approach to keep default instances -// as global state mirrors the approach of other packages in the Go standard -// library. Note that there are caveats. Change the variables with caution and -// only if you understand the consequences. Users who want to avoid global state -// altogether should not use the convenience function and act on custom -// instances instead. +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector) already registered. This approach to +// keep default instances as global state mirrors the approach of other packages +// in the Go standard library. Note that there are caveats. Change the variables +// with caution and only if you understand the consequences. Users who want to +// avoid global state altogether should not use the convenience functions and +// act on custom instances instead. 
var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -202,6 +203,13 @@ func (errs MultiError) Error() string { return buf.String() } +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -368,22 +376,12 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { ) r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() + collectors := make(chan Collector, len(r.collectorsByID)) for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) + collectors <- collector } - // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { @@ -392,127 +390,174 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-collectors: + collector.Collect(metricChan) + wg.Done() + default: + return + } + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close the metricChan once all collectors are collected. + go func() { + wg.Wait() + close(metricChan) + }() + // Drain metricChan in case of premature return. defer func() { for range metricChan { } }() - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, - )) - continue - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue +collectLoop: + for { + select { + case metric, ok := <-metricChan: + if !ok { + // metricChan is closed, we are done. + break collectLoop } - // TODO(beorn7): Simplify switch once Desc has type. 
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + default: + if goroutineBudget <= 0 || len(collectors) == 0 { + // All collectors are aleady being worked on or + // we have already as many goroutines started as + // there are collectors. Just process metrics + // from now on. + for metric := range metricChan { + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, )) - continue } - default: - panic("encountered MetricFamily with invalid type") + break collectLoop } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue - } - metricFamiliesByName[desc.fqName] = metricFamily + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) } - if r.pedanticChecksEnabled { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + metricFamiliesByName[desc.fqName] = metricFamily } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil } // Gatherers is a slice of Gatherer instances that implements the Gatherer diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 36c1586a1..65f09223f 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -46,8 +46,8 @@ func (fs FS) XFSStats() (*xfs.Stats, error) { return xfs.ParseStats(f) } -// NFSdClientRPCStats retrieves NFS daemon RPC statistics. -func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) { +// NFSClientRPCStats retrieves NFS client RPC statistics. 
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { f, err := os.Open(fs.Path("net/rpc/nfs")) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go index 3aa32563a..8f568f011 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -178,8 +178,17 @@ func parseV3Stats(v []uint64) (V3Stats, error) { func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { values := int(v[0]) - if len(v[1:]) != values || values < 59 { - return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. + if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues } return ClientV4Stats{ diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go index b5c0b15f3..c0d3a5ad9 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -32,12 +32,12 @@ func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { parts := strings.Fields(scanner.Text()) // require at least if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) + return nil, fmt.Errorf("invalid NFS metric line %q", line) } values, err := util.ParseUint64s(parts[1:]) if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) } switch metricLine := parts[0]; metricLine { @@ -52,15 +52,15 @@ func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { case "proc4": stats.ClientV4Stats, err = parseClientV4Stats(values) default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) } if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) } } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) + return nil, fmt.Errorf("error scanning NFS file: %s", err) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go index d821f1b4c..afe3db47e 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/procfs/nfs" ) -func TestNewNFSdClientRPCStats(t *testing.T) { +func TestNewNFSClientRPCStats(t *testing.T) { tests := []struct { name string content string @@ -32,6 +32,131 @@ func TestNewNFSdClientRPCStats(t *testing.T) { name: "invalid file", content: "invalid", invalid: true, + }, { + name: "good old kernel version file", + content: `net 70 70 69 45 +rpc 1218785755 374636 1218815394 +proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 +proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 +proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 
11 58 32 67 13 28 35 90 1 26 1337 +`, + stats: &nfs.ClientRPCStats{ + Network: nfs.Network{ + NetCount: 70, + UDPCount: 70, + TCPCount: 69, + TCPConnect: 45, + }, + ClientRPC: nfs.ClientRPC{ + RPCCount: 1218785755, + Retransmissions: 374636, + AuthRefreshes: 1218815394, + }, + V2Stats: nfs.V2Stats{ + Null: 16, + GetAttr: 57, + SetAttr: 74, + Root: 52, + Lookup: 71, + ReadLink: 73, + Read: 45, + WrCache: 86, + Write: 0, + Create: 52, + Remove: 83, + Rename: 61, + Link: 17, + SymLink: 53, + MkDir: 50, + RmDir: 23, + ReadDir: 70, + FsStat: 82, + }, + V3Stats: nfs.V3Stats{ + Null: 0, + GetAttr: 1061909262, + SetAttr: 48906, + Lookup: 4077635, + Access: 117661341, + ReadLink: 5, + Read: 29391916, + Write: 2570425, + Create: 2993289, + MkDir: 590, + SymLink: 0, + MkNod: 0, + Remove: 7815, + RmDir: 15, + Rename: 1130, + Link: 0, + ReadDir: 3983, + ReadDirPlus: 92385, + FsStat: 13332, + FsInfo: 2, + PathConf: 1, + Commit: 23729, }, + ClientV4Stats: nfs.ClientV4Stats{ + Null: 98, + Read: 51, + Write: 54, + Commit: 83, + Open: 85, + OpenConfirm: 23, + OpenNoattr: 24, + OpenDowngrade: 1, + Close: 28, + Setattr: 73, + FsInfo: 68, + Renew: 83, + SetClientId: 12, + SetClientIdConfirm: 84, + Lock: 39, + Lockt: 68, + Locku: 59, + Access: 58, + Getattr: 88, + Lookup: 29, + LookupRoot: 74, + Remove: 69, + Rename: 96, + Link: 21, + Symlink: 84, + Create: 15, + Pathconf: 53, + StatFs: 86, + ReadLink: 54, + ReadDir: 66, + ServerCaps: 56, + DelegReturn: 97, + GetAcl: 36, + SetAcl: 49, + FsLocations: 32, + ReleaseLockowner: 85, + Secinfo: 81, + FsidPresent: 11, + ExchangeId: 58, + CreateSession: 32, + DestroySession: 67, + Sequence: 13, + GetLeaseTime: 28, + ReclaimComplete: 35, + LayoutGet: 90, + GetDeviceInfo: 1, + LayoutCommit: 26, + LayoutReturn: 1337, + SecinfoNoName: 0, + TestStateId: 0, + FreeStateId: 0, + GetDeviceList: 0, + BindConnToSession: 0, + DestroyClientId: 0, + Seek: 0, + Allocate: 0, + DeAllocate: 0, + LayoutStats: 0, + Clone: 0, + }, + }, }, { name: "good file", content: `net 18628 0 18628 6 diff --git a/vendor/github.com/rsc/letsencrypt/LICENSE b/vendor/github.com/rsc/letsencrypt/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/rsc/letsencrypt/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rsc/letsencrypt/README b/vendor/github.com/rsc/letsencrypt/README deleted file mode 100644 index 575ae16a8..000000000 --- a/vendor/github.com/rsc/letsencrypt/README +++ /dev/null @@ -1,177 +0,0 @@ -package letsencrypt // import "rsc.io/letsencrypt" - -Package letsencrypt obtains TLS certificates from LetsEncrypt.org. - -LetsEncrypt.org is a service that issues free SSL/TLS certificates to -servers that can prove control over the given domain's DNS records or the -servers pointed at by those records. - - -Warning - -Like any other random code you find on the internet, this package should not -be relied upon in important, production systems without thorough testing to -ensure that it meets your needs. - -In the long term you should be using -https://golang.org/x/crypto/acme/autocert instead of this package. Send -improvements there, not here. - -This is a package that I wrote for my own personal web sites (swtch.com, -rsc.io) in a hurry when my paid-for SSL certificate was expiring. It has no -tests, has barely been used, and there is some anecdotal evidence that it -does not properly renew certificates in a timely fashion, so servers that -run for more than 3 months may run into trouble. I don't run this code -anymore: to simplify maintenance, I moved the sites off of Ubuntu VMs and -onto Google App Engine, configured with inexpensive long-term certificates -purchased from cheapsslsecurity.com. - -This package was interesting primarily as an example of how simple the API -for using LetsEncrypt.org could be made, in contrast to the low-level -implementations that existed at the time. In that respect, it helped inform -the design of the golang.org/x/crypto/acme/autocert package. - - -Quick Start - -A complete HTTP/HTTPS web server using TLS certificates from -LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS -certificates in a file letsencrypt.cache across server restarts. - - package main - - import ( - "fmt" - "log" - "net/http" - "rsc.io/letsencrypt" - ) - - func main() { - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, TLS!\n") - }) - var m letsencrypt.Manager - if err := m.CacheFile("letsencrypt.cache"); err != nil { - log.Fatal(err) - } - log.Fatal(m.Serve()) - } - - -Overview - -The fundamental type in this package is the Manager, which manages obtaining -and refreshing a collection of TLS certificates, typically for use by an -HTTPS server. The example above shows the most basic use of a Manager. The -use can be customized by calling additional methods of the Manager. - - -Registration - -A Manager m registers anonymously with LetsEncrypt.org, including agreeing -to the letsencrypt.org terms of service, the first time it needs to obtain a -certificate. To register with a particular email address and with the option -of a prompt for agreement with the terms of service, call m.Register. 
- - -GetCertificate - -The Manager's GetCertificate method returns certificates from the Manager's -cache, filling the cache by requesting certificates from LetsEncrypt.org. In -this way, a server with a tls.Config.GetCertificate set to m.GetCertificate -will demand load a certificate for any host name it serves. To force loading -of certificates ahead of time, install m.GetCertificate as before but then -call m.Cert for each host name. - -A Manager can only obtain a certificate for a given host name if it can -prove control of that host name to LetsEncrypt.org. By default it proves -control by answering an HTTPS-based challenge: when the LetsEncrypt.org -servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake -must use m.GetCertificate to obtain a per-host certificate. The most common -way to satisfy this requirement is for the host name to resolve to the IP -address of a (single) computer running m.ServeHTTPS, or at least running a -Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. -However, other configurations are possible. For example, a group of machines -could use an implementation of tls.Config.GetCertificate that cached -certificates but handled cache misses by making RPCs to a Manager m on an -elected leader machine. - -In typical usage, then, the setting of tls.Config.GetCertificate to -m.GetCertificate serves two purposes: it provides certificates to the TLS -server for ordinary serving, and it also answers challenges to prove -ownership of the domains in order to obtain those certificates. - -To force the loading of a certificate for a given host into the Manager's -cache, use m.Cert. - - -Persistent Storage - -If a server always starts with a zero Manager m, the server effectively -fetches a new certificate for each of its host name from LetsEncrypt.org on -each restart. This is unfortunate both because the server cannot start if -LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often -it will issue a certificate for a given host name (at time of writing, the -limit is 5 per week for a given host name). To save server state proactively -to a cache file and to reload the server state from that same file when -creating a new manager, call m.CacheFile with the name of the file to use. - -For alternate storage uses, m.Marshal returns the current state of the -Manager as an opaque string, m.Unmarshal sets the state of the Manager using -a string previously returned by m.Marshal (usually a different m), and -m.Watch returns a channel that receives notifications about state changes. - - -Limits - -To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager -limits all its interactions to at most one request every minute, with an -initial allowed burst of 20 requests. - -By default, if GetCertificate is asked for a certificate it does not have, -it will in turn ask LetsEncrypt.org for that certificate. This opens a -potential attack where attackers connect to a server by IP address and -pretend to be asking for an incorrect host name. Then GetCertificate will -attempt to obtain a certificate for that host, incorrectly, eventually -hitting LetsEncrypt.org's rate limit for certificate requests and making it -impossible to obtain actual certificates. Because servers hold certificates -for months at a time, however, an attack would need to be sustained over a -time period of at least a month in order to cause real problems. 
- -To mitigate this kind of attack, a given Manager limits itself to an average -of one certificate request for a new host every three hours, with an initial -allowed burst of up to 20 requests. Long-running servers will therefore stay -within the LetsEncrypt.org limit of 300 failed requests per month. -Certificate refreshes are not subject to this limit. - -To eliminate the attack entirely, call m.SetHosts to enumerate the exact set -of hosts that are allowed in certificate requests. - - -Web Servers - -The basic requirement for use of a Manager is that there be an HTTPS server -running on port 443 and calling m.GetCertificate to obtain TLS certificates. -Using standard primitives, the way to do this is: - - srv := &http.Server{ - Addr: ":https", - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - srv.ListenAndServeTLS("", "") - -However, this pattern of serving HTTPS with demand-loaded TLS certificates -comes up enough to wrap into a single method m.ServeHTTPS. - -Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS -URLs. That functionality is provided by RedirectHTTP. - -The combination of serving HTTPS with demand-loaded TLS certificates and -serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in -the original example above. - -func RedirectHTTP(w http.ResponseWriter, r *http.Request) -type Manager struct { ... } diff --git a/vendor/github.com/rsc/letsencrypt/lets.go b/vendor/github.com/rsc/letsencrypt/lets.go deleted file mode 100644 index 485abfa1c..000000000 --- a/vendor/github.com/rsc/letsencrypt/lets.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package letsencrypt obtains TLS certificates from LetsEncrypt.org. -// -// LetsEncrypt.org is a service that issues free SSL/TLS certificates to servers -// that can prove control over the given domain's DNS records or -// the servers pointed at by those records. -// -// Warning -// -// Like any other random code you find on the internet, this package should -// not be relied upon in important, production systems without thorough testing -// to ensure that it meets your needs. -// -// In the long term you should be using -// https://golang.org/x/crypto/acme/autocert instead of this package. -// Send improvements there, not here. -// -// This is a package that I wrote for my own personal web sites (swtch.com, rsc.io) -// in a hurry when my paid-for SSL certificate was expiring. It has no tests, -// has barely been used, and there is some anecdotal evidence that it does -// not properly renew certificates in a timely fashion, so servers that run for -// more than 3 months may run into trouble. -// I don't run this code anymore: to simplify maintenance, I moved the sites -// off of Ubuntu VMs and onto Google App Engine, configured with inexpensive -// long-term certificates purchased from cheapsslsecurity.com. -// -// This package was interesting primarily as an example of how simple the API -// for using LetsEncrypt.org could be made, in contrast to the low-level -// implementations that existed at the time. In that respect, it helped inform -// the design of the golang.org/x/crypto/acme/autocert package. 
-// -// Quick Start -// -// A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org, -// redirecting all HTTP access to HTTPS, and maintaining TLS certificates in a file -// letsencrypt.cache across server restarts. -// -// package main -// -// import ( -// "fmt" -// "log" -// "net/http" -// "rsc.io/letsencrypt" -// ) -// -// func main() { -// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprintf(w, "Hello, TLS!\n") -// }) -// var m letsencrypt.Manager -// if err := m.CacheFile("letsencrypt.cache"); err != nil { -// log.Fatal(err) -// } -// log.Fatal(m.Serve()) -// } -// -// Overview -// -// The fundamental type in this package is the Manager, which -// manages obtaining and refreshing a collection of TLS certificates, -// typically for use by an HTTPS server. -// The example above shows the most basic use of a Manager. -// The use can be customized by calling additional methods of the Manager. -// -// Registration -// -// A Manager m registers anonymously with LetsEncrypt.org, including agreeing to -// the letsencrypt.org terms of service, the first time it needs to obtain a certificate. -// To register with a particular email address and with the option of a -// prompt for agreement with the terms of service, call m.Register. -// -// GetCertificate -// -// The Manager's GetCertificate method returns certificates -// from the Manager's cache, filling the cache by requesting certificates -// from LetsEncrypt.org. In this way, a server with a tls.Config.GetCertificate -// set to m.GetCertificate will demand load a certificate for any host name -// it serves. To force loading of certificates ahead of time, install m.GetCertificate -// as before but then call m.Cert for each host name. -// -// A Manager can only obtain a certificate for a given host name if it can prove -// control of that host name to LetsEncrypt.org. By default it proves control by -// answering an HTTPS-based challenge: when -// the LetsEncrypt.org servers connect to the named host on port 443 (HTTPS), -// the TLS SNI handshake must use m.GetCertificate to obtain a per-host certificate. -// The most common way to satisfy this requirement is for the host name to -// resolve to the IP address of a (single) computer running m.ServeHTTPS, -// or at least running a Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. -// However, other configurations are possible. For example, a group of machines -// could use an implementation of tls.Config.GetCertificate that cached -// certificates but handled cache misses by making RPCs to a Manager m -// on an elected leader machine. -// -// In typical usage, then, the setting of tls.Config.GetCertificate to m.GetCertificate -// serves two purposes: it provides certificates to the TLS server for ordinary serving, -// and it also answers challenges to prove ownership of the domains in order to -// obtain those certificates. -// -// To force the loading of a certificate for a given host into the Manager's cache, -// use m.Cert. -// -// Persistent Storage -// -// If a server always starts with a zero Manager m, the server effectively fetches -// a new certificate for each of its host name from LetsEncrypt.org on each restart. -// This is unfortunate both because the server cannot start if LetsEncrypt.org is -// unavailable and because LetsEncrypt.org limits how often it will issue a certificate -// for a given host name (at time of writing, the limit is 5 per week for a given host name). 
-// To save server state proactively to a cache file and to reload the server state from -// that same file when creating a new manager, call m.CacheFile with the name of -// the file to use. -// -// For alternate storage uses, m.Marshal returns the current state of the Manager -// as an opaque string, m.Unmarshal sets the state of the Manager using a string -// previously returned by m.Marshal (usually a different m), and m.Watch returns -// a channel that receives notifications about state changes. -// -// Limits -// -// To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager limits all its -// interactions to at most one request every minute, with an initial allowed burst of -// 20 requests. -// -// By default, if GetCertificate is asked for a certificate it does not have, it will in turn -// ask LetsEncrypt.org for that certificate. This opens a potential attack where attackers -// connect to a server by IP address and pretend to be asking for an incorrect host name. -// Then GetCertificate will attempt to obtain a certificate for that host, incorrectly, -// eventually hitting LetsEncrypt.org's rate limit for certificate requests and making it -// impossible to obtain actual certificates. Because servers hold certificates for months -// at a time, however, an attack would need to be sustained over a time period -// of at least a month in order to cause real problems. -// -// To mitigate this kind of attack, a given Manager limits -// itself to an average of one certificate request for a new host every three hours, -// with an initial allowed burst of up to 20 requests. -// Long-running servers will therefore stay -// within the LetsEncrypt.org limit of 300 failed requests per month. -// Certificate refreshes are not subject to this limit. -// -// To eliminate the attack entirely, call m.SetHosts to enumerate the exact set -// of hosts that are allowed in certificate requests. -// -// Web Servers -// -// The basic requirement for use of a Manager is that there be an HTTPS server -// running on port 443 and calling m.GetCertificate to obtain TLS certificates. -// Using standard primitives, the way to do this is: -// -// srv := &http.Server{ -// Addr: ":https", -// TLSConfig: &tls.Config{ -// GetCertificate: m.GetCertificate, -// }, -// } -// srv.ListenAndServeTLS("", "") -// -// However, this pattern of serving HTTPS with demand-loaded TLS certificates -// comes up enough to wrap into a single method m.ServeHTTPS. -// -// Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS URLs. -// That functionality is provided by RedirectHTTP. -// -// The combination of serving HTTPS with demand-loaded TLS certificates and -// serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in -// the original example above. -// -package letsencrypt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/time/rate" - - "github.com/xenolf/lego/acme" -) - -const letsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" -const debug = false - -// A Manager m takes care of obtaining and refreshing a collection of TLS certificates -// obtained by LetsEncrypt.org. -// The zero Manager is not yet registered with LetsEncrypt.org and has no TLS certificates -// but is nonetheless ready for use. 
-// See the package comment for an overview of how to use a Manager. -type Manager struct { - mu sync.Mutex - state state - rateLimit *rate.Limiter - newHostLimit *rate.Limiter - certCache map[string]*cacheEntry - certTokens map[string]*tls.Certificate - watchChan chan struct{} -} - -// Serve runs an HTTP/HTTPS web server using TLS certificates obtained by the manager. -// The HTTP server redirects all requests to the HTTPS server. -// The HTTPS server obtains TLS certificates as needed and responds to requests -// by invoking http.DefaultServeMux. -// -// Serve does not return unitil the HTTPS server fails to start or else stops. -// Either way, Serve can only return a non-nil error, never nil. -func (m *Manager) Serve() error { - l, err := net.Listen("tcp", ":http") - if err != nil { - return err - } - defer l.Close() - go http.Serve(l, http.HandlerFunc(RedirectHTTP)) - - return m.ServeHTTPS() -} - -// ServeHTTPS runs an HTTPS web server using TLS certificates obtained by the manager. -// The HTTPS server obtains TLS certificates as needed and responds to requests -// by invoking http.DefaultServeMux. -// ServeHTTPS does not return unitil the HTTPS server fails to start or else stops. -// Either way, ServeHTTPS can only return a non-nil error, never nil. -func (m *Manager) ServeHTTPS() error { - srv := &http.Server{ - Addr: ":https", - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - return srv.ListenAndServeTLS("", "") -} - -// RedirectHTTP is an HTTP handler (suitable for use with http.HandleFunc) -// that responds to all requests by redirecting to the same URL served over HTTPS. -// It should only be invoked for requests received over HTTP. -func RedirectHTTP(w http.ResponseWriter, r *http.Request) { - if r.TLS != nil || r.Host == "" { - http.Error(w, "not found", 404) - } - - u := r.URL - u.Host = r.Host - u.Scheme = "https" - http.Redirect(w, r, u.String(), 302) -} - -// state is the serializable state for the Manager. -// It also implements acme.User. -type state struct { - Email string - Reg *acme.RegistrationResource - Key string - key *ecdsa.PrivateKey - Hosts []string - Certs map[string]stateCert -} - -func (s *state) GetEmail() string { return s.Email } -func (s *state) GetRegistration() *acme.RegistrationResource { return s.Reg } -func (s *state) GetPrivateKey() crypto.PrivateKey { return s.key } - -type stateCert struct { - Cert string - Key string -} - -func (cert stateCert) toTLS() (*tls.Certificate, error) { - c, err := tls.X509KeyPair([]byte(cert.Cert), []byte(cert.Key)) - if err != nil { - return nil, err - } - return &c, err -} - -type cacheEntry struct { - host string - m *Manager - - mu sync.Mutex - cert *tls.Certificate - timeout time.Time - refreshing bool - err error -} - -func (m *Manager) init() { - m.mu.Lock() - if m.certCache == nil { - m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20) - m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20) - m.certCache = map[string]*cacheEntry{} - m.certTokens = map[string]*tls.Certificate{} - m.watchChan = make(chan struct{}, 1) - m.watchChan <- struct{}{} - } - m.mu.Unlock() -} - -// Watch returns the manager's watch channel, -// which delivers a notification after every time the -// manager's state (as exposed by Marshal and Unmarshal) changes. -// All calls to Watch return the same watch channel. 
-// -// The watch channel includes notifications about changes -// before the first call to Watch, so that in the pattern below, -// the range loop executes once immediately, saving -// the result of setup (along with any background updates that -// may have raced in quickly). -// -// m := new(letsencrypt.Manager) -// setup(m) -// go backgroundUpdates(m) -// for range m.Watch() { -// save(m.Marshal()) -// } -// -func (m *Manager) Watch() <-chan struct{} { - m.init() - m.updated() - return m.watchChan -} - -func (m *Manager) updated() { - select { - case m.watchChan <- struct{}{}: - default: - } -} - -func (m *Manager) CacheFile(name string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return err - } - f.Close() - data, err := ioutil.ReadFile(name) - if err != nil { - return err - } - if len(data) > 0 { - if err := m.Unmarshal(string(data)); err != nil { - return err - } - } - go func() { - for range m.Watch() { - err := ioutil.WriteFile(name, []byte(m.Marshal()), 0600) - if err != nil { - log.Printf("writing letsencrypt cache: %v", err) - } - } - }() - return nil -} - -// Registered reports whether the manager has registered with letsencrypt.org yet. -func (m *Manager) Registered() bool { - m.init() - m.mu.Lock() - defer m.mu.Unlock() - return m.registered() -} - -func (m *Manager) registered() bool { - return m.state.Reg != nil && m.state.Reg.Body.Agreement != "" -} - -// Register registers the manager with letsencrypt.org, using the given email address. -// Registration may require agreeing to the letsencrypt.org terms of service. -// If so, Register calls prompt(url) where url is the URL of the terms of service. -// Prompt should report whether the caller agrees to the terms. -// A nil prompt func is taken to mean that the user always agrees. -// The email address is sent to LetsEncrypt.org but otherwise unchecked; -// it can be omitted by passing the empty string. -// -// Calling Register is only required to make sure registration uses a -// particular email address or to insert an explicit prompt into the -// registration sequence. If the manager is not registered, it will -// automatically register with no email address and automatic -// agreement to the terms of service at the first call to Cert or GetCertificate. -func (m *Manager) Register(email string, prompt func(string) bool) error { - m.init() - m.mu.Lock() - defer m.mu.Unlock() - - return m.register(email, prompt) -} - -func (m *Manager) register(email string, prompt func(string) bool) error { - if m.registered() { - return fmt.Errorf("already registered") - } - m.state.Email = email - if m.state.key == nil { - key, err := newKey() - if err != nil { - return fmt.Errorf("generating key: %v", err) - } - Key, err := marshalKey(key) - if err != nil { - return fmt.Errorf("generating key: %v", err) - } - m.state.key = key - m.state.Key = string(Key) - } - - c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) - if err != nil { - return fmt.Errorf("create client: %v", err) - } - reg, err := c.Register() - if err != nil { - return fmt.Errorf("register: %v", err) - } - - m.state.Reg = reg - if reg.Body.Agreement == "" { - if prompt != nil && !prompt(reg.TosURL) { - return fmt.Errorf("did not agree to TOS") - } - if err := c.AgreeToTOS(); err != nil { - return fmt.Errorf("agreeing to TOS: %v", err) - } - } - - m.updated() - - return nil -} - -// Marshal returns an encoding of the manager's state, -// suitable for writing to disk and reloading by calling Unmarshal. 
-// The state includes registration status, the configured host list -// from SetHosts, and all known certificates, including their private -// cryptographic keys. -// Consequently, the state should be kept private. -func (m *Manager) Marshal() string { - m.init() - m.mu.Lock() - js, err := json.MarshalIndent(&m.state, "", "\t") - m.mu.Unlock() - if err != nil { - panic("unexpected json.Marshal failure") - } - return string(js) -} - -// Unmarshal restores the state encoded by a previous call to Marshal -// (perhaps on a different Manager in a different program). -func (m *Manager) Unmarshal(enc string) error { - m.init() - var st state - if err := json.Unmarshal([]byte(enc), &st); err != nil { - return err - } - if st.Key != "" { - key, err := unmarshalKey(st.Key) - if err != nil { - return err - } - st.key = key - } - m.mu.Lock() - m.state = st - m.mu.Unlock() - for host, cert := range m.state.Certs { - c, err := cert.toTLS() - if err != nil { - log.Printf("letsencrypt: ignoring entry for %s: %v", host, err) - continue - } - m.certCache[host] = &cacheEntry{host: host, m: m, cert: c} - } - m.updated() - return nil -} - -// SetHosts sets the manager's list of known host names. -// If the list is non-nil, the manager will only ever attempt to acquire -// certificates for host names on the list. -// If the list is nil, the manager does not restrict the hosts it will -// ask for certificates for. -func (m *Manager) SetHosts(hosts []string) { - m.init() - m.mu.Lock() - m.state.Hosts = append(m.state.Hosts[:0], hosts...) - m.mu.Unlock() - m.updated() -} - -// GetCertificate can be placed a tls.Config's GetCertificate field to make -// the TLS server use Let's Encrypt certificates. -// Each time a client connects to the TLS server expecting a new host name, -// the TLS server's call to GetCertificate will trigger an exchange with the -// Let's Encrypt servers to obtain that certificate, subject to the manager rate limits. -// -// As noted in the Manager's documentation comment, -// to obtain a certificate for a given host name, that name -// must resolve to a computer running a TLS server on port 443 -// that obtains TLS SNI certificates by calling m.GetCertificate. -// In the standard usage, then, installing m.GetCertificate in the tls.Config -// both automatically provisions the TLS certificates needed for -// ordinary HTTPS service and answers the challenges from LetsEncrypt.org. -func (m *Manager) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - m.init() - - host := clientHello.ServerName - - if debug { - log.Printf("GetCertificate %s", host) - } - - if strings.HasSuffix(host, ".acme.invalid") { - m.mu.Lock() - cert := m.certTokens[host] - m.mu.Unlock() - if cert == nil { - return nil, fmt.Errorf("unknown host") - } - return cert, nil - } - - return m.Cert(host) -} - -// Cert returns the certificate for the given host name, obtaining a new one if necessary. -// -// As noted in the documentation for Manager and for the GetCertificate method, -// obtaining a certificate requires that m.GetCertificate be associated with host. -// In most servers, simply starting a TLS server with a configuration referring -// to m.GetCertificate is sufficient, and Cert need not be called. -// -// The main use of Cert is to force the manager to obtain a certificate -// for a particular host name ahead of time. 
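Taken together, CacheFile, Register, SetHosts, Cert, Serve, and GetCertificate above are the whole workflow this package expects from a caller. A minimal sketch of that wiring, for reference only: the cache path, email address, and host names are placeholders, and the import uses the vendored github.com/rsc/letsencrypt path.

    package main

    import (
        "fmt"
        "log"

        "github.com/rsc/letsencrypt"
    )

    func main() {
        var m letsencrypt.Manager

        // Load any saved registration, keys, and certificates, and keep the
        // file up to date as the manager's state changes.
        if err := m.CacheFile("letsencrypt.cache"); err != nil {
            log.Fatal(err)
        }

        // Optional: register with an explicit email address and show the TOS URL.
        if !m.Registered() {
            err := m.Register("admin@example.com", func(tosURL string) bool {
                fmt.Println("agreeing to", tosURL)
                return true
            })
            if err != nil {
                log.Fatal(err)
            }
        }

        // Only ever request certificates for these names.
        m.SetHosts([]string{"example.com", "www.example.com"})

        // Optionally force issuance before the first TLS client arrives.
        if _, err := m.Cert("example.com"); err != nil {
            log.Printf("pre-fetching certificate: %v", err)
        }

        // Serve blocks: port 80 redirects to HTTPS, port 443 answers TLS
        // handshakes via m.GetCertificate.
        log.Fatal(m.Serve())
    }
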
-func (m *Manager) Cert(host string) (*tls.Certificate, error) { - host = strings.ToLower(host) - if debug { - log.Printf("Cert %s", host) - } - - m.init() - m.mu.Lock() - if !m.registered() { - m.register("", nil) - } - - ok := false - if m.state.Hosts == nil { - ok = true - } else { - for _, h := range m.state.Hosts { - if host == h { - ok = true - break - } - } - } - if !ok { - m.mu.Unlock() - return nil, fmt.Errorf("unknown host") - } - - // Otherwise look in our cert cache. - entry, ok := m.certCache[host] - if !ok { - r := m.rateLimit.Reserve() - ok := r.OK() - if ok { - ok = m.newHostLimit.Allow() - if !ok { - r.Cancel() - } - } - if !ok { - m.mu.Unlock() - return nil, fmt.Errorf("rate limited") - } - entry = &cacheEntry{host: host, m: m} - m.certCache[host] = entry - } - m.mu.Unlock() - - entry.mu.Lock() - defer entry.mu.Unlock() - entry.init() - if entry.err != nil { - return nil, entry.err - } - return entry.cert, nil -} - -func (e *cacheEntry) init() { - if e.err != nil && time.Now().Before(e.timeout) { - return - } - if e.cert != nil { - if e.timeout.IsZero() { - t, err := certRefreshTime(e.cert) - if err != nil { - e.err = err - e.timeout = time.Now().Add(1 * time.Minute) - e.cert = nil - return - } - e.timeout = t - } - if time.Now().After(e.timeout) && !e.refreshing { - e.refreshing = true - go e.refresh() - } - return - } - - cert, refreshTime, err := e.m.verify(e.host) - e.m.mu.Lock() - e.m.certCache[e.host] = e - e.m.mu.Unlock() - e.install(cert, refreshTime, err) -} - -func (e *cacheEntry) install(cert *tls.Certificate, refreshTime time.Time, err error) { - e.cert = nil - e.timeout = time.Time{} - e.err = nil - - if err != nil { - e.err = err - e.timeout = time.Now().Add(1 * time.Minute) - return - } - - e.cert = cert - e.timeout = refreshTime -} - -func (e *cacheEntry) refresh() { - e.m.rateLimit.Wait(context.Background()) - cert, refreshTime, err := e.m.verify(e.host) - - e.mu.Lock() - defer e.mu.Unlock() - e.refreshing = false - if err == nil { - e.install(cert, refreshTime, nil) - } -} - -func (m *Manager) verify(host string) (cert *tls.Certificate, refreshTime time.Time, err error) { - c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) - if err != nil { - return - } - if err = c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}); err != nil { - return - } - c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}) - c.ExcludeChallenges([]acme.Challenge{acme.HTTP01}) - acmeCert, errmap := c.ObtainCertificate([]string{host}, true, nil, false) - if len(errmap) > 0 { - if debug { - log.Printf("ObtainCertificate %v => %v", host, errmap) - } - err = fmt.Errorf("%v", errmap) - return - } - entryCert := stateCert{ - Cert: string(acmeCert.Certificate), - Key: string(acmeCert.PrivateKey), - } - cert, err = entryCert.toTLS() - if err != nil { - if debug { - log.Printf("ObtainCertificate %v toTLS failure: %v", host, err) - } - err = err - return - } - if refreshTime, err = certRefreshTime(cert); err != nil { - return - } - - m.mu.Lock() - if m.state.Certs == nil { - m.state.Certs = make(map[string]stateCert) - } - m.state.Certs[host] = entryCert - m.mu.Unlock() - m.updated() - - return cert, refreshTime, nil -} - -func certRefreshTime(cert *tls.Certificate) (time.Time, error) { - xc, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - if debug { - log.Printf("ObtainCertificate to X.509 failure: %v", err) - } - return time.Time{}, err - } - t := xc.NotBefore.Add(xc.NotAfter.Sub(xc.NotBefore) / 2) - monthEarly := xc.NotAfter.Add(-30 * 24 * time.Hour) - 
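// In effect, the refresh time computed here is the later of the midpoint of
// the certificate's validity window and 30 days before NotAfter, so a 90-day
// certificate is refreshed roughly 30 days before it expires.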
if t.Before(monthEarly) { - t = monthEarly - } - return t, nil -} - -// tlsProvider implements acme.ChallengeProvider for TLS handshake challenges. -type tlsProvider struct { - m *Manager -} - -func (p tlsProvider) Present(domain, token, keyAuth string) error { - cert, dom, err := acme.TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - p.m.mu.Lock() - p.m.certTokens[dom] = &cert - p.m.mu.Unlock() - - return nil -} - -func (p tlsProvider) CleanUp(domain, token, keyAuth string) error { - _, dom, err := acme.TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - p.m.mu.Lock() - delete(p.m.certTokens, dom) - p.m.mu.Unlock() - - return nil -} - -func marshalKey(key *ecdsa.PrivateKey) ([]byte, error) { - data, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}), nil -} - -func unmarshalKey(text string) (*ecdsa.PrivateKey, error) { - b, _ := pem.Decode([]byte(text)) - if b == nil { - return nil, fmt.Errorf("unmarshalKey: missing key") - } - if b.Type != "EC PRIVATE KEY" { - return nil, fmt.Errorf("unmarshalKey: found %q, not %q", b.Type, "EC PRIVATE KEY") - } - k, err := x509.ParseECPrivateKey(b.Bytes) - if err != nil { - return nil, fmt.Errorf("unmarshalKey: %v", err) - } - return k, nil -} - -func newKey() (*ecdsa.PrivateKey, error) { - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.gitignore deleted file mode 100644 index 776cd950c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.6 -tags -test.out -a.out diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.travis.yml deleted file mode 100644 index bb8b8d40b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.7.x - - 1.8.x - - tip - -before_install: - # don't use the miekg/dns when testing forks - - mkdir -p $GOPATH/src/github.com/miekg - - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true - -script: - - go test -race -v -bench=. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/AUTHORS deleted file mode 100644 index 196568352..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Miek Gieben diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/CONTRIBUTORS deleted file mode 100644 index f77e8a895..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Alex A. Skinner -Andrew Tunnell-Jones -Ask Bjørn Hansen -Dave Cheney -Dusty Wilson -Marek Majkowski -Peter van Dijk -Omri Bahumi -Alex Sergeyev diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/COPYRIGHT deleted file mode 100644 index 35702b10e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/COPYRIGHT +++ /dev/null @@ -1,9 +0,0 @@ -Copyright 2009 The Go Authors. 
All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben - -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. - -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7fe..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/README.md b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/README.md deleted file mode 100644 index 32a49cbf5..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/README.md +++ /dev/null @@ -1,162 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) -[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All widely used Resource Records are -supported, including the DNSSEC types. It follows a lean and mean philosophy. -If there is stuff you should know as a DNS programmer there isn't a convenience -function for it. Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -We try to keep the "master" branch as sane as possible and at the bleeding edge -of standards, avoiding breaking changes wherever reasonable. 
We support the last -two versions of Go, currently: 1.7 and 1.8. - -# Goals - -* KISS; -* Fast; -* Small API. If it's easy to code in Go, don't make a function for it. - -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://github.com/coredns/coredns -* https://cloudflare.com -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/hashicorp/consul -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ -* https://play.google.com/store/apps/details?id=com.turbobytes.dig -* https://github.com/fcambus/statzone -* https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for http://public-dns.info/ -* https://namesmith.io -* https://github.com/miekg/unbound -* https://github.com/miekg/exdns -* https://dnslookup.org -* https://github.com/looterz/grimd -* https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns -* http://quilt.io -* https://github.com/ipdcode/hades (JD.COM) -* https://github.com/StackExchange/dnscontrol/ -* https://www.dnsperf.com/ -* https://dnssectest.net/ - -Send pull request if you want to be listed here. - -# Features - -* UDP/TCP queries, IPv4 and IPv6; -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; -* Fast: - * Reply speed around ~ 80K qps (faster hardware results in more qps); - * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; -* Server side programming (mimicking the net/http package); -* Client side programming; -* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; -* EDNS0, NSID, Cookies; -* AXFR/IXFR; -* TSIG, SIG(0); -* DNS over TLS: optional encrypted connection between client and server; -* DNS name compression; -* Depends only on the standard library. - -Have fun! - -Miek Gieben - 2010-2012 - - -# Building - -Building is done with the `go` tool. If you have setup your GOPATH -correctly, the following should work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this also will show -when you call `godoc github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. 
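A minimal sketch of the client-side API this README describes; the resolver address 8.8.8.8:53 is only an example and any reachable DNS server works:

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeSOA)
        m.SetEdns0(4096, true) // advertise a larger UDP buffer (see SetEdns0)

        c := new(dns.Client)
        in, rtt, err := c.Exchange(m, "8.8.8.8:53")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(rtt, in.Answer)
    }
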
- -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record (removed the record) -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6840 - Clarifications and Implementation Notes for DNS Security -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* 7828 - edns-tcp-keepalive EDNS0 Option -* 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) -* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) -* xxxx - EDNS0 DNS Update Lease (draft) - -## Loosely based upon - -* `ldns` -* `NSD` -* `Net::DNS` -* `GRONG` diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client.go deleted file mode 100644 index 301dab9c1..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client.go +++ /dev/null @@ -1,467 +0,0 @@ -package dns - -// A client implementation. - -import ( - "bytes" - "crypto/tls" - "encoding/binary" - "io" - "net" - "time" -) - -const dnsTimeout time.Duration = 2 * time.Second -const tcpIdleTimeout time.Duration = 8 * time.Second - -// A Conn represents a connection to a DNS server. -type Conn struct { - net.Conn // a net.Conn holding the connection - UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - rtt time.Duration - t time.Time - tsigRequestMAC string -} - -// A Client defines parameters for a DNS client. 
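A Client can also speak DNS over TLS via the Net, Timeout, and TLSConfig fields declared just below. A hedged sketch from a caller's perspective; it assumes imports of crypto/tls, time, and the dns package, and the resolver name and address are placeholders:

    // queryOverTLS sends a single query over a "tcp-tls" connection.
    func queryOverTLS() (*dns.Msg, error) {
        c := &dns.Client{
            Net:       "tcp-tls",
            Timeout:   5 * time.Second,
            TLSConfig: &tls.Config{ServerName: "dns.example.net"},
        }
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeA)
        r, _, err := c.Exchange(m, "dns.example.net:853")
        return r, err
    }
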
-type Client struct { - Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - TLSConfig *tls.Config // TLS connection configuration - Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight -} - -// Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor -// will it fall back to TCP in case of truncation. -// See client.Exchange for more information on setting larger buffer sizes. -func Exchange(m *Msg, a string) (r *Msg, err error) { - var co *Conn - co, err = DialTimeout("udp", a, dnsTimeout) - if err != nil { - return nil, err - } - - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - - co.SetWriteDeadline(time.Now().Add(dnsTimeout)) - if err = co.WriteMsg(m); err != nil { - return nil, err - } - - co.SetReadDeadline(time.Now().Add(dnsTimeout)) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. -// This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -// It is up to the caller to create a message that allows for larger responses to be -// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger -// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit -// of 512 bytes. -func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchange(m, a) - } - // This adds a bunch of garbage, TODO(miek). 
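// Note: when SingleInflight is set, the key built below (the query name plus
// the textual type and class, with "nop" as the fallback when no mapping
// exists) lets concurrent identical queries share one in-flight exchange;
// shared results are copied before being returned to each caller.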
- t := "nop" - if t1, ok := TypeToString[m.Question[0].Qtype]; ok { - t = t1 - } - cl := "nop" - if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { - cl = cl1 - } - r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - return c.exchange(m, a) - }) - if r != nil && shared { - r = r.Copy() - } - if err != nil { - return r, rtt, err - } - return r, rtt, nil -} - -func (c *Client) dialTimeout() time.Duration { - if c.Timeout != 0 { - return c.Timeout - } - if c.DialTimeout != 0 { - return c.DialTimeout - } - return dnsTimeout -} - -func (c *Client) readTimeout() time.Duration { - if c.ReadTimeout != 0 { - return c.ReadTimeout - } - return dnsTimeout -} - -func (c *Client) writeTimeout() time.Duration { - if c.WriteTimeout != 0 { - return c.WriteTimeout - } - return dnsTimeout -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - network := "udp" - tls := false - - switch c.Net { - case "tcp-tls": - network = "tcp" - tls = true - case "tcp4-tls": - network = "tcp4" - tls = true - case "tcp6-tls": - network = "tcp6" - tls = true - default: - if c.Net != "" { - network = c.Net - } - } - - var deadline time.Time - if c.Timeout != 0 { - deadline = time.Now().Add(c.Timeout) - } - - if tls { - co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) - } else { - co, err = DialTimeout(network, a, c.dialTimeout()) - } - - if err != nil { - return nil, 0, err - } - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - // Otherwise use the client's configured UDP size. - if opt == nil && c.UDPSize >= MinMsgSize { - co.UDPSize = c.UDPSize - } - - co.TsigSecret = c.TsigSecret - co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - - co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, co.rtt, err -} - -// ReadMsg reads a message from the connection co. -// If the received message contains a TSIG record the transaction -// signature is verified. -func (co *Conn) ReadMsg() (*Msg, error) { - p, err := co.ReadMsgHeader(nil) - if err != nil { - return nil, err - } - - m := new(Msg) - if err := m.Unpack(p); err != nil { - // If ErrTruncated was returned, we still want to allow the user to use - // the message, but naively they can just check err if they don't want - // to use a truncated message - if err == ErrTruncated { - return m, err - } - return nil, err - } - if t := m.IsTsig(); t != nil { - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - } - return m, err -} - -// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). -// Returns message as a byte slice to be parsed with Msg.Unpack later on. -// Note that error handling on the message body is not possible as only the header is parsed. -func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { - var ( - p []byte - n int - err error - ) - - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - // First two bytes specify the length of the entire message. 
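// This is the standard DNS-over-TCP framing (RFC 1035 section 4.2.2): every
// message is preceded by a two-byte, big-endian length field, which tcpMsgLen
// reads here and which Write prepends on the sending side.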
- l, err := tcpMsgLen(r) - if err != nil { - return nil, err - } - p = make([]byte, l) - n, err = tcpRead(r, p) - co.rtt = time.Since(co.t) - default: - if co.UDPSize > MinMsgSize { - p = make([]byte, co.UDPSize) - } else { - p = make([]byte, MinMsgSize) - } - n, err = co.Read(p) - co.rtt = time.Since(co.t) - } - - if err != nil { - return nil, err - } else if n < headerSize { - return nil, ErrShortRead - } - - p = p[:n] - if hdr != nil { - dh, _, err := unpackMsgHdr(p, 0) - if err != nil { - return nil, err - } - *hdr = dh - } - return p, err -} - -// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. -func tcpMsgLen(t io.Reader) (int, error) { - p := []byte{0, 0} - n, err := t.Read(p) - if err != nil { - return 0, err - } - - // As seen with my local router/switch, retursn 1 byte on the above read, - // resulting a a ShortRead. Just write it out (instead of loop) and read the - // other byte. - if n == 1 { - n1, err := t.Read(p[1:]) - if err != nil { - return 0, err - } - n += n1 - } - - if n != 2 { - return 0, ErrShortRead - } - l := binary.BigEndian.Uint16(p) - if l == 0 { - return 0, ErrShortRead - } - return int(l), nil -} - -// tcpRead calls TCPConn.Read enough times to fill allocated buffer. -func tcpRead(t io.Reader, p []byte) (int, error) { - n, err := t.Read(p) - if err != nil { - return n, err - } - for n < len(p) { - j, err := t.Read(p[n:]) - if err != nil { - return n, err - } - n += j - } - return n, err -} - -// Read implements the net.Conn read method. -func (co *Conn) Read(p []byte) (n int, err error) { - if co.Conn == nil { - return 0, ErrConnEmpty - } - if len(p) < 2 { - return 0, io.ErrShortBuffer - } - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - l, err := tcpMsgLen(r) - if err != nil { - return 0, err - } - if l > len(p) { - return int(l), io.ErrShortBuffer - } - return tcpRead(r, p[:l]) - } - // UDP connection - n, err = co.Conn.Read(p) - if err != nil { - return n, err - } - return n, err -} - -// WriteMsg sends a message through the connection co. -// If the message m contains a TSIG record the transaction -// signature is calculated. -func (co *Conn) WriteMsg(m *Msg) (err error) { - var out []byte - if t := m.IsTsig(); t != nil { - mac := "" - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return ErrSecret - } - out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - // Set for the next read, although only used in zone transfers - co.tsigRequestMAC = mac - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - co.t = time.Now() - if _, err = co.Write(out); err != nil { - return err - } - return nil -} - -// Write implements the net.Conn Write method. -func (co *Conn) Write(p []byte) (n int, err error) { - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - w := t.(io.Writer) - - lp := len(p) - if lp < 2 { - return 0, io.ErrShortBuffer - } - if lp > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, lp+2) - binary.BigEndian.PutUint16(l, uint16(lp)) - p = append(l, p...) - n, err := io.Copy(w, bytes.NewReader(p)) - return int(n), err - } - n, err = co.Conn.Write(p) - return n, err -} - -// Dial connects to the address on the named network. -func Dial(network, address string) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.Dial(network, address) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeout acts like Dial but takes a timeout. 
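Dial and the Conn methods above (WriteMsg, ReadMsg, Read, Write) can also be driven directly. A hedged sketch; the server address is a placeholder and imports of log and the dns package are assumed:

    // rawExchange performs one query over a manually dialed connection.
    func rawExchange() {
        co, err := dns.Dial("tcp", "192.0.2.1:53")
        if err != nil {
            log.Fatal(err)
        }
        defer co.Close()

        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeMX)

        // WriteMsg packs the message; on TCP, Write prepends the two-byte
        // length prefix described above.
        if err := co.WriteMsg(m); err != nil {
            log.Fatal(err)
        }
        in, err := co.ReadMsg()
        if err != nil {
            log.Fatal(err)
        }
        log.Println(in.Answer)
    }
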
-func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.DialTimeout(network, address, timeout) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialWithTLS connects to the address on the named network with TLS. -func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = tls.Dial(network, address, tlsConfig) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. -func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { - var dialer net.Dialer - dialer.Timeout = timeout - - conn = new(Conn) - conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) - if err != nil { - return nil, err - } - return conn, nil -} - -func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { - if deadline.IsZero() { - return time.Now().Add(timeout) - } - return deadline -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client_test.go deleted file mode 100644 index dee585f36..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/client_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package dns - -import ( - "crypto/tls" - "fmt" - "net" - "strconv" - "sync" - "testing" - "time" -) - -func TestClientSync(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - // And now with plain Exchange(). 
- r, err = Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -func TestClientTLSSync(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - c.Net = "tcp-tls" - c.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -func TestClientSyncBadID(t *testing.T) { - HandleFunc("miek.nl.", HelloServerBadID) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - if _, _, err := c.Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } - // And now with plain Exchange(). - if _, err := Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } -} - -func TestClientEDNS0(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeDNSKEY) - - m.SetEdns0(2048, true) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -// Validates the transmission and parsing of local EDNS0 options. -func TestClientEDNS0Local(t *testing.T) { - optStr1 := "1979:0x0707" - optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601" - - handler := func(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1, 2) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}} - - // If the local options are what we expect, then reflect them back. - ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String() - ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if ec1 == optStr1 && ec2 == optStr2 { - m.Extra = append(m.Extra, req.Extra[0]) - } - - w.WriteMsg(m) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - - // Add two local edns options to the query. 
- ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}} - ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}} - o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}} - m.Extra = append(m.Extra, o) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %s", err) - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Error("failed to get a valid answer") - t.Logf("%v\n", r) - } - - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello local edns" { - t.Error("Unexpected result for miek.nl", txt, "!= Hello local edns") - } - - // Validate the local options in the reply. - got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() - if got != optStr1 { - t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) - t.Logf("%v\n", r) - } - - got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if got != optStr2 { - t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) - t.Logf("%v\n", r) - } -} - -// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG -func ExampleTsigSecret_updateLeaseTSIG() { - m := new(Msg) - m.SetUpdate("t.local.ip6.io.") - rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") - rrs := make([]RR, 1) - rrs[0] = rr - m.Insert(rrs) - - leaseRr := new(OPT) - leaseRr.Hdr.Name = "." - leaseRr.Hdr.Rrtype = TypeOPT - e := new(EDNS0_UL) - e.Code = EDNS0UL - e.Lease = 120 - leaseRr.Option = append(leaseRr.Option, e) - m.Extra = append(m.Extra, leaseRr) - - c := new(Client) - m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) - c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} - - _, _, err := c.Exchange(m, "127.0.0.1:53") - if err != nil { - panic(err) - } -} - -func TestClientConn(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - // This uses TCP just to make it slightly different than TestClientSync - s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - cn, err := Dial("tcp", addrstr) - if err != nil { - t.Errorf("failed to dial %s: %v", addrstr, err) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - r, err := cn.ReadMsg() - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - h := new(Header) - buf, err := cn.ReadMsgHeader(h) - if buf == nil { - t.Errorf("failed to get an valid answer\n%v", r) - } - if int(h.Bits&0xF) != RcodeSuccess { - t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r) - } - if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 { - t.Errorf("expected to have question and additional in response; got something else: %+v", h) - } - if err = r.Unpack(buf); err != nil { - t.Errorf("unable to unpack message fully: %v", err) - } -} - -func TestTruncatedMsg(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSRV) - cnt := 10 - for i := 0; i < cnt; i++ { - r := &SRV{ - Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeSRV, Class: ClassINET, Ttl: 0}, - Port: uint16(i + 8000), - Target: "target.miek.nl.", - } - m.Answer = append(m.Answer, r) - - re := &A{ - Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeA, Class: ClassINET, Ttl: 0}, - A: net.ParseIP(fmt.Sprintf("127.0.0.%d", 
i)).To4(), - } - m.Extra = append(m.Extra, re) - } - buf, err := m.Pack() - if err != nil { - t.Errorf("failed to pack: %v", err) - } - - r := new(Msg) - if err = r.Unpack(buf); err != nil { - t.Errorf("unable to unpack message: %v", err) - } - if len(r.Answer) != cnt { - t.Errorf("answer count after regular unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != cnt { - t.Errorf("extra count after regular unpack doesn't match: %d", len(r.Extra)) - } - - m.Truncated = true - buf, err = m.Pack() - if err != nil { - t.Errorf("failed to pack truncated: %v", err) - } - - r = new(Msg) - if err = r.Unpack(buf); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack truncated message: %v", err) - } - if !r.Truncated { - t.Errorf("truncated message wasn't unpacked as truncated") - } - if len(r.Answer) != cnt { - t.Errorf("answer count after truncated unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != cnt { - t.Errorf("extra count after truncated unpack doesn't match: %d", len(r.Extra)) - } - - // Now we want to remove almost all of the extra records - // We're going to loop over the extra to get the count of the size of all - // of them - off := 0 - buf1 := make([]byte, m.Len()) - for i := 0; i < len(m.Extra); i++ { - off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) - if err != nil { - t.Errorf("failed to pack extra: %v", err) - } - } - - // Remove all of the extra bytes but 10 bytes from the end of buf - off -= 10 - buf1 = buf[:len(buf)-off] - - r = new(Msg) - if err = r.Unpack(buf1); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack cutoff message: %v", err) - } - if !r.Truncated { - t.Error("truncated cutoff message wasn't unpacked as truncated") - } - if len(r.Answer) != cnt { - t.Errorf("answer count after cutoff unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != 0 { - t.Errorf("extra count after cutoff unpack is not zero: %d", len(r.Extra)) - } - - // Now we want to remove almost all of the answer records too - buf1 = make([]byte, m.Len()) - as := 0 - for i := 0; i < len(m.Extra); i++ { - off1 := off - off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) - as = off - off1 - if err != nil { - t.Errorf("failed to pack extra: %v", err) - } - } - - // Keep exactly one answer left - // This should still cause Answer to be nil - off -= as - buf1 = buf[:len(buf)-off] - - r = new(Msg) - if err = r.Unpack(buf1); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack cutoff message: %v", err) - } - if !r.Truncated { - t.Error("truncated cutoff message wasn't unpacked as truncated") - } - if len(r.Answer) != 0 { - t.Errorf("answer count after second cutoff unpack is not zero: %d", len(r.Answer)) - } - - // Now leave only 1 byte of the question - // Since the header is always 12 bytes, we just need to keep 13 - buf1 = buf[:13] - - r = new(Msg) - err = r.Unpack(buf1) - if err == nil || err == ErrTruncated { - t.Errorf("error should not be ErrTruncated from question cutoff unpack: %v", err) - } - - // Finally, if we only have the header, we should still return an error - buf1 = buf[:12] - - r = new(Msg) - if err = r.Unpack(buf1); err == nil || err != ErrTruncated { - t.Errorf("error not ErrTruncated from header-only unpack: %v", err) - } -} - -func TestTimeout(t *testing.T) { - // Set up a dummy UDP server that won't respond - addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unable to resolve local udp address: %v", err) - } - conn, err := net.ListenUDP("udp", addr) - if err != 
nil { - t.Fatalf("unable to run test server: %v", err) - } - defer conn.Close() - addrstr := conn.LocalAddr().String() - - // Message to send - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - - // Use a channel + timeout to ensure we don't get stuck if the - // Client Timeout is not working properly - done := make(chan struct{}) - - timeout := time.Millisecond - allowable := timeout + (10 * time.Millisecond) - abortAfter := timeout + (100 * time.Millisecond) - - start := time.Now() - - go func() { - c := &Client{Timeout: timeout} - _, _, err := c.Exchange(m, addrstr) - if err == nil { - t.Error("no timeout using Client") - } - done <- struct{}{} - }() - - select { - case <-done: - case <-time.After(abortAfter): - } - - length := time.Since(start) - - if length > allowable { - t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout) - } -} - -// Check that responses from deduplicated requests aren't shared between callers -func TestConcurrentExchanges(t *testing.T) { - cases := make([]*Msg, 2) - cases[0] = new(Msg) - cases[1] = new(Msg) - cases[1].Truncated = true - for _, m := range cases { - block := make(chan struct{}) - waiting := make(chan struct{}) - - handler := func(w ResponseWriter, req *Msg) { - r := m.Copy() - r.SetReply(req) - - waiting <- struct{}{} - <-block - w.WriteMsg(r) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSRV) - c := &Client{ - SingleInflight: true, - } - r := make([]*Msg, 2) - - var wg sync.WaitGroup - wg.Add(len(r)) - for i := 0; i < len(r); i++ { - go func(i int) { - r[i], _, _ = c.Exchange(m.Copy(), addrstr) - wg.Done() - }(i) - } - select { - case <-waiting: - case <-time.After(time.Second): - t.FailNow() - } - close(block) - wg.Wait() - - if r[0] == r[1] { - t.Log("Got same response object, expected non-shared responses") - t.Fail() - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig.go deleted file mode 100644 index 0a1f5a92c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig.go +++ /dev/null @@ -1,131 +0,0 @@ -package dns - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -// ClientConfig wraps the contents of the /etc/resolv.conf file. -type ClientConfig struct { - Servers []string // servers to use - Search []string // suffixes to append to local name - Port string // what port to use - Ndots int // number of dots in name to trigger absolute lookup - Timeout int // seconds before giving up on packet - Attempts int // lost packets before giving up on server, not used in the package dns -} - -// ClientConfigFromFile parses a resolv.conf(5) like file and returns -// a *ClientConfig. 
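ClientConfigFromFile and NameList below are typically combined with a Client. A hedged sketch; it assumes imports of log, net, and the dns package, and /etc/resolv.conf is the conventional path:

    // queryFromResolvConf resolves host using the local resolver configuration.
    func queryFromResolvConf(host string) {
        conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
        if err != nil {
            log.Fatal(err)
        }
        c := new(dns.Client)
        server := net.JoinHostPort(conf.Servers[0], conf.Port)

        // NameList applies the configured ndots/search rules to the bare name.
        for _, name := range conf.NameList(host) {
            m := new(dns.Msg)
            m.SetQuestion(name, dns.TypeA)
            r, _, err := c.Exchange(m, server)
            if err == nil && r.Rcode == dns.RcodeSuccess {
                log.Println(name, r.Answer)
                return
            }
        }
    }
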
-func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { - file, err := os.Open(resolvconf) - if err != nil { - return nil, err - } - defer file.Close() - c := new(ClientConfig) - scanner := bufio.NewScanner(file) - c.Servers = make([]string, 0) - c.Search = make([]string, 0) - c.Port = "53" - c.Ndots = 1 - c.Timeout = 5 - c.Attempts = 2 - - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return nil, err - } - line := scanner.Text() - f := strings.Fields(line) - if len(f) < 1 { - continue - } - switch f[0] { - case "nameserver": // add one name server - if len(f) > 1 { - // One more check: make sure server name is - // just an IP address. Otherwise we need DNS - // to look it up. - name := f[1] - c.Servers = append(c.Servers, name) - } - - case "domain": // set search path to just this domain - if len(f) > 1 { - c.Search = make([]string, 1) - c.Search[0] = f[1] - } else { - c.Search = make([]string, 0) - } - - case "search": // set search path to given servers - c.Search = make([]string, len(f)-1) - for i := 0; i < len(c.Search); i++ { - c.Search[i] = f[i+1] - } - - case "options": // magic options - for i := 1; i < len(f); i++ { - s := f[i] - switch { - case len(s) >= 6 && s[:6] == "ndots:": - n, _ := strconv.Atoi(s[6:]) - if n < 1 { - n = 1 - } - c.Ndots = n - case len(s) >= 8 && s[:8] == "timeout:": - n, _ := strconv.Atoi(s[8:]) - if n < 1 { - n = 1 - } - c.Timeout = n - case len(s) >= 8 && s[:9] == "attempts:": - n, _ := strconv.Atoi(s[9:]) - if n < 1 { - n = 1 - } - c.Attempts = n - case s == "rotate": - /* not imp */ - } - } - } - } - return c, nil -} - -// NameList returns all of the names that should be queried based on the -// config. It is based off of go's net/dns name building, but it does not -// check the length of the resulting names. -func (c *ClientConfig) NameList(name string) []string { - // if this domain is already fully qualified, no append needed. - if IsFqdn(name) { - return []string{name} - } - - // Check to see if the name has more labels than Ndots. Do this before making - // the domain fully qualified. - hasNdots := CountLabel(name) > c.Ndots - // Make the domain fully qualified. - name = Fqdn(name) - - // Make a list of names based off search. - names := []string{} - - // If name has enough dots, try that first. - if hasNdots { - names = append(names, name) - } - for _, s := range c.Search { - names = append(names, Fqdn(name+s)) - } - // If we didn't have enough dots, try after suffixes. - if !hasNdots { - names = append(names, name) - } - return names -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig_test.go deleted file mode 100644 index 7755a8a6f..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/clientconfig_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package dns - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -const normal string = ` -# Comment -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1 -` - -const missingNewline string = ` -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1` // <- NOTE: NO newline. 
- -func testConfig(t *testing.T, data string) { - tempDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("tempDir: %v", err) - } - defer os.RemoveAll(tempDir) - - path := filepath.Join(tempDir, "resolv.conf") - if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { - t.Fatalf("writeFile: %v", err) - } - cc, err := ClientConfigFromFile(path) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if l := len(cc.Servers); l != 2 { - t.Errorf("incorrect number of nameservers detected: %d", l) - } - if l := len(cc.Search); l != 1 { - t.Errorf("domain directive not parsed correctly: %v", cc.Search) - } else { - if cc.Search[0] != "somedomain.com" { - t.Errorf("domain is unexpected: %v", cc.Search[0]) - } - } -} - -func TestNameserver(t *testing.T) { testConfig(t, normal) } -func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } - -func TestNameList(t *testing.T) { - cfg := ClientConfig{ - Ndots: 1, - } - // fqdn should be only result returned - names := cfg.NameList("miek.nl.") - if len(names) != 1 { - t.Errorf("NameList returned != 1 names: %v", names) - } else if names[0] != "miek.nl." { - t.Errorf("NameList didn't return sent fqdn domain: %v", names[0]) - } - - cfg.Search = []string{ - "test", - } - // Sent domain has NDots and search - names = cfg.NameList("miek.nl") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl." { - t.Errorf("NameList didn't return sent domain first: %v", names[0]) - } else if names[1] != "miek.nl.test." { - t.Errorf("NameList didn't return search last: %v", names[1]) - } - - cfg.Ndots = 2 - // Sent domain has less than NDots and search - names = cfg.NameList("miek.nl") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl.test." { - t.Errorf("NameList didn't return search first: %v", names[0]) - } else if names[1] != "miek.nl." { - t.Errorf("NameList didn't return sent domain last: %v", names[1]) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/compress_generate.go deleted file mode 100644 index 1a301e9f3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/compress_generate.go +++ /dev/null @@ -1,184 +0,0 @@ -//+build ignore - -// compression_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will look to see if there are (compressible) names, if so it will add that -// type to compressionLenHelperType and comressionLenSearchType which "fake" the -// compression so that Len() is fast. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from compress_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - domainTypes := map[string]bool{} // Types that have a domain name in them (either comressible or not). - cdomainTypes := map[string]bool{} // Types that have a compressible domain name in them (subset of domainType) - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - st, _ := getTypeStruct(o.Type(), scope) - if st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - for i := 1; i < st.NumFields(); i++ { - if _, ok := st.Field(i).Type().(*types.Slice); ok { - if st.Tag(i) == `dns:"domain-name"` { - domainTypes[o.Name()] = true - } - if st.Tag(i) == `dns:"cdomain-name"` { - cdomainTypes[o.Name()] = true - domainTypes[o.Name()] = true - } - continue - } - - switch { - case st.Tag(i) == `dns:"domain-name"`: - domainTypes[o.Name()] = true - case st.Tag(i) == `dns:"cdomain-name"`: - cdomainTypes[o.Name()] = true - domainTypes[o.Name()] = true - } - } - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names - - fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for name, _ := range domainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - for i := 1; i < st.NumFields(); i++ { - out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"domain-name"`: - fallthrough - case `dns:"cdomain-name"`: - // For HIP we need to slice over the elements in this slice. - fmt.Fprintf(b, `for i := range x.%s { - compressionLenHelper(c, x.%s[i]) - } -`, st.Field(i).Name(), st.Field(i).Name()) - } - continue - } - - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - out(st.Field(i).Name()) - } - } - } - fmt.Fprintln(b, "}\n}\n\n") - - // compressionLenSearchType - search cdomain-tags types for compressible names. - - fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for name, _ := range cdomainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - j := 1 - for i := 1; i < st.NumFields(); i++ { - out := func(s string, j int) { - fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name()) - } - - // There are no slice types with names that can be compressed. 
- - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - out(st.Field(i).Name(), j) - j++ - } - } - k := "k1" - ok := "ok1" - for i := 2; i < j; i++ { - k += fmt.Sprintf(" + k%d", i) - ok += fmt.Sprintf(" && ok%d", i) - } - fmt.Fprintf(b, "return %s, %s\n", k, ok) - } - fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - f, err := os.Create("zcompress.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dane.go deleted file mode 100644 index 8c4a14ef1..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dane.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "errors" -) - -// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. -func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { - switch matchingType { - case 0: - switch selector { - case 0: - return hex.EncodeToString(cert.Raw), nil - case 1: - return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil - } - case 1: - h := sha256.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - case 2: - h := sha512.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - } - return "", errors.New("dns: bad MatchingType or Selector") -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/defaults.go deleted file mode 100644 index c34890eec..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/defaults.go +++ /dev/null @@ -1,285 +0,0 @@ -package dns - -import ( - "errors" - "net" - "strconv" -) - -const hexDigit = "0123456789abcdef" - -// Everything is assumed in ClassINET. - -// SetReply creates a reply message from a request message. -func (dns *Msg) SetReply(request *Msg) *Msg { - dns.Id = request.Id - dns.Response = true - dns.Opcode = request.Opcode - if dns.Opcode == OpcodeQuery { - dns.RecursionDesired = request.RecursionDesired // Copy rd bit - dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit - } - dns.Rcode = RcodeSuccess - if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] - } - return dns -} - -// SetQuestion creates a question message, it sets the Question -// section, generates an Id and sets the RecursionDesired (RD) -// bit to true. -func (dns *Msg) SetQuestion(z string, t uint16) *Msg { - dns.Id = Id() - dns.RecursionDesired = true - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, t, ClassINET} - return dns -} - -// SetNotify creates a notify message, it sets the Question -// section, generates an Id and sets the Authoritative (AA) -// bit to true. 
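The Set* helpers in this file are also the building blocks of a server-side handler. A hedged sketch in the style of the test handlers earlier in this patch; the TXT payload is a placeholder and the dns package is assumed to be imported:

    // handleExample answers every query with a single TXT record.
    func handleExample(w dns.ResponseWriter, req *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(req) // copies the Id and question, per SetReply above
        m.Answer = append(m.Answer, &dns.TXT{
            Hdr: dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0},
            Txt: []string{"hello"},
        })
        w.WriteMsg(m)
    }

Such a handler would be registered with dns.HandleFunc, as the tests above do.
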
-func (dns *Msg) SetNotify(z string) *Msg { - dns.Opcode = OpcodeNotify - dns.Authoritative = true - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetRcode creates an error message suitable for the request. -func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { - dns.SetReply(request) - dns.Rcode = rcode - return dns -} - -// SetRcodeFormatError creates a message with FormError set. -func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { - dns.Rcode = RcodeFormatError - dns.Opcode = OpcodeQuery - dns.Response = true - dns.Authoritative = false - dns.Id = request.Id - return dns -} - -// SetUpdate makes the message a dynamic update message. It -// sets the ZONE section to: z, TypeSOA, ClassINET. -func (dns *Msg) SetUpdate(z string) *Msg { - dns.Id = Id() - dns.Response = false - dns.Opcode = OpcodeUpdate - dns.Compress = false // BIND9 cannot handle compression - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetIxfr creates message for requesting an IXFR. -func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Ns = make([]RR, 1) - s := new(SOA) - s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} - s.Serial = serial - s.Ns = ns - s.Mbox = mbox - dns.Question[0] = Question{z, TypeIXFR, ClassINET} - dns.Ns[0] = s - return dns -} - -// SetAxfr creates message for requesting an AXFR. -func (dns *Msg) SetAxfr(z string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeAXFR, ClassINET} - return dns -} - -// SetTsig appends a TSIG RR to the message. -// This is only a skeleton TSIG RR that is added as the last RR in the -// additional section. The Tsig is calculated when the message is being send. -func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { - t := new(TSIG) - t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} - t.Algorithm = algo - t.Fudge = fudge - t.TimeSigned = uint64(timesigned) - t.OrigId = dns.Id - dns.Extra = append(dns.Extra, t) - return dns -} - -// SetEdns0 appends a EDNS0 OPT RR to the message. -// TSIG should always the last RR in a message. -func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { - e := new(OPT) - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - e.SetUDPSize(udpsize) - if do { - e.SetDo() - } - dns.Extra = append(dns.Extra, e) - return dns -} - -// IsTsig checks if the message has a TSIG record as the last record -// in the additional section. It returns the TSIG record found or nil. -func (dns *Msg) IsTsig() *TSIG { - if len(dns.Extra) > 0 { - if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { - return dns.Extra[len(dns.Extra)-1].(*TSIG) - } - } - return nil -} - -// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 -// record in the additional section will do. It returns the OPT record -// found or nil. -func (dns *Msg) IsEdns0() *OPT { - // EDNS0 is at the end of the additional section, start there. - // We might want to change this to *only* look at the last two - // records. So we see TSIG and/or OPT - this a slightly bigger - // change though. - for i := len(dns.Extra) - 1; i >= 0; i-- { - if dns.Extra[i].Header().Rrtype == TypeOPT { - return dns.Extra[i].(*OPT) - } - } - return nil -} - -// IsDomainName checks if s is a valid domain name, it returns the number of -// labels and true, when a domain name is valid. 
Note that non fully qualified -// domain name is considered valid, in this case the last label is counted in -// the number of labels. When false is returned the number of labels is not -// defined. Also note that this function is extremely liberal; almost any -// string is a valid domain name as the DNS is 8 bit protocol. It checks if each -// label fits in 63 characters, but there is no length check for the entire -// string s. I.e. a domain name longer than 255 characters is considered valid. -func IsDomainName(s string) (labels int, ok bool) { - _, labels, err := packDomainName(s, nil, 0, nil, false) - return labels, err == nil -} - -// IsSubDomain checks if child is indeed a child of the parent. If child and parent -// are the same domain true is returned as well. -func IsSubDomain(parent, child string) bool { - // Entire child is contained in parent - return CompareDomainName(parent, child) == CountLabel(parent) -} - -// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. -// The checking is performed on the binary payload. -func IsMsg(buf []byte) error { - // Header - if len(buf) < 12 { - return errors.New("dns: bad message header") - } - // Header: Opcode - // TODO(miek): more checks here, e.g. check all header bits. - return nil -} - -// IsFqdn checks if a domain name is fully qualified. -func IsFqdn(s string) bool { - l := len(s) - if l == 0 { - return false - } - return s[l-1] == '.' -} - -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. Returns true -// if the RR set is valid, otherwise false. -func IsRRset(rrset []RR) bool { - if len(rrset) == 0 { - return false - } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name - - for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { - // Mismatch between the records, so this is not a valid rrset for - //signing/verifying - return false - } - } - - return true -} - -// Fqdn return the fully qualified domain name from s. -// If s is already fully qualified, it behaves as the identity function. -func Fqdn(s string) string { - if IsFqdn(s) { - return s - } - return s + "." -} - -// Copied from the official Go code. - -// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP -// address suitable for reverse DNS (PTR) record lookups or an error if it fails -// to parse the IP address. -func ReverseAddr(addr string) (arpa string, err error) { - ip := net.ParseIP(addr) - if ip == nil { - return "", &Error{err: "unrecognized address: " + addr} - } - if ip.To4() != nil { - return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + - strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil - } - // Must be IPv6 - buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) - // Add it, in reverse, to the buffer - for i := len(ip) - 1; i >= 0; i-- { - v := ip[i] - buf = append(buf, hexDigit[v&0xF]) - buf = append(buf, '.') - buf = append(buf, hexDigit[v>>4]) - buf = append(buf, '.') - } - // Append "ip6.arpa." and return (buf already has the final .) - buf = append(buf, "ip6.arpa."...) - return string(buf), nil -} - -// String returns the string representation for the type t. 
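
// Editorial sketch, not part of the upstream files being removed: ReverseAddr above
// walks the address bytes backwards and appends the arpa suffix. For IPv4 the same
// result can be produced with stdlib calls only; reverseIPv4 is an illustrative name.
package main

import (
	"fmt"
	"net"
	"strconv"
)

func reverseIPv4(addr string) (string, error) {
	ip := net.ParseIP(addr).To4()
	if ip == nil {
		return "", fmt.Errorf("not an IPv4 address: %s", addr)
	}
	// Octets are emitted in reverse order, e.g. 192.0.2.1 -> 1.2.0.192.in-addr.arpa.
	return strconv.Itoa(int(ip[3])) + "." + strconv.Itoa(int(ip[2])) + "." +
		strconv.Itoa(int(ip[1])) + "." + strconv.Itoa(int(ip[0])) + ".in-addr.arpa.", nil
}

func main() {
	name, err := reverseIPv4("192.0.2.1")
	fmt.Println(name, err) // 1.2.0.192.in-addr.arpa. <nil>
}
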
-func (t Type) String() string { - if t1, ok := TypeToString[uint16(t)]; ok { - return t1 - } - return "TYPE" + strconv.Itoa(int(t)) -} - -// String returns the string representation for the class c. -func (c Class) String() string { - if c1, ok := ClassToString[uint16(c)]; ok { - return c1 - } - return "CLASS" + strconv.Itoa(int(c)) -} - -// String returns the string representation for the name n. -func (n Name) String() string { - return sprintName(string(n)) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns.go deleted file mode 100644 index b3292287c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns.go +++ /dev/null @@ -1,104 +0,0 @@ -package dns - -import "strconv" - -const ( - year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. - defaultTtl = 3600 // Default internal TTL. - - DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. - MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. - MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. -) - -// Error represents a DNS error. -type Error struct{ err string } - -func (e *Error) Error() string { - if e == nil { - return "dns: " - } - return "dns: " + e.err -} - -// An RR represents a resource record. -type RR interface { - // Header returns the header of an resource record. The header contains - // everything up to the rdata. - Header() *RR_Header - // String returns the text representation of the resource record. - String() string - - // copy returns a copy of the RR - copy() RR - // len returns the length (in octets) of the uncompressed RR in wire format. - len() int - // pack packs an RR into wire format. - pack([]byte, int, map[string]int, bool) (int, error) -} - -// RR_Header is the header all DNS resource records share. -type RR_Header struct { - Name string `dns:"cdomain-name"` - Rrtype uint16 - Class uint16 - Ttl uint32 - Rdlength uint16 // Length of data after header. -} - -// Header returns itself. This is here to make RR_Header implements the RR interface. -func (h *RR_Header) Header() *RR_Header { return h } - -// Just to implement the RR interface. -func (h *RR_Header) copy() RR { return nil } - -func (h *RR_Header) copyHeader() *RR_Header { - r := new(RR_Header) - r.Name = h.Name - r.Rrtype = h.Rrtype - r.Class = h.Class - r.Ttl = h.Ttl - r.Rdlength = h.Rdlength - return r -} - -func (h *RR_Header) String() string { - var s string - - if h.Rrtype == TypeOPT { - s = ";" - // and maybe other things - } - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += Class(h.Class).String() + "\t" - s += Type(h.Rrtype).String() + "\t" - return s -} - -func (h *RR_Header) len() int { - l := len(h.Name) + 1 - l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) - return l -} - -// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
-func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, r.len()*2) - off, err := PackRR(r, buf, 0, nil, false) - if err != nil { - return err - } - buf = buf[:off] - if int(r.Header().Rdlength) > off { - return ErrBuf - } - - rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) - if err != nil { - return err - } - *rr = *rfc3597.(*RFC3597) - return nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_bench_test.go deleted file mode 100644 index bccc3d540..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_bench_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package dns - -import ( - "net" - "testing" -) - -func BenchmarkMsgLength(b *testing.B) { - b.StopTimer() - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.StartTimer() - for i := 0; i < b.N; i++ { - msg.Len() - } -} - -func BenchmarkMsgLengthPack(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = msg.Pack() - } -} - -func BenchmarkPackDomainName(b *testing.B) { - name1 := "12345678901234567890123456789012345.12345678.123." - buf := make([]byte, len(name1)+1) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackDomainName(name1, buf, 0, nil, false) - } -} - -func BenchmarkUnpackDomainName(b *testing.B) { - name1 := "12345678901234567890123456789012345.12345678.123." - buf := make([]byte, len(name1)+1) - _, _ = PackDomainName(name1, buf, 0, nil, false) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackDomainName(buf, 0) - } -} - -func BenchmarkUnpackDomainNameUnprintable(b *testing.B) { - name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123." - buf := make([]byte, len(name1)+1) - _, _ = PackDomainName(name1, buf, 0, nil, false) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackDomainName(buf, 0) - } -} - -func BenchmarkCopy(b *testing.B) { - b.ReportAllocs() - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - rr, _ = NewRR("miek.nl. 
2311 IN A 127.0.0.1") - m.Extra = []RR{rr} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Copy() - } -} - -func BenchmarkPackA(b *testing.B) { - a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} - - buf := make([]byte, a.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(a, buf, 0, nil, false) - } -} - -func BenchmarkUnpackA(b *testing.B) { - a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} - - buf := make([]byte, a.len()) - PackRR(a, buf, 0, nil, false) - a = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackMX(b *testing.B) { - m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} - - buf := make([]byte, m.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(m, buf, 0, nil, false) - } -} - -func BenchmarkUnpackMX(b *testing.B) { - m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} - - buf := make([]byte, m.len()) - PackRR(m, buf, 0, nil, false) - m = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackAAAAA(b *testing.B) { - aaaa, _ := NewRR(". IN A ::1") - - buf := make([]byte, aaaa.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(aaaa, buf, 0, nil, false) - } -} - -func BenchmarkUnpackAAAA(b *testing.B) { - aaaa, _ := NewRR(". IN A ::1") - - buf := make([]byte, aaaa.len()) - PackRR(aaaa, buf, 0, nil, false) - aaaa = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackMsg(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - buf := make([]byte, 512) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = msg.PackBuffer(buf) - } -} - -func BenchmarkUnpackMsg(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." 
- rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - msgBuf, _ := msg.Pack() - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = msg.Unpack(msgBuf) - } -} - -func BenchmarkIdGeneration(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = id() - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_test.go deleted file mode 100644 index dbfe25328..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dns_test.go +++ /dev/null @@ -1,450 +0,0 @@ -package dns - -import ( - "encoding/hex" - "net" - "testing" -) - -func TestPackUnpack(t *testing.T) { - out := new(Msg) - out.Answer = make([]RR, 1) - key := new(DNSKEY) - key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1} - key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600} - key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - out.Answer[0] = key - msg, err := out.Pack() - if err != nil { - t.Error("failed to pack msg with DNSKEY") - } - in := new(Msg) - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with DNSKEY") - } - - sig := new(RRSIG) - sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2, - OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.", - Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"} - sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600} - - out.Answer[0] = sig - msg, err = out.Pack() - if err != nil { - t.Error("failed to pack msg with RRSIG") - } - - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with RRSIG") - } -} - -func TestPackUnpack2(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 1) - m.Answer = make([]RR, 1) - dom := "miek.nl." - rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x := new(TXT) - x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x.Txt = []string{"heelalaollo"} - - m.Extra[0] = x - m.Answer[0] = rr - _, err := m.Pack() - if err != nil { - t.Error("Packing failed: ", err) - return - } -} - -func TestPackUnpack3(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 2) - m.Answer = make([]RR, 1) - dom := "miek.nl." 
- rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x1 := new(TXT) - x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x1.Txt = []string{} - - x2 := new(TXT) - x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x2.Txt = []string{"heelalaollo"} - - m.Extra[0] = x1 - m.Extra[1] = x2 - m.Answer[0] = rr - b, err := m.Pack() - if err != nil { - t.Error("packing failed: ", err) - return - } - - var unpackMsg Msg - err = unpackMsg.Unpack(b) - if err != nil { - t.Error("unpacking failed") - return - } -} - -func TestBailiwick(t *testing.T) { - yes := map[string]string{ - "miek1.nl": "miek1.nl", - "miek.nl": "ns.miek.nl", - ".": "miek.nl", - } - for parent, child := range yes { - if !IsSubDomain(parent, child) { - t.Errorf("%s should be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } - no := map[string]string{ - "www.miek.nl": "ns.miek.nl", - "m\\.iek.nl": "ns.miek.nl", - "w\\.iek.nl": "w.iek.nl", - "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name - "miek.nl": ".", - } - for parent, child := range no { - if IsSubDomain(parent, child) { - t.Errorf("%s should not be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } -} - -func TestPack(t *testing.T) { - rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} - m := new(Msg) - var err error - m.Answer = make([]RR, 1) - for _, r := range rr { - m.Answer[0], err = NewRR(r) - if err != nil { - t.Errorf("failed to create RR: %v", err) - continue - } - if _, err := m.Pack(); err != nil { - t.Errorf("packing failed: %v", err) - } - } - x := new(Msg) - ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") - ns.(*NS).Ns = "a.ntpns.org" - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - // This crashes due to the fact the a.ntpns.org isn't a FQDN - // How to recover() from a remove panic()? - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Answer = make([]RR, 1) - x.Answer[0], err = NewRR(rr[0]) - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Question = make([]Question, 1) - x.Question[0] = Question{";sd#eddddsé›↙èµÂ‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET} - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } -} - -func TestPackNAPTR(t *testing.T) { - for _, n := range []string{ - `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, - `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, - `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, - } { - rr, _ := NewRR(n) - msg := make([]byte, rr.len()) - if off, err := PackRR(rr, msg, 0, nil, false); err != nil { - t.Errorf("packing failed: %v", err) - t.Errorf("length %d, need more than %d", rr.len(), off) - } else { - t.Logf("buf size needed: %d", off) - } - } -} - -func TestCompressLength(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl", TypeMX) - ul := m.Len() - m.Compress = true - if ul != m.Len() { - t.Fatalf("should be equal") - } -} - -// Does the predicted length match final packed length? 
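
// Editorial sketch, not part of the upstream files being removed: the length tests below
// assert that Msg.Len() never under-predicts the packed size. The same check, written
// against the public github.com/miekg/dns API that those tests exercise:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeMX)
	m.Compress = true

	predicted := m.Len() // upper bound on the compressed wire size
	buf, err := m.Pack() // actual wire encoding
	if err != nil {
		panic(err)
	}
	fmt.Printf("predicted=%d actual=%d ok=%v\n", predicted, len(buf), predicted >= len(buf))
}
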
-func TestMsgCompressLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} - -func TestMsgLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", - msg.Question[0].Name, predicted, len(buf)) - } - } -} - -func TestMsgLength2(t *testing.T) { - // Serialized replies - var testMessages = []string{ - // google.com. IN A? - "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", - // amazon.com. IN A? 
(reply has no EDNS0 record) - // TODO(miek): this one is off-by-one, need to find out why - //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", - // yahoo.com. IN A? - "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", - // microsoft.com. IN A? - "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", - // google.com. IN MX? - "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", - // reddit.com. IN A? 
- "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - - m := new(Msg) - m.Unpack(input) - m.Compress = true - lenComp := m.Len() - b, _ := m.Pack() - pacComp := len(b) - m.Compress = false - lenUnComp := m.Len() - b, _ = m.Pack() - pacUnComp := len(b) - if pacComp+1 != lenComp { - t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) - } - if pacUnComp+1 != lenUnComp { - t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) - } - } -} - -func TestMsgLengthCompressionMalformed(t *testing.T) { - // SOA with empty hostmaster, which is illegal - soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, - Ns: ".", - Mbox: "", - Serial: 0, - Refresh: 28800, - Retry: 7200, - Expire: 604800, - Minttl: 60} - m := new(Msg) - m.Compress = true - m.Ns = []RR{soa} - m.Len() // Should not crash. -} - -func TestMsgCompressLength2(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion(Fqdn("bliep."), TypeANY) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."}) - msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}}) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } -} - -func TestToRFC3597(t *testing.T) { - a, _ := NewRR("miek.nl. IN A 10.0.1.1") - x := new(RFC3597) - x.ToRFC3597(a) - if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { - t.Errorf("string mismatch, got: %s", x) - } - - b, _ := NewRR("miek.nl. IN MX 10 mx.miek.nl.") - x.ToRFC3597(b) - if x.String() != `miek.nl. 3600 CLASS1 TYPE15 \# 14 000a026d78046d69656b026e6c00` { - t.Errorf("string mismatch, got: %s", x) - } -} - -func TestNoRdataPack(t *testing.T) { - data := make([]byte, 1024) - for typ, fn := range TypeToRR { - r := fn() - *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} - _, err := PackRR(r, data, 0, nil, false) - if err != nil { - t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err) - } - } -} - -func TestNoRdataUnpack(t *testing.T) { - data := make([]byte, 1024) - for typ, fn := range TypeToRR { - if typ == TypeSOA || typ == TypeTSIG { - // SOA, TSIG will not be seen (like this) in dyn. updates? 
- continue - } - r := fn() - *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} - off, err := PackRR(r, data, 0, nil, false) - if err != nil { - // Should always works, TestNoDataPack should have caught this - t.Errorf("failed to pack RR: %v", err) - continue - } - rr, _, err := UnpackRR(data[:off], 0) - if err != nil { - t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err) - } - t.Log(rr) - } -} - -func TestRdataOverflow(t *testing.T) { - rr := new(RFC3597) - rr.Hdr.Name = "." - rr.Hdr.Class = ClassINET - rr.Hdr.Rrtype = 65280 - rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF)) - buf := make([]byte, 0xFFFF*2) - if _, err := PackRR(rr, buf, 0, nil, false); err != nil { - t.Fatalf("maximum size rrdata pack failed: %v", err) - } - rr.Rdata += "00" - if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata { - t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err) - } -} - -func TestCopy(t *testing.T) { - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL - rr1 := Copy(rr) - if rr.String() != rr1.String() { - t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) - } -} - -func TestMsgCopy(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - - m1 := m.Copy() - if m.String() != m1.String() { - t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) - } - - m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") - if m.String() == m1.String() { - t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) - } - - rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") - m1.Answer = append(m1.Answer, rr) - if m1.Ns[0].String() == m1.Answer[1].String() { - t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) - } -} - -func TestMsgPackBuffer(t *testing.T) { - var testMessages = []string{ - // news.ycombinator.com.in.escapemg.com. IN A, response - "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10", - - // news.ycombinator.com.in.escapemg.com. IN A, question - "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001", - - "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - m := new(Msg) - if err := m.Unpack(input); err != nil { - t.Errorf("packet %d failed to unpack", i) - continue - } - t.Logf("packet %d %s", i, m.String()) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec.go deleted file mode 100644 index 3bd55388d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec.go +++ /dev/null @@ -1,720 +0,0 @@ -package dns - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - _ "crypto/md5" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "math/big" - "sort" - "strings" - "time" -) - -// DNSSEC encryption algorithm codes. 
-const ( - _ uint8 = iota - RSAMD5 - DH - DSA - _ // Skip 4, RFC 6725, section 2.1 - RSASHA1 - DSANSEC3SHA1 - RSASHA1NSEC3SHA1 - RSASHA256 - _ // Skip 9, RFC 6725, section 2.1 - RSASHA512 - _ // Skip 11, RFC 6725, section 2.1 - ECCGOST - ECDSAP256SHA256 - ECDSAP384SHA384 - INDIRECT uint8 = 252 - PRIVATEDNS uint8 = 253 // Private (experimental keys) - PRIVATEOID uint8 = 254 -) - -// AlgorithmToString is a map of algorithm IDs to algorithm names. -var AlgorithmToString = map[uint8]string{ - RSAMD5: "RSAMD5", - DH: "DH", - DSA: "DSA", - RSASHA1: "RSASHA1", - DSANSEC3SHA1: "DSA-NSEC3-SHA1", - RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", - RSASHA256: "RSASHA256", - RSASHA512: "RSASHA512", - ECCGOST: "ECC-GOST", - ECDSAP256SHA256: "ECDSAP256SHA256", - ECDSAP384SHA384: "ECDSAP384SHA384", - INDIRECT: "INDIRECT", - PRIVATEDNS: "PRIVATEDNS", - PRIVATEOID: "PRIVATEOID", -} - -// StringToAlgorithm is the reverse of AlgorithmToString. -var StringToAlgorithm = reverseInt8(AlgorithmToString) - -// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. -var AlgorithmToHash = map[uint8]crypto.Hash{ - RSAMD5: crypto.MD5, // Deprecated in RFC 6725 - RSASHA1: crypto.SHA1, - RSASHA1NSEC3SHA1: crypto.SHA1, - RSASHA256: crypto.SHA256, - ECDSAP256SHA256: crypto.SHA256, - ECDSAP384SHA384: crypto.SHA384, - RSASHA512: crypto.SHA512, -} - -// DNSSEC hashing algorithm codes. -const ( - _ uint8 = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental -) - -// HashToString is a map of hash IDs to names. -var HashToString = map[uint8]string{ - SHA1: "SHA1", - SHA256: "SHA256", - GOST94: "GOST94", - SHA384: "SHA384", - SHA512: "SHA512", -} - -// StringToHash is a map of names to hash IDs. -var StringToHash = reverseInt8(HashToString) - -// DNSKEY flag values. -const ( - SEP = 1 - REVOKE = 1 << 7 - ZONE = 1 << 8 -) - -// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. -type rrsigWireFmt struct { - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - /* No Signature */ -} - -// Used for converting DNSKEY's rdata to wirefmt. -type dnskeyWireFmt struct { - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` - /* Nothing is left out */ -} - -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - -// KeyTag calculates the keytag (or key-id) of the DNSKEY. -func (k *DNSKEY) KeyTag() uint16 { - if k == nil { - return 0 - } - var keytag int - switch k.Algorithm { - case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. We could do this faster by looking directly - // at the base64 values. But I'm lazy. - modulus, _ := fromBase64([]byte(k.PublicKey)) - if len(modulus) > 1 { - x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) - keytag = int(x) - } - default: - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return 0 - } - wire = wire[:n] - for i, v := range wire { - if i&1 != 0 { - keytag += int(v) // must be larger than uint32 - } else { - keytag += int(v) << 8 - } - } - keytag += (keytag >> 16) & 0xFFFF - keytag &= 0xFFFF - } - return uint16(keytag) -} - -// ToDS converts a DNSKEY record to a DS record. 
-func (k *DNSKEY) ToDS(h uint8) *DS { - if k == nil { - return nil - } - ds := new(DS) - ds.Hdr.Name = k.Hdr.Name - ds.Hdr.Class = k.Hdr.Class - ds.Hdr.Rrtype = TypeDS - ds.Hdr.Ttl = k.Hdr.Ttl - ds.Algorithm = k.Algorithm - ds.DigestType = h - ds.KeyTag = k.KeyTag() - - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return nil - } - wire = wire[:n] - - owner := make([]byte, 255) - off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) - if err1 != nil { - return nil - } - owner = owner[:off] - // RFC4034: - // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); - // "|" denotes concatenation - // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. - - var hash crypto.Hash - switch h { - case SHA1: - hash = crypto.SHA1 - case SHA256: - hash = crypto.SHA256 - case SHA384: - hash = crypto.SHA384 - case SHA512: - hash = crypto.SHA512 - default: - return nil - } - - s := hash.New() - s.Write(owner) - s.Write(wire) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - return ds -} - -// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. -func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { - c := &CDNSKEY{DNSKEY: *k} - c.Hdr = *k.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDNSKEY - return c -} - -// ToCDS converts a DS record to a CDS record. -func (d *DS) ToCDS() *CDS { - c := &CDS{DS: *d} - c.Hdr = *d.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDS - return c -} - -// Sign signs an RRSet. The signature needs to be filled in with the values: -// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied -// from the RRset. Sign returns a non-nill error when the signing went OK. -// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non -// zero, it is used as-is, otherwise the TTL of the RRset is used as the -// OrigTTL. 
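
// Editorial sketch, not part of the upstream files being removed: ToDS above hashes the
// owner name concatenated with the DNSKEY RDATA (RFC 4034). Reusing the key material
// from this package's own tests, deriving a DS record via the public API looks like:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     257,
		Protocol:  3,
		Algorithm: dns.RSASHA1,
		PublicKey: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ",
	}
	ds := key.ToDS(dns.SHA256) // dns.SHA256 is the DS digest type, distinct from the signing algorithm
	if ds == nil {
		panic("ToDS failed")
	}
	fmt.Printf("keytag=%d digesttype=%d digest=%s\n", ds.KeyTag, ds.DigestType, ds.Digest)
}
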
-func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = rrset[0].Header().Name - rr.Hdr.Class = rrset[0].Header().Class - if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = rrset[0].Header().Ttl - } - rr.TypeCovered = rrset[0].Header().Rrtype - rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) - - if strings.HasPrefix(rrset[0].Header().Name, "*") { - rr.Labels-- // wildcard, remove from label count - } - - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - // For signing, lowercase this name - sigwire.SignerName = strings.ToLower(rr.SignerName) - - // Create the desired binary blob - signdata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signdata) - if err != nil { - return err - } - signdata = signdata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - h := hash.New() - h.Write(signdata) - h.Write(wire) - - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - - return nil -} - -func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { - signature, err := k.Sign(rand.Reader, hashed, hash) - if err != nil { - return nil, err - } - - switch alg { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: - return signature, nil - - case ECDSAP256SHA256, ECDSAP384SHA384: - ecdsaSignature := &struct { - R, S *big.Int - }{} - if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { - return nil, err - } - - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - - signature := intToBytes(ecdsaSignature.R, intlen) - signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) - return signature, nil - - // There is no defined interface for what a DSA backed crypto.Signer returns - case DSA, DSANSEC3SHA1: - // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - // signature := []byte{byte(t)} - // signature = append(signature, intToBytes(r1, 20)...) - // signature = append(signature, intToBytes(s1, 20)...) - // rr.Signature = signature - } - - return nil, ErrAlg -} - -// Verify validates an RRSet with the signature and key. This is only the -// cryptographic test, the signature validity period must be checked separately. -// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. -func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { - // First the easy checks - if !IsRRset(rrset) { - return ErrRRset - } - if rr.KeyTag != k.KeyTag() { - return ErrKey - } - if rr.Hdr.Class != k.Hdr.Class { - return ErrKey - } - if rr.Algorithm != k.Algorithm { - return ErrKey - } - if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { - return ErrKey - } - if k.Protocol != 3 { - return ErrKey - } - - // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. 
Also check that type and - // class matches the RRSIG record. - if rrset[0].Header().Class != rr.Hdr.Class { - return ErrRRset - } - if rrset[0].Header().Rrtype != rr.TypeCovered { - return ErrRRset - } - - // RFC 4035 5.3.2. Reconstructing the Signed Data - // Copy the sig, except the rrsig data - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = strings.ToLower(rr.SignerName) - // Create the desired binary blob - signeddata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signeddata) - if err != nil { - return err - } - signeddata = signeddata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - switch rr.Algorithm { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: - // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? - pubkey := k.publicKeyRSA() // Get the key - if pubkey == nil { - return ErrKey - } - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) - - case ECDSAP256SHA256, ECDSAP384SHA384: - pubkey := k.publicKeyECDSA() - if pubkey == nil { - return ErrKey - } - - // Split sigbuf into the r and s coordinates - r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) - s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { - return nil - } - return ErrSig - - default: - return ErrAlg - } -} - -// ValidityPeriod uses RFC1982 serial arithmetic to calculate -// if a signature period is valid. If t is the zero time, the -// current time is taken other t is. Returns true if the signature -// is valid at the given time, otherwise returns false. -func (rr *RRSIG) ValidityPeriod(t time.Time) bool { - var utc int64 - if t.IsZero() { - utc = time.Now().UTC().Unix() - } else { - utc = t.UTC().Unix() - } - modi := (int64(rr.Inception) - utc) / year68 - mode := (int64(rr.Expiration) - utc) / year68 - ti := int64(rr.Inception) + (modi * year68) - te := int64(rr.Expiration) + (mode * year68) - return ti <= utc && utc <= te -} - -// Return the signatures base64 encodedig sigdata as a byte slice. -func (rr *RRSIG) sigBuf() []byte { - sigbuf, err := fromBase64([]byte(rr.Signature)) - if err != nil { - return nil - } - return sigbuf -} - -// publicKeyRSA returns the RSA public key from a DNSKEY record. -func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - - // RFC 2537/3110, section 2. 
RSA Public KEY Resource Records - // Length is in the 0th byte, unless its zero, then it - // it in bytes 1 and 2 and its a 16 bit number - explen := uint16(keybuf[0]) - keyoff := 1 - if explen == 0 { - explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) - keyoff = 3 - } - pubkey := new(rsa.PublicKey) - - pubkey.N = big.NewInt(0) - shift := uint64((explen - 1) * 8) - expo := uint64(0) - for i := int(explen - 1); i > 0; i-- { - expo += uint64(keybuf[keyoff+i]) << shift - shift -= 8 - } - // Remainder - expo += uint64(keybuf[keyoff]) - if expo > (2<<31)+1 { - // Larger expo than supported. - // println("dns: F5 primes (or larger) are not supported") - return nil - } - pubkey.E = int(expo) - - pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) - return pubkey -} - -// publicKeyECDSA returns the Curve public key from the DNSKEY record. -func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - pubkey := new(ecdsa.PublicKey) - switch k.Algorithm { - case ECDSAP256SHA256: - pubkey.Curve = elliptic.P256() - if len(keybuf) != 64 { - // wrongly encoded key - return nil - } - case ECDSAP384SHA384: - pubkey.Curve = elliptic.P384() - if len(keybuf) != 96 { - // Wrongly encoded key - return nil - } - } - pubkey.X = big.NewInt(0) - pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) - pubkey.Y = big.NewInt(0) - pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) - return pubkey -} - -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) - pubkey.Parameters.P = big.NewInt(0).SetBytes(p) - pubkey.Parameters.G = big.NewInt(0).SetBytes(g) - pubkey.Y = big.NewInt(0).SetBytes(y) - return pubkey -} - -type wireSlice [][]byte - -func (p wireSlice) Len() int { return len(p) } -func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p wireSlice) Less(i, j int) bool { - _, ioff, _ := UnpackDomainName(p[i], 0) - _, joff, _ := UnpackDomainName(p[j], 0) - return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 -} - -// Return the raw signature data. -func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { - wires := make(wireSlice, len(rrset)) - for i, r := range rrset { - r1 := r.copy() - r1.Header().Ttl = s.OrigTtl - labels := SplitDomainName(r1.Header().Name) - // 6.2. Canonical RR Form. (4) - wildcards - if len(labels) > int(s.Labels) { - // Wildcard - r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." - } - // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - r1.Header().Name = strings.ToLower(r1.Header().Name) - // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. - // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, - // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, - // SRV, DNAME, A6 - // - // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): - // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record - // that needs conversion to lowercase, and twice at that. Since HINFO - // records contain no domain names, they are not subject to case - // conversion. 
- switch x := r1.(type) { - case *NS: - x.Ns = strings.ToLower(x.Ns) - case *CNAME: - x.Target = strings.ToLower(x.Target) - case *SOA: - x.Ns = strings.ToLower(x.Ns) - x.Mbox = strings.ToLower(x.Mbox) - case *MB: - x.Mb = strings.ToLower(x.Mb) - case *MG: - x.Mg = strings.ToLower(x.Mg) - case *MR: - x.Mr = strings.ToLower(x.Mr) - case *PTR: - x.Ptr = strings.ToLower(x.Ptr) - case *MINFO: - x.Rmail = strings.ToLower(x.Rmail) - x.Email = strings.ToLower(x.Email) - case *MX: - x.Mx = strings.ToLower(x.Mx) - case *NAPTR: - x.Replacement = strings.ToLower(x.Replacement) - case *KX: - x.Exchanger = strings.ToLower(x.Exchanger) - case *SRV: - x.Target = strings.ToLower(x.Target) - case *DNAME: - x.Target = strings.ToLower(x.Target) - } - // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, r1.len()+1) // +1 to be safe(r) - off, err1 := PackRR(r1, wire, 0, nil, false) - if err1 != nil { - return nil, err1 - } - wire = wire[:off] - wires[i] = wire - } - sort.Sort(wires) - for i, wire := range wires { - if i > 0 && bytes.Equal(wire, wires[i-1]) { - continue - } - buf = append(buf, wire...) - } - return buf, nil -} - -func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go RRSIG packing - off, err := packUint16(sw.TypeCovered, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(sw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(sw.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(sw.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(sw.SignerName, msg, off, nil, false) - if err != nil { - return off, err - } - return off, nil -} - -func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { - // copied from zmsg.go DNSKEY packing - off, err := packUint16(dw.Flags, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(dw.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(dw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(dw.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keygen.go deleted file mode 100644 index 5e4b7741a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keygen.go +++ /dev/null @@ -1,156 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" -) - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. -// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. 
-func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - } - - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return priv, nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return priv, nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch k.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) - return priv, nil - default: - return nil, ErrAlg - } -} - -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - var intlen int - switch k.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)).Bytes() - if len(i) < 256 { - buf = make([]byte, 1, 1+len(i)) - buf[0] = uint8(len(i)) - } else { - buf = make([]byte, 3, 3+len(i)) - buf[0] = 0 - buf[1] = uint8(len(i) >> 8) - buf[2] = uint8(len(i)) - } - buf = append(buf, i...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int, intlen int) []byte { - buf := intToBytes(_X, intlen) - buf = append(buf, intToBytes(_Y, intlen)...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) - buf := []byte{byte(t)} - buf = append(buf, intToBytes(_Q, 20)...) - buf = append(buf, intToBytes(_P, 64+t*8)...) - buf = append(buf, intToBytes(_G, 64+t*8)...) - buf = append(buf, intToBytes(_Y, 64+t*8)...) 
- return buf -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keyscan.go deleted file mode 100644 index 4f8d830b8..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ /dev/null @@ -1,249 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "io" - "math/big" - "strconv" - "strings" -) - -// NewPrivateKey returns a PrivateKey by parsing the string s. -// s should be in the same form of the BIND private key files. -func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { - if s == "" || s[len(s)-1] != '\n' { // We need a closing newline - return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") - } - return k.ReadPrivateKey(strings.NewReader(s), "") -} - -// ReadPrivateKey reads a private key from the io.Reader q. The string file is -// only used in error reporting. -// The public key must be known, because some cryptographic algorithms embed -// the public inside the privatekey. -func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { - m, err := parseKey(q, file) - if m == nil { - return nil, err - } - if _, ok := m["private-key-format"]; !ok { - return nil, ErrPrivKey - } - if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { - return nil, ErrPrivKey - } - // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) - if err != nil { - return nil, ErrPrivKey - } - switch uint8(algo) { - case DSA: - priv, err := readPrivateKeyDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case RSAMD5: - fallthrough - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: - priv, err := readPrivateKeyRSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyRSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: - priv, err := readPrivateKeyECDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyECDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - default: - return nil, ErrPrivKey - } -} - -// Read a private key (file) string and create a public key. Return the private key. 
-func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { - p := new(rsa.PrivateKey) - p.Primes = []*big.Int{nil, nil} - for k, v := range m { - switch k { - case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - switch k { - case "modulus": - p.PublicKey.N = big.NewInt(0) - p.PublicKey.N.SetBytes(v1) - case "publicexponent": - i := big.NewInt(0) - i.SetBytes(v1) - p.PublicKey.E = int(i.Int64()) // int64 should be large enough - case "privateexponent": - p.D = big.NewInt(0) - p.D.SetBytes(v1) - case "prime1": - p.Primes[0] = big.NewInt(0) - p.Primes[0].SetBytes(v1) - case "prime2": - p.Primes[1] = big.NewInt(0) - p.Primes[1].SetBytes(v1) - } - case "exponent1", "exponent2", "coefficient": - // not used in Go (yet) - case "created", "publish", "activate": - // not used in Go (yet) - } - } - return p, nil -} - -func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { - p := new(dsa.PrivateKey) - p.X = big.NewInt(0) - for k, v := range m { - switch k { - case "private_value(x)": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.X.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { - p := new(ecdsa.PrivateKey) - p.D = big.NewInt(0) - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.D.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -// parseKey reads a private key from r. It returns a map[string]string, -// with the key-value pairs, or an error when the file is not correct. -func parseKey(r io.Reader, file string) (map[string]string, error) { - s := scanInit(r) - m := make(map[string]string) - c := make(chan lex) - k := "" - // Start the lexer - go klexer(s, c) - for l := range c { - // It should alternate - switch l.value { - case zKey: - k = l.token - case zValue: - if k == "" { - return nil, &ParseError{file, "no private key seen", l} - } - //println("Setting", strings.ToLower(k), "to", l.token, "b") - m[strings.ToLower(k)] = l.token - k = "" - } - } - return m, nil -} - -// klexer scans the sourcefile and returns tokens on the channel c. 
-func klexer(s *scan, c chan lex) { - var l lex - str := "" // Hold the current read text - commt := false - key := true - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - switch x { - case ':': - if commt { - break - } - l.token = str - if key { - l.value = zKey - c <- l - // Next token is a space, eat it - s.tokenText() - key = false - str = "" - } else { - l.value = zValue - } - case ';': - commt = true - case '\n': - if commt { - // Reset a comment - commt = false - } - l.value = zValue - l.token = str - c <- l - str = "" - commt = false - key = true - default: - if commt { - break - } - str += string(x) - } - x, err = s.tokenText() - } - if len(str) > 0 { - // Send remainder - l.token = str - l.value = zValue - c <- l - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_privkey.go deleted file mode 100644 index 56f3ea934..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_privkey.go +++ /dev/null @@ -1,85 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "math/big" - "strconv" -) - -const format = "Private-key-format: v1.3\n" - -// PrivateKeyString converts a PrivateKey to a string. This string has the same -// format as the private-key-file of BIND9 (Private-key-format: v1.3). -// It needs some info from the key (the algorithm), so its a method of the DNSKEY -// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey -func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { - algorithm := strconv.Itoa(int(r.Algorithm)) - algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" - - switch p := p.(type) { - case *rsa.PrivateKey: - modulus := toBase64(p.PublicKey.N.Bytes()) - e := big.NewInt(int64(p.PublicKey.E)) - publicExponent := toBase64(e.Bytes()) - privateExponent := toBase64(p.D.Bytes()) - prime1 := toBase64(p.Primes[0].Bytes()) - prime2 := toBase64(p.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - one := big.NewInt(1) - p1 := big.NewInt(0).Sub(p.Primes[0], one) - q1 := big.NewInt(0).Sub(p.Primes[1], one) - exp1 := big.NewInt(0).Mod(p.D, p1) - exp2 := big.NewInt(0).Mod(p.D, q1) - coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) - - exponent1 := toBase64(exp1.Bytes()) - exponent2 := toBase64(exp2.Bytes()) - coefficient := toBase64(coeff.Bytes()) - - return format + - "Algorithm: " + algorithm + "\n" + - "Modulus: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" - - case *ecdsa.PrivateKey: - var intlen int - switch r.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - private := toBase64(intToBytes(p.D, intlen)) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - case *dsa.PrivateKey: - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := 
toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) - return format + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" - - default: - return "" - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_test.go deleted file mode 100644 index ca085ed3b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dnssec_test.go +++ /dev/null @@ -1,733 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "reflect" - "strings" - "testing" - "time" -) - -func getKey() *DNSKEY { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - return key -} - -func getSoa() *SOA { - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - return soa -} - -func TestGenerateEC(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, _ := key.Generate(256) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateDSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = DSA - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestSecure(t *testing.T) { - soa := getSoa() - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = 14400 - sig.KeyTag = 12051 - sig.SignerName = "miek.nl." - sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M=" - - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - // It should validate. Period is checked separately, so this will keep on working - if sig.Verify(key, []RR{soa}) != nil { - t.Error("failure to validate") - } -} - -func TestSignature(t *testing.T) { - sig := new(RRSIG) - sig.Hdr.Name = "miek.nl." - sig.Hdr.Class = ClassINET - sig.Hdr.Ttl = 3600 - sig.TypeCovered = TypeDNSKEY - sig.Algorithm = RSASHA1 - sig.Labels = 2 - sig.OrigTtl = 4000 - sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970 - sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970 - sig.KeyTag = 34641 - sig.SignerName = "miek.nl." - sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - // Should not be valid - if sig.ValidityPeriod(time.Now()) { - t.Error("should not be valid") - } - - sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980 - sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100 - if !sig.ValidityPeriod(time.Now()) { - t.Error("should be valid") - } -} - -func TestSignVerify(t *testing.T) { - // The record we want to sign - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - soa1 := new(SOA) - soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa1.Ns = "open.nlnetlabs.nl." - soa1.Mbox = "miekg.atoom.net." - soa1.Serial = 1293945905 - soa1.Refresh = 14400 - soa1.Retry = 3600 - soa1.Expire = 604800 - soa1.Minttl = 86400 - - srv := new(SRV) - srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0} - srv.Port = 1000 - srv.Weight = 800 - srv.Target = "web1.miek.nl." - - hinfo := &HINFO{ - Hdr: RR_Header{ - Name: "miek.nl.", - Rrtype: TypeHINFO, - Class: ClassINET, - Ttl: 3789, - }, - Cpu: "X", - Os: "Y", - } - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(512) - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = soa.Hdr.Rrtype - sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3 - sig.OrigTtl = soa.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - - for _, r := range []RR{soa, soa1, srv, hinfo} { - if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{r}); err != nil { - t.Error("failure to sign the record:", err) - continue - } - if err := sig.Verify(key, []RR{r}); err != nil { - t.Error("failure to validate") - continue - } - t.Logf("validated: %s", r.Header().Name) - } -} - -func Test65534(t *testing.T) { - t6 := new(RFC3597) - t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0} - t6.Rdata = "505D870001" - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = t6.Hdr.Rrtype - sig.Labels = uint8(CountLabel(t6.Hdr.Name)) - sig.OrigTtl = t6.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to sign the TYPE65534 record") - } - if err := sig.Verify(key, []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to validate") - } else { - t.Logf("validated: %s", t6.Header().Name) - } -} - -func TestDnskey(t *testing.T) { - pubkey, err := ReadRR(strings.NewReader(` -miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} -`), "Kmiek.nl.+010+05240.key") - if err != nil { - t.Fatal(err) - } - privStr := `Private-key-format: v1.3 -Algorithm: 10 (RSASHA512) -Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= -PublicExponent: AQAB -PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE= -Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ== -Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw== -Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== -Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== -Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== -` - privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), - "Kmiek.nl.+010+05240.private") - if err != nil { - t.Fatal(err) - } - if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { - t.Error("pubkey is not what we've read") - } - if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { - t.Error("privkey is not what we've read") - t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) - } -} - -func TestTag(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - tag := key.KeyTag() - if tag != 12051 { - t.Errorf("wrong key tag: %d for key %v", tag, key) - } -} - -func TestKeyRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - priv, _ := key.Generate(2048) - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = soa.Hdr.Ttl - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - - if err := sig.Sign(priv.(*rsa.PrivateKey), []RR{soa}); err != nil { - t.Error("failed to sign") - return - } - if err := sig.Verify(key, []RR{soa}); err != nil { - t.Error("failed to verify") - } -} - -func TestKeyToDS(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - ds := key.ToDS(SHA1) - if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" { - t.Errorf("wrong DS digest for SHA1\n%v", ds) - } -} - -func TestSignRSA(t *testing.T) { - pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ" - - priv := `Private-key-format: v1.3 -Algorithm: 5 (RSASHA1) -Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k= -PublicExponent: AQAB -PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk= -Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw== -Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw== -Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ== -Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw== -Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw== -Created: 20110302104537 -Publish: 20110302104537 -Activate: 20110302104537` - - xk, _ := NewRR(pub) - k := xk.(*DNSKEY) - p, err := k.NewPrivateKey(priv) - if err != nil { - t.Error(err) - } - switch priv := p.(type) { - case *rsa.PrivateKey: - if 65537 != priv.PublicKey.E { - t.Error("exponenent should be 65537") - } - default: - t.Errorf("we should have read an RSA key: %v", priv) - } - if k.KeyTag() != 37350 { - t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k) - } - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." 
- soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = k.KeyTag() - sig.SignerName = k.Hdr.Name - sig.Algorithm = k.Algorithm - - sig.Sign(p.(*rsa.PrivateKey), []RR{soa}) - if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" { - t.Errorf("signature is not correct: %v", sig) - } -} - -func TestSignVerifyECDSA(t *testing.T) { - pub := `example.net. 3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - priv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - - eckey, err := NewRR(pub) - if err != nil { - t.Fatal(err) - } - privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) - if err != nil { - t.Fatal(err) - } - // TODO: Create separate test for this - ds := eckey.(*DNSKEY).ToDS(SHA384) - if ds.KeyTag != 10771 { - t.Fatal("wrong keytag on DS") - } - if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { - t.Fatal("wrong DS Digest") - } - a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") - sig := new(RRSIG) - sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration, _ = StringToTime("20100909102025") - sig.Inception, _ = StringToTime("20100812102025") - sig.KeyTag = eckey.(*DNSKEY).KeyTag() - sig.SignerName = eckey.(*DNSKEY).Hdr.Name - sig.Algorithm = eckey.(*DNSKEY).Algorithm - - if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{a}) != nil { - t.Fatal("failure to sign the record") - } - - if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil { - t.Fatalf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - eckey.(*DNSKEY).String(), - a.String(), - sig.String(), - eckey.(*DNSKEY).PrivateKeyString(privkey), - err, - ) - } -} - -func TestSignVerifyECDSA2(t *testing.T) { - srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") - if err != nil { - t.Fatal(err) - } - srv := srv1.(*SRV) - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, err := key.Generate(256) - if err != nil { - t.Fatal("failure to generate key") - } - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = srv.Hdr.Rrtype - sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 - sig.OrigTtl = srv.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = ECDSAP256SHA256 - - if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { - t.Fatal("failure to sign the record") - } - - err = sig.Verify(key, []RR{srv}) - if err != nil { - t.Logf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - key.String(), - srv.String(), - sig.String(), - key.PrivateKeyString(privkey), - err, - ) - } -} - -// Here the test vectors from the relevant RFCs are checked. -// rfc6605 6.1 -func TestRFC6605P256(t *testing.T) { - exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 ( - GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb - krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )` - exPriv := `Private-key-format: v1.2 -Algorithm: 13 (ECDSAP256SHA256) -PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 55648 13 2 ( - b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 - e2770ce6d6e37df61d17 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 ( - 20100909100439 20100812100439 55648 example.net. - qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA - yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909100439") - ourRRSIG.Inception, _ = StringToTime("20100812100439") - err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -// rfc6605 6.2 -func TestRFC6605P384(t *testing.T) { - exDNSKEY := `example.net. 
3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - exPriv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 10771 14 4 ( - 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 - d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 - 6df983d6 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 ( - 20100909102025 20100812102025 10771 example.net. - /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP - 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz - WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909102025") - ourRRSIG.Inception, _ = StringToTime("20100812102025") - err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -func TestInvalidRRSet(t *testing.T) { - goodRecords := make([]RR, 2) - goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - goodRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} - - // Generate key - keyname := "cloudflare.com." 
- key := &DNSKEY{ - Hdr: RR_Header{Name: keyname, Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 0}, - Algorithm: ECDSAP256SHA256, - Flags: ZONE, - Protocol: 3, - } - privatekey, err := key.Generate(256) - if err != nil { - t.Fatal(err.Error()) - } - - // Need to fill in: Inception, Expiration, KeyTag, SignerName and Algorithm - curTime := time.Now() - signature := &RRSIG{ - Inception: uint32(curTime.Unix()), - Expiration: uint32(curTime.Add(time.Hour).Unix()), - KeyTag: key.KeyTag(), - SignerName: keyname, - Algorithm: ECDSAP256SHA256, - } - - // Inconsistent name between records - badRecords := make([]RR, 2) - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &TXT{Hdr: RR_Header{Name: "nama.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent names considered valid") - } - - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &A{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeA, Class: ClassINET, Ttl: 0}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent record types considered valid") - } - - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassCHAOS, Ttl: 0}, Txt: []string{"_o/"}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent record class considered valid") - } - - // Sign the good record set and then make sure verification fails on the bad record set - if err := signature.Sign(privatekey.(crypto.Signer), goodRecords); err != nil { - t.Fatal("Signing good records failed") - } - - if err := signature.Verify(key, badRecords); err != ErrRRset { - t.Fatal("Verification did not return ErrRRset with inconsistent records") - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/doc.go deleted file mode 100644 index e38753d7d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/doc.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Package dns implements a full featured interface to the Domain Name System. -Server- and client-side programming is supported. -The package allows complete control over what is send out to the DNS. The package -API follows the less-is-more principle, by presenting a small, clean interface. - -The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, -TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. -Note that domain names MUST be fully qualified, before sending them, unqualified -names in a message will result in a packing failure. - -Resource records are native types. They are not stored in wire format. -Basic usage pattern for creating a new resource record: - - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, - Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." - -Or directly from a string: - - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") - -Or when the default TTL (3600) and class (IN) suit you: - - mx, err := dns.NewRR("miek.nl. 
MX 10 mx.miek.nl.") - -Or even: - - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") - -In the DNS messages are exchanged, these messages contain resource -records (sets). Use pattern for creating a message: - - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - -Or when not certain if the domain name is fully qualified: - - m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) - -The message m is now a message with the question section set to ask -the MX records for the miek.nl. zone. - -The following is slightly more verbose, but more flexible: - - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} - -After creating a message it can be send. -Basic use pattern for synchronous querying the DNS at a -server configured on 127.0.0.1 and port 53: - - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") - -Suppressing multiple outstanding queries (with the same question, type and -class) is as easy as setting: - - c.SingleInflight = true - -If these "advanced" features are not needed, a simple UDP query can be send, -with: - - in, err := dns.Exchange(m1, "127.0.0.1:53") - -When this functions returns you will get dns message. A dns message consists -out of four sections. -The question section: in.Question, the answer section: in.Answer, -the authority section: in.Ns and the additional section: in.Extra. - -Each of these sections (except the Question section) contain a []RR. Basic -use pattern for accessing the rdata of a TXT RR as the first RR in -the Answer section: - - if t, ok := in.Answer[0].(*dns.TXT); ok { - // do something with t.Txt - } - -Domain Name and TXT Character String Representations - -Both domain names and TXT character strings are converted to presentation -form both when unpacked and when converted to strings. - -For TXT character strings, tabs, carriage returns and line feeds will be -converted to \t, \r and \n respectively. Back slashes and quotations marks -will be escaped. Bytes below 32 and above 127 will be converted to \DDD -form. - -For domain names, in addition to the above rules brackets, periods, -spaces, semicolons and the at symbol are escaped. - -DNSSEC - -DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It -uses public key cryptography to sign resource records. The -public keys are stored in DNSKEY records and the signatures in RRSIG records. - -Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit -to a request. - - m := new(dns.Msg) - m.SetEdns0(4096, true) - -Signature generation, signature verification and key generation are all supported. - -DYNAMIC UPDATES - -Dynamic updates reuses the DNS message format, but renames three of -the sections. Question is Zone, Answer is Prerequisite, Authority is -Update, only the Additional is not renamed. See RFC 2136 for the gory details. - -You can set a rather complex set of rules for the existence of absence of -certain resource records or names in a zone to specify if resource records -should be added or removed. The table from RFC 2136 supplemented with the Go -DNS function shows which functions exist to specify the prerequisites. 
- - 3.2.4 - Table Of Metavalues Used In Prerequisite Section - - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used - -The prerequisite section can also be left empty. -If you have decided on the prerequisites you can tell what RRs should -be added or deleted. The next table shows the options you have and -what functions to call. - - 3.4.2.6 - Table Of Metavalues Used In Update Section - - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert - -TRANSACTION SIGNATURE - -An TSIG or transaction signature adds a HMAC TSIG record to each message sent. -The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. - -Basic use pattern when querying with a TSIG name "axfr." (note that these key names -must be fully qualified - as they are domain names) and the base64 secret -"so6ZGir4GPAqINNh9U5c3A==": - - c := new(dns.Client) - c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - ... - // When sending the TSIG RR is calculated and filled in before sending - -When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with -TSIG, this is the basic use pattern. In this example we request an AXFR for -miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A==" -and using the server 176.58.119.54: - - t := new(dns.Transfer) - m := new(dns.Msg) - t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m.SetAxfr("miek.nl.") - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - c, err := t.In(m, "176.58.119.54:53") - for r := range c { ... } - -You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. -If something is not correct an error is returned. - -Basic use pattern validating and replying to a message that has TSIG set. - - server := &dns.Server{Addr: ":53", Net: "udp"} - server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - go server.ListenAndServe() - dns.HandleFunc(".", handleRequest) - - func handleRequest(w dns.ResponseWriter, r *dns.Msg) { - m := new(dns.Msg) - m.SetReply(r) - if r.IsTsig() != nil { - if w.TsigStatus() == nil { - // *Msg r has an TSIG record and it was validated - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - } else { - // *Msg r has an TSIG records and it was not valided - } - } - w.WriteMsg(m) - } - -PRIVATE RRS - -RFC 6895 sets aside a range of type codes for private use. This range -is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these -can be used, before requesting an official type code from IANA. - -see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more -information. - -EDNS0 - -EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated -by RFC 6891. It defines an new RR type, the OPT RR, which is then completely -abused. 
-Basic use pattern for creating an (empty) OPT RR: - - o := new(dns.OPT) - o.Hdr.Name = "." // MUST be the root zone, per definition. - o.Hdr.Rrtype = dns.TypeOPT - -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) -interfaces. Currently only a few have been standardized: EDNS0_NSID -(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note -that these options may be combined in an OPT RR. -Basic use pattern for a server to check if (and which) options are set: - - // o is a dns.OPT - for _, s := range o.Option { - switch e := s.(type) { - case *dns.EDNS0_NSID: - // do stuff with e.Nsid - case *dns.EDNS0_SUBNET: - // access e.Family, e.Address, etc. - } - } - -SIG(0) - -From RFC 2931: - - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. - -It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared -secret approach in TSIG. -Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and -RSASHA512. - -Signing subsequent messages in multi-message sessions is not implemented. -*/ -package dns diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dyn_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dyn_test.go deleted file mode 100644 index 09986a5e4..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/dyn_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package dns - -// Find better solution diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns.go deleted file mode 100644 index dbff3714c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns.go +++ /dev/null @@ -1,597 +0,0 @@ -package dns - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "net" - "strconv" -) - -// EDNS0 Option codes. -const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (RFC5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (RFC6891) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0COOKIE = 0xa // EDNS0 Cookie - EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828) - EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) - _DO = 1 << 15 // dnssec ok -) - -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. 
-type OPT struct { - Hdr RR_Header - Option []EDNS0 `dns:"opt"` -} - -func (rr *OPT) String() string { - s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " - if rr.Do() { - s += "flags: do; " - } else { - s += "flags: ; " - } - s += "udp: " + strconv.Itoa(int(rr.UDPSize())) - - for _, o := range rr.Option { - switch o.(type) { - case *EDNS0_NSID: - s += "\n; NSID: " + o.String() - h, e := o.pack() - var r string - if e == nil { - for _, c := range h { - r += "(" + string(c) + ")" - } - s += " " + r - } - case *EDNS0_SUBNET: - s += "\n; SUBNET: " + o.String() - if o.(*EDNS0_SUBNET).DraftOption { - s += " (draft)" - } - case *EDNS0_COOKIE: - s += "\n; COOKIE: " + o.String() - case *EDNS0_UL: - s += "\n; UPDATE LEASE: " + o.String() - case *EDNS0_LLQ: - s += "\n; LONG LIVED QUERIES: " + o.String() - case *EDNS0_DAU: - s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() - case *EDNS0_DHU: - s += "\n; DS HASH UNDERSTOOD: " + o.String() - case *EDNS0_N3U: - s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() - case *EDNS0_LOCAL: - s += "\n; LOCAL OPT: " + o.String() - } - } - return s -} - -func (rr *OPT) len() int { - l := rr.Hdr.len() - for i := 0; i < len(rr.Option); i++ { - l += 4 // Account for 2-byte option code and 2-byte option length. - lo, _ := rr.Option[i].pack() - l += len(lo) - } - return l -} - -// return the old value -> delete SetVersion? - -// Version returns the EDNS version used. Only zero is defined. -func (rr *OPT) Version() uint8 { - return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) -} - -// SetVersion sets the version of EDNS. This is usually zero. -func (rr *OPT) SetVersion(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) -} - -// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). -func (rr *OPT) ExtendedRcode() int { - return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 -} - -// SetExtendedRcode sets the EDNS extended RCODE field. -func (rr *OPT) SetExtendedRcode(v uint8) { - if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! - return - } - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) -} - -// UDPSize returns the UDP buffer size. -func (rr *OPT) UDPSize() uint16 { - return rr.Hdr.Class -} - -// SetUDPSize sets the UDP buffer size. -func (rr *OPT) SetUDPSize(size uint16) { - rr.Hdr.Class = size -} - -// Do returns the value of the DO (DNSSEC OK) bit. -func (rr *OPT) Do() bool { - return rr.Hdr.Ttl&_DO == _DO -} - -// SetDo sets the DO (DNSSEC OK) bit. -// If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. -func (rr *OPT) SetDo(do ...bool) { - if len(do) == 1 { - if do[0] { - rr.Hdr.Ttl |= _DO - } else { - rr.Hdr.Ttl &^= _DO - } - } else { - rr.Hdr.Ttl |= _DO - } -} - -// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. -type EDNS0 interface { - // Option returns the option code for the option. - Option() uint16 - // pack returns the bytes of the option data. - pack() ([]byte, error) - // unpack sets the data as found in the buffer. Is also sets - // the length of the slice as the length of the option data. - unpack([]byte) error - // String returns the string representation of the option. - String() string -} - -// EDNS0_NSID option is used to retrieve a nameserver -// identifier. When sending a request Nsid must be set to the empty string -// The identifier is an opaque string encoded as hex. 
-// Basic use pattern for creating an nsid option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_NSID) -// e.Code = dns.EDNS0NSID -// e.Nsid = "AA" -// o.Option = append(o.Option, e) -type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded -} - -func (e *EDNS0_NSID) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Nsid) - if err != nil { - return nil, err - } - return h, nil -} - -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } -func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return string(e.Nsid) } - -// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver -// an idea of where the client lives. It can then give back a different -// answer depending on the location or network topology. -// Basic use pattern for creating an subnet option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET -// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 -// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 -// e.SourceScope = 0 -// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 -// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 -// o.Option = append(o.Option, e) -// -// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic -// for which netmask applies to the address. This code will parse all the -// available bits when unpacking (up to optlen). When packing it will apply -// SourceNetmask. If you need more advanced logic, patches welcome and good luck. -type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET - Family uint16 // 1 for IP, 2 for IP6 - SourceNetmask uint8 - SourceScope uint8 - Address net.IP - DraftOption bool // Set to true if using the old (0x50fa) option code -} - -func (e *EDNS0_SUBNET) Option() uint16 { - if e.DraftOption { - return EDNS0SUBNETDRAFT - } - return EDNS0SUBNET -} - -func (e *EDNS0_SUBNET) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint16(b[0:], e.Family) - b[2] = e.SourceNetmask - b[3] = e.SourceScope - switch e.Family { - case 1: - if e.SourceNetmask > net.IPv4len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address.To4()) != net.IPv4len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) - case 2: - if e.SourceNetmask > net.IPv6len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address) != net.IPv6len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) 
- default: - return nil, errors.New("dns: bad address family") - } - return b, nil -} - -func (e *EDNS0_SUBNET) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Family = binary.BigEndian.Uint16(b) - e.SourceNetmask = b[2] - e.SourceScope = b[3] - switch e.Family { - case 1: - if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { - return errors.New("dns: bad netmask") - } - addr := make([]byte, net.IPv4len) - for i := 0; i < net.IPv4len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) - case 2: - if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { - return errors.New("dns: bad netmask") - } - addr := make([]byte, net.IPv6len) - for i := 0; i < net.IPv6len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], - addr[11], addr[12], addr[13], addr[14], addr[15]} - default: - return errors.New("dns: bad address family") - } - return nil -} - -func (e *EDNS0_SUBNET) String() (s string) { - if e.Address == nil { - s = "" - } else if e.Address.To4() != nil { - s = e.Address.String() - } else { - s = "[" + e.Address.String() + "]" - } - s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) - return -} - -// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_COOKIE) -// e.Code = dns.EDNS0COOKIE -// e.Cookie = "24a5ac.." -// o.Option = append(o.Option, e) -// -// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is -// always 8 bytes. It may then optionally be followed by the server cookie. The server -// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: -// -// cCookie := o.Cookie[:16] -// sCookie := o.Cookie[16:] -// -// There is no guarantee that the Cookie string has a specific length. -type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data -} - -func (e *EDNS0_COOKIE) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Cookie) - if err != nil { - return nil, err - } - return h, nil -} - -func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } -func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } -func (e *EDNS0_COOKIE) String() string { return e.Cookie } - -// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set -// an expiration on an update RR. This is helpful for clients that cannot clean -// up after themselves. This is a draft RFC and more information can be found at -// http://files.dns-sd.org/draft-sekar-dns-ul.txt -// -// o := new(dns.OPT) -// o.Hdr.Name = "." 
-// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_UL) -// e.Code = dns.EDNS0UL -// e.Lease = 120 // in seconds -// o.Option = append(o.Option, e) -type EDNS0_UL struct { - Code uint16 // Always EDNS0UL - Lease uint32 -} - -func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } -func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } - -// Copied: http://golang.org/src/pkg/net/dnsmsg.go -func (e *EDNS0_UL) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, e.Lease) - return b, nil -} - -func (e *EDNS0_UL) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Lease = binary.BigEndian.Uint32(b) - return nil -} - -// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 -// Implemented for completeness, as the EDNS0 type code is assigned. -type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ - Version uint16 - Opcode uint16 - Error uint16 - Id uint64 - LeaseLife uint32 -} - -func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } - -func (e *EDNS0_LLQ) pack() ([]byte, error) { - b := make([]byte, 18) - binary.BigEndian.PutUint16(b[0:], e.Version) - binary.BigEndian.PutUint16(b[2:], e.Opcode) - binary.BigEndian.PutUint16(b[4:], e.Error) - binary.BigEndian.PutUint64(b[6:], e.Id) - binary.BigEndian.PutUint32(b[14:], e.LeaseLife) - return b, nil -} - -func (e *EDNS0_LLQ) unpack(b []byte) error { - if len(b) < 18 { - return ErrBuf - } - e.Version = binary.BigEndian.Uint16(b[0:]) - e.Opcode = binary.BigEndian.Uint16(b[2:]) - e.Error = binary.BigEndian.Uint16(b[4:]) - e.Id = binary.BigEndian.Uint64(b[6:]) - e.LeaseLife = binary.BigEndian.Uint32(b[14:]) - return nil -} - -func (e *EDNS0_LLQ) String() string { - s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + - " " + strconv.FormatUint(uint64(e.LeaseLife), 10) - return s -} - -type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU - AlgCode []uint8 -} - -func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DAU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU - AlgCode []uint8 -} - -func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DHU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U - AlgCode []uint8 -} - -func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_N3U) String() string { - // Re-use the hash map - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) 
- } - } - return s -} - -type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE - Expire uint32 -} - -func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } - -func (e *EDNS0_EXPIRE) pack() ([]byte, error) { - b := make([]byte, 4) - b[0] = byte(e.Expire >> 24) - b[1] = byte(e.Expire >> 16) - b[2] = byte(e.Expire >> 8) - b[3] = byte(e.Expire) - return b, nil -} - -func (e *EDNS0_EXPIRE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Expire = binary.BigEndian.Uint32(b) - return nil -} - -// The EDNS0_LOCAL option is used for local/experimental purposes. The option -// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] -// (RFC6891), although any unassigned code can actually be used. The content of -// the option is made available in Data, unaltered. -// Basic use pattern for creating a local option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_LOCAL) -// e.Code = dns.EDNS0LOCALSTART -// e.Data = []byte{72, 82, 74} -// o.Option = append(o.Option, e) -type EDNS0_LOCAL struct { - Code uint16 - Data []byte -} - -func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } -func (e *EDNS0_LOCAL) String() string { - return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) -} - -func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil -} - -func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } - return nil -} - -// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep -// the TCP connection alive. See RFC 7828. -type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE - Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; - Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. 
-} - -func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } - -func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { - if e.Timeout != 0 && e.Length != 2 { - return nil, errors.New("dns: timeout specified but length is not 2") - } - if e.Timeout == 0 && e.Length != 0 { - return nil, errors.New("dns: timeout not specified but length is not 0") - } - b := make([]byte, 4+e.Length) - binary.BigEndian.PutUint16(b[0:], e.Code) - binary.BigEndian.PutUint16(b[2:], e.Length) - if e.Length == 2 { - binary.BigEndian.PutUint16(b[4:], e.Timeout) - } - return b, nil -} - -func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Length = binary.BigEndian.Uint16(b[2:4]) - if e.Length != 0 && e.Length != 2 { - return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) - } - if e.Length == 2 { - if len(b) < 6 { - return ErrBuf - } - e.Timeout = binary.BigEndian.Uint16(b[4:6]) - } - return nil -} - -func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { - s = "use tcp keep-alive" - if e.Length == 0 { - s += ", timeout omitted" - } else { - s += fmt.Sprintf(", timeout %dms", e.Timeout*100) - } - return -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns_test.go deleted file mode 100644 index c290b0c8a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/edns_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package dns - -import "testing" - -func TestOPTTtl(t *testing.T) { - e := &OPT{} - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - - // verify the default setting of DO=0 - if e.Do() { - t.Errorf("DO bit should be zero") - } - - // There are 6 possible invocations of SetDo(): - // - // 1. Starting with DO=0, using SetDo() - // 2. Starting with DO=0, using SetDo(true) - // 3. Starting with DO=0, using SetDo(false) - // 4. Starting with DO=1, using SetDo() - // 5. Starting with DO=1, using SetDo(true) - // 6. 
Starting with DO=1, using SetDo(false) - - // verify that invoking SetDo() sets DO=1 (TEST #1) - e.SetDo() - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo(true) works when DO=1 (TEST #5) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should still be non-zero") - } - // verify that we can use SetDo(false) to set DO=0 (TEST #6) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should be zero") - } - // verify that if we call SetDo(false) when DO=0 that it is unchanged (TEST #3) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should still be zero") - } - // verify that using SetDo(true) works for DO=0 (TEST #2) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo() works for DO=1 (TEST #4) - e.SetDo() - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - - if e.Version() != 0 { - t.Errorf("version should be non-zero") - } - - e.SetVersion(42) - if e.Version() != 42 { - t.Errorf("set 42, expected %d, got %d", 42, e.Version()) - } - - e.SetExtendedRcode(42) - if e.ExtendedRcode() != 42 { - t.Errorf("set 42, expected %d, got %d", 42-15, e.ExtendedRcode()) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/example_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/example_test.go deleted file mode 100644 index 64c14962c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/example_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package dns_test - -import ( - "errors" - "fmt" - "log" - "net" - - "github.com/miekg/dns" -) - -// Retrieve the MX records for miek.nl. -func ExampleMX() { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.RecursionDesired = true - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, a := range r.Answer { - if mx, ok := a.(*dns.MX); ok { - fmt.Printf("%s\n", mx.String()) - } - } -} - -// Retrieve the DNSKEY records of a zone and convert them -// to DS records for SHA1, SHA256 and SHA384. -func ExampleDS() { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - zone := "miek.nl" - m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY) - m.SetEdns0(4096, true) - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, k := range r.Answer { - if key, ok := k.(*dns.DNSKEY); ok { - for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} { - fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags) - } - } - } -} - -const TypeAPAIR = 0x0F99 - -type APAIR struct { - addr [2]net.IP -} - -func NewAPAIR() dns.PrivateRdata { return new(APAIR) } - -func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() } -func (rd *APAIR) Parse(txt []string) error { - if len(txt) != 2 { - return errors.New("two addresses required for APAIR") - } - for i, s := range txt { - ip := net.ParseIP(s) - if ip == nil { - return errors.New("invalid IP in APAIR text representation") - } - rd.addr[i] = ip - } - return nil -} - -func (rd *APAIR) Pack(buf []byte) (int, error) { - b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...) 
- n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *APAIR) Unpack(buf []byte) (int, error) { - ln := net.IPv4len * 2 - if len(buf) != ln { - return 0, errors.New("invalid length of APAIR rdata") - } - cp := make([]byte, ln) - copy(cp, buf) // clone bytes to use them in IPs - - rd.addr[0] = net.IP(cp[:3]) - rd.addr[1] = net.IP(cp[4:]) - - return len(buf), nil -} - -func (rd *APAIR) Copy(dest dns.PrivateRdata) error { - cp := make([]byte, rd.Len()) - _, err := rd.Pack(cp) - if err != nil { - return err - } - - d := dest.(*APAIR) - d.addr[0] = net.IP(cp[:3]) - d.addr[1] = net.IP(cp[4:]) - return nil -} - -func (rd *APAIR) Len() int { - return net.IPv4len * 2 -} - -func ExamplePrivateHandle() { - dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR) - defer dns.PrivateHandleRemove(TypeAPAIR) - - rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)") - if err != nil { - log.Fatal("could not parse APAIR record: ", err) - } - fmt.Println(rr) - // Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 - - m := new(dns.Msg) - m.Id = 12345 - m.SetQuestion("miek.nl.", TypeAPAIR) - m.Answer = append(m.Answer, rr) - - fmt.Println(m) - // ;; opcode: QUERY, status: NOERROR, id: 12345 - // ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 - // - // ;; QUESTION SECTION: - // ;miek.nl. IN APAIR - // - // ;; ANSWER SECTION: - // miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/format.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/format.go deleted file mode 100644 index 3f5303c20..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/format.go +++ /dev/null @@ -1,87 +0,0 @@ -package dns - -import ( - "net" - "reflect" - "strconv" -) - -// NumField returns the number of rdata fields r has. -func NumField(r RR) int { - return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header -} - -// Field returns the rdata field i as a string. Fields are indexed starting from 1. -// RR types that holds slice data, for instance the NSEC type bitmap will return a single -// string where the types are concatenated using a space. -// Accessing non existing fields will cause a panic. 
-func Field(r RR, i int) string { - if i == 0 { - return "" - } - d := reflect.ValueOf(r).Elem().Field(i) - switch k := d.Kind(); k { - case reflect.String: - return d.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(d.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(d.Uint(), 10) - case reflect.Slice: - switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { - case `dns:"a"`: - // TODO(miek): Hmm store this as 16 bytes - if d.Len() < net.IPv6len { - return net.IPv4(byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint())).String() - } - return net.IPv4(byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint())).String() - case `dns:"aaaa"`: - return net.IP{ - byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint()), - byte(d.Index(4).Uint()), - byte(d.Index(5).Uint()), - byte(d.Index(6).Uint()), - byte(d.Index(7).Uint()), - byte(d.Index(8).Uint()), - byte(d.Index(9).Uint()), - byte(d.Index(10).Uint()), - byte(d.Index(11).Uint()), - byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint()), - }.String() - case `dns:"nsec"`: - if d.Len() == 0 { - return "" - } - s := Type(d.Index(0).Uint()).String() - for i := 1; i < d.Len(); i++ { - s += " " + Type(d.Index(i).Uint()).String() - } - return s - default: - // if it does not have a tag its a string slice - fallthrough - case `dns:"txt"`: - if d.Len() == 0 { - return "" - } - s := d.Index(0).String() - for i := 1; i < d.Len(); i++ { - s += " " + d.Index(i).String() - } - return s - } - } - return "" -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/fuzz_test.go deleted file mode 100644 index 255869730..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/fuzz_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package dns - -import "testing" - -func TestFuzzString(t *testing.T) { - testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"", - ";a ", ";a����������", - " NSAP O ", " NSAP N ", - " TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6", - " TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ", - "$GENERATE 0-3 ${441189,5039418474430,o}", - "$INCLUDE 00 TYPE00000000000n ", - "$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0", - } - for i, tc := range testcases { - rr, err := NewRR(tc) - if err == nil { - // rr can be nil because we can (for instance) just parse a comment - if rr == nil { - continue - } - t.Fatalf("parsed mailformed RR %d: %s", i, rr.String()) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/generate.go deleted file mode 100644 index e4481a4b0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/generate.go +++ /dev/null @@ -1,159 +0,0 @@ -package dns - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Parse the $GENERATE statement as used in BIND9 zones. -// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. -// We are called after '$GENERATE '. 
After which we expect: -// * the range (12-24/2) -// * lhs (ownername) -// * [[ttl][class]] -// * type -// * rhs (rdata) -// But we are lazy here, only the range is parsed *all* occurrences -// of $ after that are interpreted. -// Any error are returned as a string value, the empty string signals -// "no error". -func generate(l lex, c chan lex, t chan *Token, o string) string { - step := 1 - if i := strings.IndexAny(l.token, "/"); i != -1 { - if i+1 == len(l.token) { - return "bad step in $GENERATE range" - } - if s, err := strconv.Atoi(l.token[i+1:]); err == nil { - if s < 0 { - return "bad step in $GENERATE range" - } - step = s - } else { - return "bad step in $GENERATE range" - } - l.token = l.token[:i] - } - sx := strings.SplitN(l.token, "-", 2) - if len(sx) != 2 { - return "bad start-stop in $GENERATE range" - } - start, err := strconv.Atoi(sx[0]) - if err != nil { - return "bad start in $GENERATE range" - } - end, err := strconv.Atoi(sx[1]) - if err != nil { - return "bad stop in $GENERATE range" - } - if end < 0 || start < 0 || end < start { - return "bad range in $GENERATE range" - } - - <-c // _BLANK - // Create a complete new string, which we then parse again. - s := "" -BuildRR: - l = <-c - if l.value != zNewline && l.value != zEOF { - s += l.token - goto BuildRR - } - for i := start; i <= end; i += step { - var ( - escape bool - dom bytes.Buffer - mod string - err error - offset int - ) - - for j := 0; j < len(s); j++ { // No 'range' because we need to jump around - switch s[j] { - case '\\': - if escape { - dom.WriteByte('\\') - escape = false - continue - } - escape = true - case '$': - mod = "%d" - offset = 0 - if escape { - dom.WriteByte('$') - escape = false - continue - } - escape = false - if j+1 >= len(s) { // End of the string - dom.WriteString(fmt.Sprintf(mod, i+offset)) - continue - } else { - if s[j+1] == '$' { - dom.WriteByte('$') - j++ - continue - } - } - // Search for { and } - if s[j+1] == '{' { // Modifier block - sep := strings.Index(s[j+2:], "}") - if sep == -1 { - return "bad modifier in $GENERATE" - } - mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) - if err != nil { - return err.Error() - } - j += 2 + sep // Jump to it - } - dom.WriteString(fmt.Sprintf(mod, i+offset)) - default: - if escape { // Pretty useless here - escape = false - continue - } - dom.WriteByte(s[j]) - } - } - // Re-parse the RR and send it on the current channel t - rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) - if err != nil { - return err.Error() - } - t <- &Token{RR: rx} - // Its more efficient to first built the rrlist and then parse it in - // one go! But is this a problem? - } - return "" -} - -// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. 
-func modToPrintf(s string) (string, int, error) { - xs := strings.SplitN(s, ",", 3) - if len(xs) != 3 { - return "", 0, errors.New("bad modifier in $GENERATE") - } - // xs[0] is offset, xs[1] is width, xs[2] is base - if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { - return "", 0, errors.New("bad base in $GENERATE") - } - offset, err := strconv.Atoi(xs[0]) - if err != nil || offset > 255 { - return "", 0, errors.New("bad offset in $GENERATE") - } - width, err := strconv.Atoi(xs[1]) - if err != nil || width > 255 { - return "", offset, errors.New("bad width in $GENERATE") - } - switch { - case width < 0: - return "", offset, errors.New("bad width in $GENERATE") - case width == 0: - return "%" + xs[1] + xs[2], offset, nil - } - return "%0" + xs[1] + xs[2], offset, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/issue_test.go deleted file mode 100644 index 265ad56c0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/issue_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package dns - -// Tests that solve that an specific issue. - -import ( - "strings" - "testing" -) - -func TestTCPRtt(t *testing.T) { - m := new(Msg) - m.RecursionDesired = true - m.SetQuestion("example.org.", TypeA) - - c := &Client{} - for _, proto := range []string{"udp", "tcp"} { - c.Net = proto - _, rtt, err := c.Exchange(m, "8.8.4.4:53") - if err != nil { - t.Fatal(err) - } - if rtt == 0 { - t.Fatalf("expecting non zero rtt %s, got zero", c.Net) - } - } -} - -func TestNSEC3MissingSalt(t *testing.T) { - rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 aabbccdd K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H") - if err != nil { - t.Fatalf("failed to parse example rr: %s", err) - } - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. missing salt? err: %s", err) - } - in := rr.(*NSEC3).Salt - out := m.Answer[0].(*NSEC3).Salt - if in != out { - t.Fatalf("expected salts to match. packed: `%s`. returned: `%s`", in, out) - } -} - -func TestNSEC3MixedNextDomain(t *testing.T) { - rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 - k8udemvp1j2f7eg6jebps17vp3n8i58h") - if err != nil { - t.Fatalf("failed to parse example rr: %s", err) - } - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. err: %s", err) - } - in := strings.ToUpper(rr.(*NSEC3).NextDomain) - out := m.Answer[0].(*NSEC3).NextDomain - if in != out { - t.Fatalf("expected round trip to produce NextDomain `%s`, instead `%s`", in, out) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels.go deleted file mode 100644 index 9538d9c3a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels.go +++ /dev/null @@ -1,171 +0,0 @@ -package dns - -import "strings" - -// Holds a bunch of helper functions for dealing with labels. - -// SplitDomainName splits a name string into it's labels. -// www.miek.nl. returns []string{"www", "miek", "nl"} -// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, -// The root label (.) returns nil. 
Note that using -// strings.Split(s) will work in most cases, but does not handle -// escaped dots (\.) for instance. -// s must be a syntactically valid domain name, see IsDomainName. -func SplitDomainName(s string) (labels []string) { - if len(s) == 0 { - return nil - } - fqdnEnd := 0 // offset of the final '.' or the length of the name - idx := Split(s) - begin := 0 - if s[len(s)-1] == '.' { - fqdnEnd = len(s) - 1 - } else { - fqdnEnd = len(s) - } - - switch len(idx) { - case 0: - return nil - case 1: - // no-op - default: - end := 0 - for i := 1; i < len(idx); i++ { - end = idx[i] - labels = append(labels, s[begin:end-1]) - begin = end - } - } - - labels = append(labels, s[begin:fqdnEnd]) - return labels -} - -// CompareDomainName compares the names s1 and s2 and -// returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are not downcased -// before the comparison. -// -// www.miek.nl. and miek.nl. have two labels in common: miek and nl -// www.miek.nl. and www.bla.nl. have one label in common: nl -// -// s1 and s2 must be syntactically valid domain names. -func CompareDomainName(s1, s2 string) (n int) { - s1, s2 = strings.ToLower(s1), strings.ToLower(s2) - s1 = Fqdn(s1) - s2 = Fqdn(s2) - l1 := Split(s1) - l2 := Split(s2) - - // the first check: root label - if l1 == nil || l2 == nil { - return - } - - j1 := len(l1) - 1 // end - i1 := len(l1) - 2 // start - j2 := len(l2) - 1 - i2 := len(l2) - 2 - // the second check can be done here: last/only label - // before we fall through into the for-loop below - if s1[l1[j1]:] == s2[l2[j2]:] { - n++ - } else { - return - } - for { - if i1 < 0 || i2 < 0 { - break - } - if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { - n++ - } else { - break - } - j1-- - i1-- - j2-- - i2-- - } - return -} - -// CountLabel counts the the number of labels in the string s. -// s must be a syntactically valid domain name. -func CountLabel(s string) (labels int) { - if s == "." { - return - } - off := 0 - end := false - for { - off, end = NextLabel(s, off) - labels++ - if end { - return - } - } -} - -// Split splits a name s into its label indexes. -// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see SplitDomainName. -// s must be a syntactically valid domain name. -func Split(s string) []int { - if s == "." { - return nil - } - idx := make([]int, 1, 3) - off := 0 - end := false - - for { - off, end = NextLabel(s, off) - if end { - return idx - } - idx = append(idx, off) - } -} - -// NextLabel returns the index of the start of the next label in the -// string s starting at offset. -// The bool end is true when the end of the string has been reached. -// Also see PrevLabel. -func NextLabel(s string, offset int) (i int, end bool) { - quote := false - for i = offset; i < len(s)-1; i++ { - switch s[i] { - case '\\': - quote = !quote - default: - quote = false - case '.': - if quote { - quote = !quote - continue - } - return i + 1, false - } - } - return i + 1, true -} - -// PrevLabel returns the index of the label when starting from the right and -// jumping n labels to the left. -// The bool start is true when the start of the string has been overshot. -// Also see NextLabel. 
-func PrevLabel(s string, n int) (i int, start bool) { - if n == 0 { - return len(s), false - } - lab := Split(s) - if lab == nil { - return 0, true - } - if n > len(lab) { - return 0, true - } - return lab[len(lab)-n], false -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels_test.go deleted file mode 100644 index 9875d6cd9..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/labels_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package dns - -import "testing" - -func TestCompareDomainName(t *testing.T) { - s1 := "www.miek.nl." - s2 := "miek.nl." - s3 := "www.bla.nl." - s4 := "nl.www.bla." - s5 := "nl" - s6 := "miek.nl" - - if CompareDomainName(s1, s2) != 2 { - t.Errorf("%s with %s should be %d", s1, s2, 2) - } - if CompareDomainName(s1, s3) != 1 { - t.Errorf("%s with %s should be %d", s1, s3, 1) - } - if CompareDomainName(s3, s4) != 0 { - t.Errorf("%s with %s should be %d", s3, s4, 0) - } - // Non qualified tests - if CompareDomainName(s1, s5) != 1 { - t.Errorf("%s with %s should be %d", s1, s5, 1) - } - if CompareDomainName(s1, s6) != 2 { - t.Errorf("%s with %s should be %d", s1, s5, 2) - } - - if CompareDomainName(s1, ".") != 0 { - t.Errorf("%s with %s should be %d", s1, s5, 0) - } - if CompareDomainName(".", ".") != 0 { - t.Errorf("%s with %s should be %d", ".", ".", 0) - } - if CompareDomainName("test.com.", "TEST.COM.") != 2 { - t.Errorf("test.com. and TEST.COM. should be an exact match") - } -} - -func TestSplit(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "www..miek.nl": 4, - `www\.miek.nl.`: 2, - `www\\.miek.nl.`: 3, - ".": 0, - "nl.": 1, - "nl": 1, - "com.": 1, - ".com.": 2, - } - for s, i := range splitter { - if x := len(Split(s)); x != i { - t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s)) - } else { - t.Logf("%s %v", s, Split(s)) - } - } -} - -func TestSplit2(t *testing.T) { - splitter := map[string][]int{ - "www.miek.nl.": {0, 4, 9}, - "www.miek.nl": {0, 4, 9}, - "nl": {0}, - } - for s, i := range splitter { - x := Split(s) - switch len(i) { - case 1: - if x[0] != i[0] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - default: - if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - } - } -} - -func TestPrevLabel(t *testing.T) { - type prev struct { - string - int - } - prever := map[prev]int{ - prev{"www.miek.nl.", 0}: 12, - prev{"www.miek.nl.", 1}: 9, - prev{"www.miek.nl.", 2}: 4, - - prev{"www.miek.nl", 0}: 11, - prev{"www.miek.nl", 1}: 9, - prev{"www.miek.nl", 2}: 4, - - prev{"www.miek.nl.", 5}: 0, - prev{"www.miek.nl", 5}: 0, - - prev{"www.miek.nl.", 3}: 0, - prev{"www.miek.nl", 3}: 0, - } - for s, i := range prever { - x, ok := PrevLabel(s.string, s.int) - if i != x { - t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string) - } - } -} - -func TestCountLabel(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "nl": 1, - ".": 0, - } - for s, i := range splitter { - x := CountLabel(s) - if x != i { - t.Errorf("CountLabel should have %d, got %d", i, x) - } - } -} - -func TestSplitDomainName(t *testing.T) { - labels := map[string][]string{ - "miek.nl": {"miek", "nl"}, - ".": nil, - "www.miek.nl.": {"www", "miek", "nl"}, - "www.miek.nl": {"www", "miek", "nl"}, - "www..miek.nl": {"www", "", "miek", "nl"}, - `www\.miek.nl`: {`www\.miek`, "nl"}, - 
`www\\.miek.nl`: {`www\\`, "miek", "nl"}, - ".www.miek.nl.": {"", "www", "miek", "nl"}, - } -domainLoop: - for domain, splits := range labels { - parts := SplitDomainName(domain) - if len(parts) != len(splits) { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - for i := range parts { - if parts[i] != splits[i] { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - } - } -} - -func TestIsDomainName(t *testing.T) { - type ret struct { - ok bool - lab int - } - names := map[string]*ret{ - "..": {false, 1}, - "@.": {true, 1}, - "www.example.com": {true, 3}, - "www.e%ample.com": {true, 3}, - "www.example.com.": {true, 3}, - "mi\\k.nl.": {true, 2}, - "mi\\k.nl": {true, 2}, - } - for d, ok := range names { - l, k := IsDomainName(d) - if ok.ok != k || ok.lab != l { - t.Errorf(" got %v %d for %s ", k, l, d) - t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d) - } - } -} - -func BenchmarkSplitLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - Split("www.example.com") - } -} - -func BenchmarkLenLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - CountLabel("www.example.com") - } -} - -func BenchmarkCompareLabels(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - CompareDomainName("www.example.com", "aa.example.com") - } -} - -func BenchmarkIsSubDomain(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - IsSubDomain("www.example.com", "aa.example.com") - IsSubDomain("example.com", "aa.example.com") - IsSubDomain("miek.nl", "aa.example.com") - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg.go deleted file mode 100644 index 605fe6c5c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg.go +++ /dev/null @@ -1,1159 +0,0 @@ -// DNS packet assembly, see RFC 1035. Converting from - Unpack() - -// and to - Pack() - wire format. -// All the packers and unpackers take a (msg []byte, off int) -// and return (off1 int, ok bool). If they return ok==false, they -// also return off1==len(msg), so that the next unpacker will -// also fail. This lets us avoid checks of ok until the end of a -// packing sequence. - -package dns - -//go:generate go run msg_generate.go -//go:generate go run compress_generate.go - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/big" - "math/rand" - "strconv" - "sync" -) - -const ( - maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer - maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 -) - -var ( - ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. - ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. - ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. - ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. - ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... - ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. - ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. 
- ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. - ErrKey error = &Error{err: "bad key"} - ErrKeySize error = &Error{err: "bad key size"} - ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} - ErrNoSig error = &Error{err: "no signature found"} - ErrPrivKey error = &Error{err: "bad private key"} - ErrRcode error = &Error{err: "bad rcode"} - ErrRdata error = &Error{err: "bad rdata"} - ErrRRset error = &Error{err: "bad rrset"} - ErrSecret error = &Error{err: "no secrets defined"} - ErrShortRead error = &Error{err: "short read"} - ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. - ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. - ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. - ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. -) - -// Id by default, returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. This being a -// variable the function can be reassigned to a custom function. -// For instance, to make it return a static value: -// -// dns.Id = func() uint16 { return 3 } -var Id func() uint16 = id - -var ( - idLock sync.Mutex - idRand *rand.Rand -) - -// id returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. -func id() uint16 { - idLock.Lock() - - if idRand == nil { - // This (partially) works around - // https://github.com/golang/go/issues/11833 by only - // seeding idRand upon the first call to id. - - var seed int64 - var buf [8]byte - - if _, err := crand.Read(buf[:]); err == nil { - seed = int64(binary.LittleEndian.Uint64(buf[:])) - } else { - seed = rand.Int63() - } - - idRand = rand.New(rand.NewSource(seed)) - } - - // The call to idRand.Uint32 must be within the - // mutex lock because *rand.Rand is not safe for - // concurrent use. - // - // There is no added performance overhead to calling - // idRand.Uint32 inside a mutex lock over just - // calling rand.Uint32 as the global math/rand rng - // is internally protected by a sync.Mutex. - id := uint16(idRand.Uint32()) - - idLock.Unlock() - return id -} - -// MsgHdr is a a manually-unpacked version of (id, bits). -type MsgHdr struct { - Id uint16 - Response bool - Opcode int - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - Zero bool - AuthenticatedData bool - CheckingDisabled bool - Rcode int -} - -// Msg contains the layout of a DNS message. -type Msg struct { - MsgHdr - Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. - Question []Question // Holds the RR(s) of the question section. - Answer []RR // Holds the RR(s) of the answer section. - Ns []RR // Holds the RR(s) of the authority section. - Extra []RR // Holds the RR(s) of the additional section. -} - -// ClassToString is a maps Classes to strings for each CLASS wire type. -var ClassToString = map[uint16]string{ - ClassINET: "IN", - ClassCSNET: "CS", - ClassCHAOS: "CH", - ClassHESIOD: "HS", - ClassNONE: "NONE", - ClassANY: "ANY", -} - -// OpcodeToString maps Opcodes to strings. 
-var OpcodeToString = map[int]string{ - OpcodeQuery: "QUERY", - OpcodeIQuery: "IQUERY", - OpcodeStatus: "STATUS", - OpcodeNotify: "NOTIFY", - OpcodeUpdate: "UPDATE", -} - -// RcodeToString maps Rcodes to strings. -var RcodeToString = map[int]string{ - RcodeSuccess: "NOERROR", - RcodeFormatError: "FORMERR", - RcodeServerFailure: "SERVFAIL", - RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMPL", - RcodeRefused: "REFUSED", - RcodeYXDomain: "YXDOMAIN", // See RFC 2136 - RcodeYXRrset: "YXRRSET", - RcodeNXRrset: "NXRRSET", - RcodeNotAuth: "NOTAUTH", - RcodeNotZone: "NOTZONE", - RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 - // RcodeBadVers: "BADVERS", - RcodeBadKey: "BADKEY", - RcodeBadTime: "BADTIME", - RcodeBadMode: "BADMODE", - RcodeBadName: "BADNAME", - RcodeBadAlg: "BADALG", - RcodeBadTrunc: "BADTRUNC", - RcodeBadCookie: "BADCOOKIE", -} - -// Domain names are a sequence of counted strings -// split at the dots. They end with a zero-length string. - -// PackDomainName packs a domain name s into msg[off:]. -// If compression is wanted compress must be true and the compression -// map needs to hold a mapping between domain names and offsets -// pointing into msg. -func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off1, _, err = packDomainName(s, msg, off, compression, compress) - return -} - -func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { - // special case if msg == nil - lenmsg := 256 - if msg != nil { - lenmsg = len(msg) - } - ls := len(s) - if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, 0, nil - } - // If not fully qualified, error out, but only if msg == nil #ugly - switch { - case msg == nil: - if s[ls-1] != '.' { - s += "." - ls++ - } - case msg != nil: - if s[ls-1] != '.' { - return lenmsg, 0, ErrFqdn - } - } - // Each dot ends a segment of the name. - // We trade each dot byte for a length byte. - // Except for escaped dots (\.), which are normal dots. - // There is also a trailing zero. - - // Compression - nameoffset := -1 - pointer := -1 - // Emit sequence of counted strings, chopping at dots. - begin := 0 - bs := []byte(s) - roBs, bsFresh, escapedDot := s, true, false - for i := 0; i < ls; i++ { - if bs[i] == '\\' { - for j := i; j < ls-1; j++ { - bs[j] = bs[j+1] - } - ls-- - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - // check for \DDD - if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - bs[i] = dddToByte(bs[i:]) - for j := i + 1; j < ls-2; j++ { - bs[j] = bs[j+2] - } - ls -= 2 - } - escapedDot = bs[i] == '.' - bsFresh = false - continue - } - - if bs[i] == '.' { - if i > 0 && bs[i-1] == '.' && !escapedDot { - // two dots back to back is not legal - return lenmsg, labels, ErrRdata - } - if i-begin >= 1<<6 { // top two bits of length must be clear - return lenmsg, labels, ErrRdata - } - // off can already (we're in a loop) be bigger than len(msg) - // this happens when a name isn't fully qualified - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = byte(i - begin) - } - offset := off - off++ - for j := begin; j < i; j++ { - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = bs[j] - } - off++ - } - if compress && !bsFresh { - roBs = string(bs) - bsFresh = true - } - // Don't try to compress '.' 
- // We should only compress when compress it true, but we should also still pick - // up names that can be used for *future* compression(s). - if compression != nil && roBs[begin:] != "." { - if p, ok := compression[roBs[begin:]]; !ok { - // Only offsets smaller than this can be used. - if offset < maxCompressionOffset { - compression[roBs[begin:]] = offset - } - } else { - // The first hit is the longest matching dname - // keep the pointer offset we get back and store - // the offset of the current name, because that's - // where we need to insert the pointer later - - // If compress is true, we're allowed to compress this dname - if pointer == -1 && compress { - pointer = p // Where to point to - nameoffset = offset // Where to point from - break - } - } - } - labels++ - begin = i + 1 - } - escapedDot = false - } - // Root label is special - if len(bs) == 1 && bs[0] == '.' { - return off, labels, nil - } - // If we did compression and we find something add the pointer here - if pointer != -1 { - // We have two bytes (14 bits) to put the pointer in - // if msg == nil, we will never do compression - binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) - off = nameoffset + 1 - goto End - } - if msg != nil && off < len(msg) { - msg[off] = 0 - } -End: - off++ - return off, labels, nil -} - -// Unpack a domain name. -// In addition to the simple sequences of counted strings above, -// domain names are allowed to refer to strings elsewhere in the -// packet, to avoid repeating common suffixes when returning -// many entries in a single domain. The pointers are marked -// by a length byte with the top two bits set. Ignoring those -// two bits, that byte and the next give a 14 bit offset from msg[0] -// where we should pick up the trail. -// Note that if we jump elsewhere in the packet, -// we return off1 == the offset after the first pointer we found, -// which is where the next record will start. -// In theory, the pointers are only allowed to jump backward. -// We let them jump anywhere and stop jumping after a while. - -// UnpackDomainName unpacks a domain name into a string. -func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, 64) - off1 := 0 - lenmsg := len(msg) - maxLen := maxDomainNameWireOctets - ptr := 0 // number of pointers followed -Loop: - for { - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // end of name - break Loop - } - // literal string - if off+c > lenmsg { - return "", lenmsg, ErrBuf - } - for j := off; j < off+c; j++ { - switch b := msg[j]; b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': - s = append(s, '\\', b) - // presentation-format \X escapes add an extra byte - maxLen += 1 - default: - if b < 32 || b >= 127 { // unprintable, use \DDD - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - // presentation-format \DDD escapes add 3 extra bytes - maxLen += 3 - } else { - s = append(s, b) - } - } - } - s = append(s, '.') - off += c - case 0xC0: - // pointer to somewhere else in msg. - // remember location after first ptr, - // since that's how many bytes we consumed. - // also, don't follow too many pointers -- - // maybe there's a loop. 
- if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c1 := msg[off] - off++ - if ptr == 0 { - off1 = off - } - if ptr++; ptr > 10 { - return "", lenmsg, &Error{err: "too many compression pointers"} - } - // pointer should guarantee that it advances and points forwards at least - // but the condition on previous three lines guarantees that it's - // at least loop-free - off = (c^0xC0)<<8 | int(c1) - default: - // 0x80 and 0x40 are reserved - return "", lenmsg, ErrRdata - } - } - if ptr == 0 { - off1 = off - } - if len(s) == 0 { - s = []byte(".") - } else if len(s) >= maxLen { - // error if the name is too long, but don't throw it away - return string(s), lenmsg, ErrLongDomain - } - return string(s), off1, nil -} - -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { - if len(txt) == 0 { - if offset >= len(msg) { - return offset, ErrBuf - } - msg[offset] = 0 - return offset, nil - } - var err error - for i := range txt { - if len(txt[i]) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(txt[i], msg, offset, tmp) - if err != nil { - return offset, err - } - } - return offset, nil -} - -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { - lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - l := offset - lenByteOffset - 1 - if l > 255 { - return offset, &Error{err: "string exceeded 255 bytes in txt"} - } - msg[lenByteOffset] = byte(l) - return offset, nil -} - -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - return offset, nil -} - -func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { - off = off0 - var s string - for off < len(msg) && err == nil { - s, off, err = unpackTxtString(msg, off) - if err == nil { - ss = append(ss, s) - } - } - return -} - -func unpackTxtString(msg []byte, offset int) (string, int, error) { - if offset+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - l := int(msg[offset]) - if offset+l+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - s := make([]byte, 0, l) - for _, b := range msg[offset+1 : offset+1+l] { - switch b { - case '"', '\\': - s = append(s, '\\', b) - default: - if b < 32 || b > 127 { // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - offset += 1 + l - return string(s), offset, nil -} - -// Helpers for dealing with 
escaped bytes -func isDigit(b byte) bool { return b >= '0' && b <= '9' } - -func dddToByte(s []byte) byte { - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -// Helper function for packing and unpacking -func intToBytes(i *big.Int, length int) []byte { - buf := i.Bytes() - if len(buf) < length { - b := make([]byte, length) - copy(b[length-len(buf):], buf) - return b - } - return buf -} - -// PackRR packs a resource record rr into msg[off:]. -// See PackDomainName for documentation about the compression. -func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - if rr == nil { - return len(msg), &Error{err: "nil rr"} - } - - off1, err = rr.pack(msg, off, compression, compress) - if err != nil { - return len(msg), err - } - // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. - if rawSetRdlength(msg, off, off1) { - return off1, nil - } - return off, ErrRdata -} - -// UnpackRR unpacks msg[off:] into an RR. -func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { - h, off, msg, err := unpackHeader(msg, off) - if err != nil { - return nil, len(msg), err - } - end := off + int(h.Rdlength) - - if fn, known := typeToUnpack[h.Rrtype]; !known { - rr, off, err = unpackRFC3597(h, msg, off) - } else { - rr, off, err = fn(h, msg, off) - } - if off != end { - return &h, end, &Error{err: "bad rdlength"} - } - return rr, off, err -} - -// unpackRRslice unpacks msg[off:] into an []RR. -// If we cannot unpack the whole array, then it will return nil -func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { - var r RR - // Optimistically make dst be the length that was sent - dst := make([]RR, 0, l) - for i := 0; i < l; i++ { - off1 := off - r, off, err = UnpackRR(msg, off) - if err != nil { - off = len(msg) - break - } - // If offset does not increase anymore, l is a lie - if off1 == off { - l = i - break - } - dst = append(dst, r) - } - if err != nil && off == len(msg) { - dst = nil - } - return dst, off, err -} - -// Convert a MsgHdr to a string, with dig-like headers: -// -//;; opcode: QUERY, status: NOERROR, id: 48404 -// -//;; flags: qr aa rd ra; -func (h *MsgHdr) String() string { - if h == nil { - return " MsgHdr" - } - - s := ";; opcode: " + OpcodeToString[h.Opcode] - s += ", status: " + RcodeToString[h.Rcode] - s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" - - s += ";; flags:" - if h.Response { - s += " qr" - } - if h.Authoritative { - s += " aa" - } - if h.Truncated { - s += " tc" - } - if h.RecursionDesired { - s += " rd" - } - if h.RecursionAvailable { - s += " ra" - } - if h.Zero { // Hmm - s += " z" - } - if h.AuthenticatedData { - s += " ad" - } - if h.CheckingDisabled { - s += " cd" - } - - s += ";" - return s -} - -// Pack packs a Msg: it is converted to to wire format. -// If the dns.Compress is true the message will be in compressed wire format. -func (dns *Msg) Pack() (msg []byte, err error) { - return dns.PackBuffer(nil) -} - -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small -// a new buffer is allocated. -func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - // We use a similar function in tsig.go's stripTsig. 
- var ( - dh Header - compression map[string]int - ) - - if dns.Compress { - compression = make(map[string]int) // Compression pointer mappings - } - - if dns.Rcode < 0 || dns.Rcode > 0xFFF { - return nil, ErrRcode - } - if dns.Rcode > 0xF { - // Regular RCODE field is 4 bits - opt := dns.IsEdns0() - if opt == nil { - return nil, ErrExtendedRcode - } - opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) - dns.Rcode &= 0xF - } - - // Convert convenient Msg into wire-like Header. - dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) - if dns.Response { - dh.Bits |= _QR - } - if dns.Authoritative { - dh.Bits |= _AA - } - if dns.Truncated { - dh.Bits |= _TC - } - if dns.RecursionDesired { - dh.Bits |= _RD - } - if dns.RecursionAvailable { - dh.Bits |= _RA - } - if dns.Zero { - dh.Bits |= _Z - } - if dns.AuthenticatedData { - dh.Bits |= _AD - } - if dns.CheckingDisabled { - dh.Bits |= _CD - } - - // Prepare variable sized arrays. - question := dns.Question - answer := dns.Answer - ns := dns.Ns - extra := dns.Extra - - dh.Qdcount = uint16(len(question)) - dh.Ancount = uint16(len(answer)) - dh.Nscount = uint16(len(ns)) - dh.Arcount = uint16(len(extra)) - - // We need the uncompressed length here, because we first pack it and then compress it. - msg = buf - uncompressedLen := compressedLen(dns, false) - if packLen := uncompressedLen + 1; len(msg) < packLen { - msg = make([]byte, packLen) - } - - // Pack it in: header and then the pieces. - off := 0 - off, err = dh.pack(msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - for i := 0; i < len(question); i++ { - off, err = question[i].pack(msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(answer); i++ { - off, err = PackRR(answer[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(ns); i++ { - off, err = PackRR(ns[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(extra); i++ { - off, err = PackRR(extra[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - return msg[:off], nil -} - -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - var ( - dh Header - off int - ) - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return err - } - - dns.Id = dh.Id - dns.Response = (dh.Bits & _QR) != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = (dh.Bits & _AA) != 0 - dns.Truncated = (dh.Bits & _TC) != 0 - dns.RecursionDesired = (dh.Bits & _RD) != 0 - dns.RecursionAvailable = (dh.Bits & _RA) != 0 - dns.Zero = (dh.Bits & _Z) != 0 - dns.AuthenticatedData = (dh.Bits & _AD) != 0 - dns.CheckingDisabled = (dh.Bits & _CD) != 0 - dns.Rcode = int(dh.Bits & 0xF) - - if off == len(msg) { - return ErrTruncated - } - - // Optimistically use the count given to us in the header - dns.Question = make([]Question, 0, int(dh.Qdcount)) - - for i := 0; i < int(dh.Qdcount); i++ { - off1 := off - var q Question - q, off, err = unpackQuestion(msg, off) - if err != nil { - // Even if Truncated is set, we only will set ErrTruncated if we - // actually got the questions - return err - } - if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! 
- dh.Qdcount = uint16(i) - break - } - dns.Question = append(dns.Question, q) - } - - dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) - // The header counts might have been wrong so we need to update it - dh.Ancount = uint16(len(dns.Answer)) - if err == nil { - dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Nscount = uint16(len(dns.Ns)) - if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Arcount = uint16(len(dns.Extra)) - - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } else if dns.Truncated { - // Whether we ran into a an error or not, we want to return that it - // was truncated - err = ErrTruncated - } - return err -} - -// Convert a complete message to a string with dig-like output. -func (dns *Msg) String() string { - if dns == nil { - return " MsgHdr" - } - s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" - if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" - for i := 0; i < len(dns.Question); i++ { - s += dns.Question[i].String() + "\n" - } - } - if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] != nil { - s += dns.Answer[i].String() + "\n" - } - } - } - if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] != nil { - s += dns.Ns[i].String() + "\n" - } - } - } - if len(dns.Extra) > 0 { - s += "\n;; ADDITIONAL SECTION:\n" - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] != nil { - s += dns.Extra[i].String() + "\n" - } - } - } - return s -} - -// Len returns the message length when in (un)compressed wire format. -// If dns.Compress is true compression it is taken into account. Len() -// is provided to be a faster way to get the size of the resulting packet, -// than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } - -// compressedLen returns the message length when in compressed wire format -// when compress is true, otherwise the uncompressed length is returned. -func compressedLen(dns *Msg, compress bool) int { - // We always return one more than needed. 
- l := 12 // Message header is always 12 bytes - compression := map[string]int{} - - for i := 0; i < len(dns.Question); i++ { - l += dns.Question[i].len() - if compress { - compressionLenHelper(compression, dns.Question[i].Name) - } - } - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] == nil { - continue - } - l += dns.Answer[i].len() - if compress { - k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Answer[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Answer[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Answer[i]) - } - } - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] == nil { - continue - } - l += dns.Ns[i].len() - if compress { - k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Ns[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Ns[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Ns[i]) - } - } - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] == nil { - continue - } - l += dns.Extra[i].len() - if compress { - k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Extra[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Extra[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Extra[i]) - } - } - return l -} - -// Put the parts of the name in the compression map. -func compressionLenHelper(c map[string]int, s string) { - pref := "" - lbs := Split(s) - for j := len(lbs) - 1; j >= 0; j-- { - pref = s[lbs[j]:] - if _, ok := c[pref]; !ok { - c[pref] = len(pref) - } - } -} - -// Look for each part in the compression map and returns its length, -// keep on searching so we get the longest match. -func compressionLenSearch(c map[string]int, s string) (int, bool) { - off := 0 - end := false - if s == "" { // don't bork on bogus data - return 0, false - } - for { - if _, ok := c[s[off:]]; ok { - return len(s[off:]), true - } - if end { - break - } - off, end = NextLabel(s, off) - } - return 0, false -} - -// Copy returns a new RR which is a deep-copy of r. -func Copy(r RR) RR { r1 := r.copy(); return r1 } - -// Len returns the length (in octets) of the uncompressed RR in wire format. -func Len(r RR) int { return r.len() } - -// Copy returns a new *Msg which is a deep-copy of dns. -func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } - -// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
-func (dns *Msg) CopyTo(r1 *Msg) *Msg { - r1.MsgHdr = dns.MsgHdr - r1.Compress = dns.Compress - - if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy - } - - rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) - var rri int - - if len(dns.Answer) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Answer); i++ { - rrArr[rri] = dns.Answer[i].copy() - rri++ - } - r1.Answer = rrArr[rrbegin:rri:rri] - } - - if len(dns.Ns) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Ns); i++ { - rrArr[rri] = dns.Ns[i].copy() - rri++ - } - r1.Ns = rrArr[rrbegin:rri:rri] - } - - if len(dns.Extra) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Extra); i++ { - rrArr[rri] = dns.Extra[i].copy() - rri++ - } - r1.Extra = rrArr[rrbegin:rri:rri] - } - - return r1 -} - -func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := PackDomainName(q.Name, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint16(q.Qtype, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(q.Qclass, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func unpackQuestion(msg []byte, off int) (Question, int, error) { - var ( - q Question - err error - ) - q.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qtype, off, err = unpackUint16(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qclass, off, err = unpackUint16(msg, off) - if off == len(msg) { - return q, off, nil - } - return q, off, err -} - -func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := packUint16(dh.Id, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Bits, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Qdcount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Ancount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Nscount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Arcount, msg, off) - return off, err -} - -func unpackMsgHdr(msg []byte, off int) (Header, int, error) { - var ( - dh Header - err error - ) - dh.Id, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Bits, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Qdcount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Ancount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Nscount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Arcount, off, err = unpackUint16(msg, off) - return dh, off, err -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 4d9f81d43..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,349 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. 
Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) - fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return off, err -} -headerEnd := off -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case 
strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
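Everything the generator emits above is keyed off the `dns:"..."` struct tags; untagged fields fall back to their basic kind. A small runtime sketch of the same tag-driven dispatch, using reflection instead of generated code (the struct, its field names, and the printed selection are illustrative, not types from the real package):

package main

import (
    "fmt"
    "reflect"
)

// exampleRR is a made-up record type; only the tag strings mimic the ones the
// generator switches on.
type exampleRR struct {
    Preference uint16
    Target     string   `dns:"cdomain-name"`
    Txt        []string `dns:"txt"`
    Digest     string   `dns:"hex"`
}

func main() {
    t := reflect.TypeOf(exampleRR{})
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        tag := f.Tag.Get("dns")
        if tag == "" {
            // untagged: choose the helper by basic kind (uint8/16/32/64, string)
            tag = "(" + f.Type.Kind().String() + ")"
        }
        fmt.Printf("%-10s -> encoder selected by %s\n", f.Name, tag)
    }
}
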
-func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_helpers.go deleted file mode 100644 index 615274ab0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_helpers.go +++ /dev/null @@ -1,633 +0,0 @@ -package dns - -import ( - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "net" - "strconv" -) - -// helper functions called from the generated zmsg.go - -// These function are named after the tag to help pack/unpack, if there is no tag it is the name -// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or -// packDataDomainName. - -func unpackDataA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv4len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking a"} - } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) - off += net.IPv4len - return a, off, nil -} - -func packDataA(a net.IP, msg []byte, off int) (int, error) { - // It must be a slice of 4, even if it is 16, we encode only the first 4 - if off+net.IPv4len > len(msg) { - return len(msg), &Error{err: "overflow packing a"} - } - switch len(a) { - case net.IPv4len, net.IPv6len: - copy(msg[off:], a.To4()) - off += net.IPv4len - case 0: - // Allowed, for dynamic updates. - default: - return len(msg), &Error{err: "overflow packing a"} - } - return off, nil -} - -func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv6len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking aaaa"} - } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) - off += net.IPv6len - return aaaa, off, nil -} - -func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { - if off+net.IPv6len > len(msg) { - return len(msg), &Error{err: "overflow packing aaaa"} - } - - switch len(aaaa) { - case net.IPv6len: - copy(msg[off:], aaaa) - off += net.IPv6len - case 0: - // Allowed, dynamic updates. - default: - return len(msg), &Error{err: "overflow packing aaaa"} - } - return off, nil -} - -// unpackHeader unpacks an RR header, returning the offset to the end of the header and a -// re-sliced msg according to the expected length of the RR. 
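A quick standalone illustration of why packDataA above accepts both 4- and 16-byte slices and always writes To4(): Go frequently stores IPv4 addresses in the 16-byte IPv4-in-IPv6 form, while an A record carries exactly four octets (a sketch, not part of the patch):

package main

import (
    "fmt"
    "net"
)

func main() {
    ip := net.ParseIP("192.0.2.1")        // commonly a 16-byte slice internally
    fmt.Println(len(ip), len(ip.To4()))   // 16 4
    fmt.Printf("% x\n", []byte(ip.To4())) // c0 00 02 01 — the four octets an A record carries
}
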
-func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { - hdr := RR_Header{} - if off == len(msg) { - return hdr, off, msg, nil - } - - hdr.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rrtype, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Class, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Ttl, off, err = unpackUint32(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rdlength, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) - return hdr, off, msg, nil -} - -// pack packs an RR header, returning the offset to the end of the header. -// See PackDomainName for documentation about the compression. -func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - if off == len(msg) { - return off, nil - } - - off, err = PackDomainName(hdr.Name, msg, off, compression, compress) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Rrtype, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Class, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint32(hdr.Ttl, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Rdlength, msg, off) - if err != nil { - return len(msg), err - } - return off, nil -} - -// helper helper functions. - -// truncateMsgFromRdLength truncates msg to match the expected length of the RR. -// Returns an error if msg is smaller than the expected size. -func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { - lenrd := off + int(rdlength) - if lenrd > len(msg) { - return msg, &Error{err: "overflowing header size"} - } - return msg[:lenrd], nil -} - -func fromBase32(s []byte) (buf []byte, err error) { - for i, b := range s { - if b >= 'a' && b <= 'z' { - s[i] = b - 32 - } - } - buflen := base32.HexEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base32.HexEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } - -func fromBase64(s []byte) (buf []byte, err error) { - buflen := base64.StdEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base64.StdEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } - -// dynamicUpdate returns true if the Rdlength is zero. 
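fromBase32/toBase32 above use the extended-hex base32 alphabet (digits first, then letters) and upper-case any lower-case input before decoding. A minimal round trip with the same standard-library encoding, using illustrative bytes rather than data from the tests:

package main

import (
    "encoding/base32"
    "fmt"
    "strings"
)

func main() {
    raw := []byte{0xde, 0xad, 0xbe, 0xef}

    // Encode with the extended-hex alphabet, as toBase32 does.
    enc := base32.HexEncoding.EncodeToString(raw)
    fmt.Println(enc)

    // The alphabet is defined on upper-case letters, which is why fromBase32
    // upper-cases first; ToUpper here mirrors that (a no-op for enc itself).
    dec, err := base32.HexEncoding.DecodeString(strings.ToUpper(enc))
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", dec) // de ad be ef
}
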
-func noRdata(h RR_Header) bool { return h.Rdlength == 0 } - -func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { - if off+1 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint8"} - } - return uint8(msg[off]), off + 1, nil -} - -func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { - if off+1 > len(msg) { - return len(msg), &Error{err: "overflow packing uint8"} - } - msg[off] = byte(i) - return off + 1, nil -} - -func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { - if off+2 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint16"} - } - return binary.BigEndian.Uint16(msg[off:]), off + 2, nil -} - -func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { - if off+2 > len(msg) { - return len(msg), &Error{err: "overflow packing uint16"} - } - binary.BigEndian.PutUint16(msg[off:], i) - return off + 2, nil -} - -func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { - if off+4 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint32"} - } - return binary.BigEndian.Uint32(msg[off:]), off + 4, nil -} - -func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { - if off+4 > len(msg) { - return len(msg), &Error{err: "overflow packing uint32"} - } - binary.BigEndian.PutUint32(msg[off:], i) - return off + 4, nil -} - -func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { - if off+6 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} - } - // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5]))) - off += 6 - return i, off, nil -} - -func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { - if off+6 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64 as uint48"} - } - msg[off] = byte(i >> 40) - msg[off+1] = byte(i >> 32) - msg[off+2] = byte(i >> 24) - msg[off+3] = byte(i >> 16) - msg[off+4] = byte(i >> 8) - msg[off+5] = byte(i) - off += 6 - return off, nil -} - -func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { - if off+8 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64"} - } - return binary.BigEndian.Uint64(msg[off:]), off + 8, nil -} - -func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { - if off+8 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64"} - } - binary.BigEndian.PutUint64(msg[off:], i) - off += 8 - return off, nil -} - -func unpackString(msg []byte, off int) (string, int, error) { - if off+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - l := int(msg[off]) - if off+l+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - s := make([]byte, 0, l) - for _, b := range msg[off+1 : off+1+l] { - switch b { - case '"', '\\': - s = append(s, '\\', b) - default: - if b < 32 || b > 127 { // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - off += 1 + l - return string(s), off, nil -} - -func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) - if err 
!= nil { - return len(msg), err - } - return off, nil -} - -func unpackStringBase32(msg []byte, off, end int) (string, int, error) { - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base32"} - } - s := toBase32(msg[off:end]) - return s, end, nil -} - -func packStringBase32(s string, msg []byte, off int) (int, error) { - b32, err := fromBase32([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b32) > len(msg) { - return len(msg), &Error{err: "overflow packing base32"} - } - copy(msg[off:off+len(b32)], b32) - off += len(b32) - return off, nil -} - -func unpackStringBase64(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is base64 encoded value, so we don't need an explicit length - // to be set. Thus far all RR's that have base64 encoded fields have those as their - // last one. What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base64"} - } - s := toBase64(msg[off:end]) - return s, end, nil -} - -func packStringBase64(s string, msg []byte, off int) (int, error) { - b64, err := fromBase64([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b64) > len(msg) { - return len(msg), &Error{err: "overflow packing base64"} - } - copy(msg[off:off+len(b64)], b64) - off += len(b64) - return off, nil -} - -func unpackStringHex(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is hex encoded value, so we don't need an explicit length - // to be set. NSEC and TSIG have hex fields with a length field. - // What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking hex"} - } - - s := hex.EncodeToString(msg[off:end]) - return s, end, nil -} - -func packStringHex(s string, msg []byte, off int) (int, error) { - h, err := hex.DecodeString(s) - if err != nil { - return len(msg), err - } - if off+(len(h)) > len(msg) { - return len(msg), &Error{err: "overflow packing hex"} - } - copy(msg[off:off+len(h)], h) - off += len(h) - return off, nil -} - -func unpackStringTxt(msg []byte, off int) ([]string, int, error) { - txt, off, err := unpackTxt(msg, off) - if err != nil { - return nil, len(msg), err - } - return txt, off, nil -} - -func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
- off, err := packTxt(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { - var edns []EDNS0 -Option: - code := uint16(0) - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - switch code { - case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0SUBNET, EDNS0SUBNETDRAFT: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - if code == EDNS0SUBNETDRAFT { - e.DraftOption = true - } - case EDNS0COOKIE: - e := new(EDNS0_COOKIE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - default: - e := new(EDNS0_LOCAL) - e.Code = code - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - } - - if off < len(msg) { - goto Option - } - - return edns, off, nil -} - -func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { - for _, el := range options { - b, err := el.pack() - if err != nil || off+3 > len(msg) { - return len(msg), &Error{err: "overflow packing opt"} - } - binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code - binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length - off += 4 - if off+len(b) > len(msg) { - copy(msg[off:], b) - off = len(msg) - continue - } - // Actual data - copy(msg[off:off+len(b)], b) - off += len(b) - } - return off, nil -} - -func unpackStringOctet(msg []byte, off int) (string, int, error) { - s := string(msg[off:]) - return s, len(msg), nil -} - -func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { - var nsec []uint16 - length, window, lastwindow := 0, 0, -1 - for off < len(msg) { - if off+2 > len(msg) { - return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} - } - window = int(msg[off]) - length = int(msg[off+1]) - off += 2 - if window <= 
lastwindow { - // RFC 4034: Blocks are present in the NSEC RR RDATA in - // increasing numerical order. - return nsec, len(msg), &Error{err: "out of order NSEC block"} - } - if length == 0 { - // RFC 4034: Blocks with no types present MUST NOT be included. - return nsec, len(msg), &Error{err: "empty NSEC block"} - } - if length > 32 { - return nsec, len(msg), &Error{err: "NSEC block too long"} - } - if off+length > len(msg) { - return nsec, len(msg), &Error{err: "overflowing NSEC block"} - } - - // Walk the bytes in the window and extract the type bits - for j := 0; j < length; j++ { - b := msg[off+j] - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - nsec = append(nsec, uint16(window*256+j*8+0)) - } - if b&0x40 == 0x40 { - nsec = append(nsec, uint16(window*256+j*8+1)) - } - if b&0x20 == 0x20 { - nsec = append(nsec, uint16(window*256+j*8+2)) - } - if b&0x10 == 0x10 { - nsec = append(nsec, uint16(window*256+j*8+3)) - } - if b&0x8 == 0x8 { - nsec = append(nsec, uint16(window*256+j*8+4)) - } - if b&0x4 == 0x4 { - nsec = append(nsec, uint16(window*256+j*8+5)) - } - if b&0x2 == 0x2 { - nsec = append(nsec, uint16(window*256+j*8+6)) - } - if b&0x1 == 0x1 { - nsec = append(nsec, uint16(window*256+j*8+7)) - } - } - off += length - lastwindow = window - } - return nsec, off, nil -} - -func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { - if len(bitmap) == 0 { - return off, nil - } - var lastwindow, lastlength uint16 - for j := 0; j < len(bitmap); j++ { - t := bitmap[j] - window := t / 256 - length := (t-window*256)/8 + 1 - if window > lastwindow && lastlength != 0 { // New window, jump to the new offset - off += int(lastlength) + 2 - lastlength = 0 - } - if window < lastwindow || length < lastlength { - return len(msg), &Error{err: "nsec bits out of order"} - } - if off+2+int(length) > len(msg) { - return len(msg), &Error{err: "overflow packing nsec"} - } - // Setting the window # - msg[off] = byte(window) - // Setting the octets length - msg[off+1] = byte(length) - // Setting the bit value for the type in the right octet - msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) - lastwindow, lastlength = window, length - } - off += int(lastlength) + 2 - return off, nil -} - -func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { - var ( - servers []string - s string - err error - ) - if end > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking domain names"} - } - for off < end { - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return servers, len(msg), err - } - servers = append(servers, s) - } - return servers, off, nil -} - -func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { - var err error - for j := 0; j < len(names); j++ { - off, err = PackDomainName(names[j], msg, off, compression, false && compress) - if err != nil { - return len(msg), err - } - } - return off, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_test.go deleted file mode 100644 index 2dbef6260..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/msg_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package dns - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "testing" -) - -const ( - maxPrintableLabel = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789x" - tooLongLabel = maxPrintableLabel + "x" -) - -var ( - longDomain = 
maxPrintableLabel[:53] + strings.TrimSuffix( - strings.Join([]string{".", ".", ".", ".", "."}, maxPrintableLabel[:49]), ".") - reChar = regexp.MustCompile(`.`) - i = -1 - maxUnprintableLabel = reChar.ReplaceAllStringFunc(maxPrintableLabel, func(ch string) string { - if i++; i >= 32 { - i = 0 - } - return fmt.Sprintf("\\%03d", i) - }) -) - -func TestUnpackDomainName(t *testing.T) { - var cases = []struct { - label string - input string - expectedOutput string - expectedError string - }{ - {"empty domain", - "\x00", - ".", - ""}, - {"long label", - string(63) + maxPrintableLabel + "\x00", - maxPrintableLabel + ".", - ""}, - {"unprintable label", - string(63) + regexp.MustCompile(`\\[0-9]+`).ReplaceAllStringFunc(maxUnprintableLabel, - func(escape string) string { - n, _ := strconv.ParseInt(escape[1:], 10, 8) - return string(n) - }) + "\x00", - maxUnprintableLabel + ".", - ""}, - {"long domain", - string(53) + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - longDomain + ".", - ""}, - {"compression pointer", - // an unrealistic but functional test referencing an offset _inside_ a label - "\x03foo" + "\x05\x03com\x00" + "\x07example" + "\xC0\x05", - "foo.\\003com\\000.example.com.", - ""}, - - {"too long domain", - string(54) + "x" + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - "x" + longDomain + ".", - ErrLongDomain.Error()}, - {"too long by pointer", - // a matryoshka doll name to get over 255 octets after expansion via internal pointers - string([]byte{ - // 11 length values, first to last - 40, 37, 34, 31, 28, 25, 22, 19, 16, 13, 0, - // 12 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "", - ErrLongDomain.Error()}, - {"long by pointer", - // a matryoshka doll name _not_ exceeding 255 octets after expansion - string([]byte{ - // 11 length values, first to last - 37, 34, 31, 28, 25, 22, 19, 16, 13, 10, 0, - // 9 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "" + - (`\"\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003\192\002.`) + - (`\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003.`) + - (`\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004.`) + - (`\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005.`) + - `\022\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007\192\006.` + - `\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007.` + - `\016\013\010\000xxxxxxxxx\192\010\192\009\192\008.` + - `\013\010\000xxxxxxxxx\192\010\192\009.` + - `\010\000xxxxxxxxx\192\010.` + - `\000xxxxxxxxx.`, - ""}, - {"truncated name", "\x07example\x03", "", "dns: buffer size too small"}, - {"non-absolute name", "\x07example\x03com", "", "dns: buffer size too small"}, - {"compression pointer cycle", - "\x03foo" + "\x03bar" + "\x07example" + "\xC0\x04", - "", - "dns: too many compression pointers"}, - {"reserved compression pointer 0b10", "\x07example\x80", "", "dns: bad rdata"}, - {"reserved compression pointer 0b01", "\x07example\x40", "", "dns: bad rdata"}, - } - for _, test := range cases { - output, idx, err := UnpackDomainName([]byte(test.input), 0) - 
if test.expectedOutput != "" && output != test.expectedOutput { - t.Errorf("%s: expected %s, got %s", test.label, test.expectedOutput, output) - } - if test.expectedError == "" && err != nil { - t.Errorf("%s: expected no error, got %d %v", test.label, idx, err) - } else if test.expectedError != "" && (err == nil || err.Error() != test.expectedError) { - t.Errorf("%s: expected error %s, got %d %v", test.label, test.expectedError, idx, err) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx.go deleted file mode 100644 index 9b908c447..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx.go +++ /dev/null @@ -1,106 +0,0 @@ -package dns - -import ( - "crypto/sha1" - "hash" - "strings" -) - -type saltWireFmt struct { - Salt string `dns:"size-hex"` -} - -// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. -func HashName(label string, ha uint8, iter uint16, salt string) string { - saltwire := new(saltWireFmt) - saltwire.Salt = salt - wire := make([]byte, DefaultMsgSize) - n, err := packSaltWire(saltwire, wire) - if err != nil { - return "" - } - wire = wire[:n] - name := make([]byte, 255) - off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) - if err != nil { - return "" - } - name = name[:off] - var s hash.Hash - switch ha { - case SHA1: - s = sha1.New() - default: - return "" - } - - // k = 0 - s.Write(name) - s.Write(wire) - nsec3 := s.Sum(nil) - // k > 0 - for k := uint16(0); k < iter; k++ { - s.Reset() - s.Write(nsec3) - s.Write(wire) - nsec3 = s.Sum(nsec3[:0]) - } - return toBase32(nsec3) -} - -// Cover returns true if a name is covered by the NSEC3 record -func (rr *NSEC3) Cover(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - - nextHash := rr.NextDomain - if ownerHash == nextHash { // empty interval - return false - } - if ownerHash > nextHash { // end of zone - if nameHash > ownerHash { // covered since there is nothing after ownerHash - return true - } - return nameHash < nextHash // if nameHash is before beginning of zone it is covered - } - if nameHash < ownerHash { // nameHash is before ownerHash, not covered - return false - } - return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) -} - -// Match returns true if a name matches the NSEC3 record -func (rr *NSEC3) Match(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - if ownerHash == nameHash { - return true - } - return false -} - -func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { - off, err := packStringHex(sw.Salt, msg, 0) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx_test.go 
b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx_test.go deleted file mode 100644 index 8d5f71797..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/nsecx_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package dns - -import "testing" - -func TestPackNsec3(t *testing.T) { - nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD") - if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" { - t.Error(nsec3) - } - - nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD") - if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" { - t.Error(nsec3) - } -} - -func TestNsec3(t *testing.T) { - nsec3, _ := NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if !nsec3.(*NSEC3).Match("nl.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if !nsec3.(*NSEC3).Match("NL.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.NL. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if nsec3.(*NSEC3).Match("com.") { // - t.Fatal("com. is not in the zone nl.") - } - if nsec3.(*NSEC3).Match("test.nl.") { // name hash = gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q - t.Fatal("gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q.nl. should not match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - nsec3, _ = NewRR("nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if nsec3.(*NSEC3).Match("nl.") { - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should not match a record without a owner hash") - } - - for _, tc := range []struct { - rr *NSEC3 - name string - covers bool - }{ - // positive tests - { // name hash between owner hash and next hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "bsd.com.", - covers: true, - }, - { // end of zone, name hash is after owner hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "3v62ulr0nre83v0rja2vjgtlif9v6rab.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "csd.com.", - covers: true, - }, - { // end of zone, name hash is before beginning of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "PT3RON8N7PM3A0OE989IB84OOSADP7O8.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB", - }, - name: "asd.com.", - covers: true, - }, - // negative tests - { // too short owner name - rr: &NSEC3{ - Hdr: RR_Header{Name: "nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // outside of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "39p91242oslggest5e6a7cci4iaeqvnk.nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // empty interval - rr: &NSEC3{ - Hdr: RR_Header{Name: "2n1tb3vairuobl6rkdvii42n9tfmialp.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "asd.com.", - covers: false, - }, - { // name hash is before owner hash, not covered - rr: &NSEC3{ - Hdr: RR_Header{Name: 
"3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "asd.com.", - covers: false, - }, - } { - covers := tc.rr.Cover(tc.name) - if tc.covers != covers { - t.Fatalf("Cover failed for %s: expected %t, got %t [record: %s]", tc.name, tc.covers, covers, tc.rr) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/parse_test.go deleted file mode 100644 index fc5bdaf5d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/parse_test.go +++ /dev/null @@ -1,1540 +0,0 @@ -package dns - -import ( - "bytes" - "crypto/rsa" - "encoding/hex" - "fmt" - "math/rand" - "net" - "reflect" - "strconv" - "strings" - "testing" - "testing/quick" - "time" -) - -func TestDotInName(t *testing.T) { - buf := make([]byte, 20) - PackDomainName("aa\\.bb.nl.", buf, 0, nil, false) - // index 3 must be a real dot - if buf[3] != '.' { - t.Error("dot should be a real dot") - } - - if buf[6] != 2 { - t.Error("this must have the value 2") - } - dom, _, _ := UnpackDomainName(buf, 0) - // printing it should yield the backspace again - if dom != "aa\\.bb.nl." { - t.Error("dot should have been escaped: ", dom) - } -} - -func TestDotLastInLabel(t *testing.T) { - sample := "aa\\..au." - buf := make([]byte, 20) - _, err := PackDomainName(sample, buf, 0, nil, false) - if err != nil { - t.Fatalf("unexpected error packing domain: %v", err) - } - dom, _, _ := UnpackDomainName(buf, 0) - if dom != sample { - t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom) - } -} - -func TestTooLongDomainName(t *testing.T) { - l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt." - dom := l + l + l + l + l + l + l - _, err := NewRR(dom + " IN A 127.0.0.1") - if err == nil { - t.Error("should be too long") - } else { - t.Logf("error is %v", err) - } - _, err = NewRR("..com. 
IN A 127.0.0.1") - if err == nil { - t.Error("should fail") - } else { - t.Logf("error is %v", err) - } -} - -func TestDomainName(t *testing.T) { - tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.", - "www.*.miek.nl.", "www.*.miek.nl.", - } - dbuff := make([]byte, 40) - - for _, ts := range tests { - if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil { - t.Error("not a valid domain name") - continue - } - n, _, err := UnpackDomainName(dbuff, 0) - if err != nil { - t.Error("failed to unpack packed domain name") - continue - } - if ts != n { - t.Errorf("must be equal: in: %s, out: %s", ts, n) - } - } -} - -func TestDomainNameAndTXTEscapes(t *testing.T) { - tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', 9, 13, 10, 0, 255} - for _, b := range tests { - rrbytes := []byte{ - 1, b, 0, // owner - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - 0, 2, 1, b, // Data - } - rr1, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - s := rr1.String() - rr2, err := NewRR(s) - if err != nil { - t.Errorf("Error parsing unpacked RR's string: %v", err) - t.Errorf(" Bytes: %v", rrbytes) - t.Errorf("String: %v", s) - } - repacked := make([]byte, len(rrbytes)) - if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { - t.Errorf("error packing parsed RR: %v", err) - t.Errorf(" original Bytes: %v", rrbytes) - t.Errorf("unpacked Struct: %v", rr1) - t.Errorf(" parsed Struct: %v", rr2) - } - if !bytes.Equal(repacked, rrbytes) { - t.Error("packed bytes don't match original bytes") - t.Errorf(" original bytes: %v", rrbytes) - t.Errorf(" packed bytes: %v", repacked) - t.Errorf("unpacked struct: %v", rr1) - t.Errorf(" parsed struct: %v", rr2) - } - } -} - -func TestTXTEscapeParsing(t *testing.T) { - test := [][]string{ - {`";"`, `";"`}, - {`\;`, `";"`}, - {`"\t"`, `"t"`}, - {`"\r"`, `"r"`}, - {`"\ "`, `" "`}, - {`"\;"`, `";"`}, - {`"\;\""`, `";\""`}, - {`"\(a\)"`, `"(a)"`}, - {`"\(a)"`, `"(a)"`}, - {`"(a\)"`, `"(a)"`}, - {`"(a)"`, `"(a)"`}, - {`"\048"`, `"0"`}, - {`"\` + "\t" + `"`, `"\009"`}, - {`"\` + "\n" + `"`, `"\010"`}, - {`"\` + "\r" + `"`, `"\013"`}, - {`"\` + "\x11" + `"`, `"\017"`}, - {`"\'"`, `"'"`}, - } - for _, s := range test { - rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0])) - if err != nil { - t.Errorf("could not parse %v TXT: %s", s[0], err) - continue - } - - txt := sprintTxt(rr.(*TXT).Txt) - if txt != s[1] { - t.Errorf("mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1]) - } - } -} - -func GenerateDomain(r *rand.Rand, size int) []byte { - dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs - var dn []byte - done := false - for i := 0; i < dnLen && !done; { - max := dnLen - i - if max > 63 { - max = 63 - } - lLen := max - if lLen != 0 { - lLen = int(r.Int31()) % max - } - done = lLen == 0 - if done { - continue - } - l := make([]byte, lLen+1) - l[0] = byte(lLen) - for j := 0; j < lLen; j++ { - l[j+1] = byte(rand.Int31()) - } - dn = append(dn, l...) 
- i += 1 + lLen - } - return append(dn, 0) -} - -func TestDomainQuick(t *testing.T) { - r := rand.New(rand.NewSource(0)) - f := func(l int) bool { - db := GenerateDomain(r, l) - ds, _, err := UnpackDomainName(db, 0) - if err != nil { - panic(err) - } - buf := make([]byte, 255) - off, err := PackDomainName(ds, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing domain: %v", err) - t.Errorf(" bytes: %v", db) - t.Errorf("string: %v", ds) - return false - } - if !bytes.Equal(db, buf[:off]) { - t.Errorf("repacked domain doesn't match original:") - t.Errorf("src bytes: %v", db) - t.Errorf(" string: %v", ds) - t.Errorf("out bytes: %v", buf[:off]) - return false - } - return true - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} - -func GenerateTXT(r *rand.Rand, size int) []byte { - rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs - var rd []byte - for i := 0; i < rdLen; { - max := rdLen - 1 - if max > 255 { - max = 255 - } - sLen := max - if max != 0 { - sLen = int(r.Int31()) % max - } - s := make([]byte, sLen+1) - s[0] = byte(sLen) - for j := 0; j < sLen; j++ { - s[j+1] = byte(rand.Int31()) - } - rd = append(rd, s...) - i += 1 + sLen - } - return rd -} - -// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt -// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't -// place the quotes where they need to be. -// So either add some code the places the quotes in just the right spots, make this non random -// or do something else. -// Disabled for now. (miek) -func testTXTRRQuick(t *testing.T) { - s := rand.NewSource(0) - r := rand.New(s) - typeAndClass := []byte{ - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - } - f := func(l int) bool { - owner := GenerateDomain(r, l) - rdata := GenerateTXT(r, l) - rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata)) - rrbytes = append(rrbytes, owner...) - rrbytes = append(rrbytes, typeAndClass...) - rrbytes = append(rrbytes, byte(len(rdata)>>8)) - rrbytes = append(rrbytes, byte(len(rdata))) - rrbytes = append(rrbytes, rdata...) 
- rr, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - buf := make([]byte, len(rrbytes)*3) - off, err := PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("pack Error: %v\nRR: %v", err, rr) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("packed bytes don't match original bytes") - t.Errorf("src bytes: %v", rrbytes) - t.Errorf(" struct: %v", rr) - t.Errorf("out bytes: %v", buf) - return false - } - if len(rdata) == 0 { - // string'ing won't produce any data to parse - return true - } - rrString := rr.String() - rr2, err := NewRR(rrString) - if err != nil { - t.Errorf("error parsing own output: %v", err) - t.Errorf("struct: %v", rr) - t.Errorf("string: %v", rrString) - return false - } - if rr2.String() != rrString { - t.Errorf("parsed rr.String() doesn't match original string") - t.Errorf("original: %v", rrString) - t.Errorf(" parsed: %v", rr2.String()) - return false - } - - buf = make([]byte, len(rrbytes)*3) - off, err = PackRR(rr2, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing parsed rr: %v", err) - t.Errorf("unpacked Struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed Struct: %v", rr2) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("parsed packed bytes don't match original bytes") - t.Errorf(" source bytes: %v", rrbytes) - t.Errorf("unpacked struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed struct: %v", rr2) - t.Errorf(" repacked bytes: %v", buf) - return false - } - return true - } - c := &quick.Config{MaxCountScale: 10} - if err := quick.Check(f, c); err != nil { - t.Error(err) - } -} - -func TestParseDirectiveMisc(t *testing.T) { - tests := map[string]string{ - "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.", - "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.", - "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.", - `name. IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 5m ; refresh (5 minutes) - 5m ; retry (5 minutes) - 2w ; expire (2 weeks) - 300 ; minimum (5 minutes) -)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300", - ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.", - "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestNSEC(t *testing.T) { - nsectests := map[string]string{ - "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F", - "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. 
A RRSIG NSEC TYPE65534", - "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test. NSEC3 1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA": "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test.\t3600\tIN\tNSEC3\t1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA", - } - for i, o := range nsectests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseLOC(t *testing.T) { - lt := map[string]string{ - "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseDS(t *testing.T) { - dt := map[string]string{ - "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestQuotes(t *testing.T) { - tests := map[string]string{ - `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", - `t.example.com. IN TXT "a - bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\010 bc\"", - `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", - `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", - `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", - `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", - `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", - `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.", - "cid.urn.arpa. 
NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseClass(t *testing.T) { - tests := map[string]string{ - "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1", - "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1", - "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1", - // ClassANY can not occur in zone files - // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", - "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestBrace(t *testing.T) { - tests := map[string]string{ - "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1", - "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.", - `miek.nl. IN ( - 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1", - "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1", - "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1", - "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.", - "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.", - `(miek.nl.) ( - (IN) - (AAAA) - (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1", - `(miek.nl.) ( - (IN) - (AAAA) - (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1", - "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2", - `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7(2)00 ; retry (2 hours) - 604()800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600", - "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10", - "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11", - "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12", - `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7200 ; retry (2 hours) - 604800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse RR: %v\n\t%s", err, i) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseFailure(t *testing.T) { - tests := []string{"miek.nl. IN A 327.0.0.1", - "miek.nl. IN AAAA ::x", - "miek.nl. IN MX a0 miek.nl.", - "miek.nl aap IN MX mx.miek.nl.", - "miek.nl 200 IN mxx 10 mx.miek.nl.", - "miek.nl. inn MX 10 mx.miek.nl.", - // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata - "miek.nl. IN CNAME ..", - "miek.nl. PA MX 10 miek.nl.", - "miek.nl. 
) IN MX 10 miek.nl.", - } - - for _, s := range tests { - _, err := NewRR(s) - if err == nil { - t.Errorf("should have triggered an error: \"%s\"", s) - } - } -} - -func TestZoneParsing(t *testing.T) { - // parse_test.db - db := ` -a.example.com. IN A 127.0.0.1 -8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG -$ORIGIN a.example.com. -test IN A 127.0.0.1 - IN SSHFP 1 2 ( - BC6533CDC95A79078A39A56EA7635984ED655318ADA9 - B6159E30723665DA95BB ) -$ORIGIN b.example.com. -test IN CNAME test.a.example.com. -` - start := time.Now().UnixNano() - to := ParseZone(strings.NewReader(db), "", "parse_test.db") - var i int - for x := range to { - i++ - if x.Error != nil { - t.Error(x.Error) - continue - } - t.Log(x.RR) - } - delta := time.Now().UnixNano() - start - t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) -} - -func ExampleParseZone() { - zone := `$ORIGIN . -$TTL 3600 ; 1 hour -name IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 300 ; refresh (5 minutes) - 300 ; retry (5 minutes) - 1209600 ; expire (2 weeks) - 300 ; minimum (5 minutes) - ) -$TTL 10800 ; 3 hours -name. 10800 IN NS name. - IN NS g6.nstld.com. - 7200 NS h6.nstld.com. - 3600 IN NS j6.nstld.com. - IN 3600 NS k6.nstld.com. - NS l6.nstld.com. - NS a6.nstld.com. - NS c6.nstld.com. - NS d6.nstld.com. - NS f6.nstld.com. - NS m6.nstld.com. -( - NS m7.nstld.com. -) -$ORIGIN name. -0-0onlus NS ns7.ehiweb.it. - NS ns8.ehiweb.it. -0-g MX 10 mx01.nic - MX 10 mx02.nic - MX 10 mx03.nic - MX 10 mx04.nic -$ORIGIN 0-g.name -moutamassey NS ns01.yahoodomains.jp. - NS ns02.yahoodomains.jp. -` - to := ParseZone(strings.NewReader(zone), "", "testzone") - for x := range to { - fmt.Println(x.RR) - } - // Output: - // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 - // name. 10800 IN NS name. - // name. 10800 IN NS g6.nstld.com. - // name. 7200 IN NS h6.nstld.com. - // name. 3600 IN NS j6.nstld.com. - // name. 3600 IN NS k6.nstld.com. - // name. 10800 IN NS l6.nstld.com. - // name. 10800 IN NS a6.nstld.com. - // name. 10800 IN NS c6.nstld.com. - // name. 10800 IN NS d6.nstld.com. - // name. 10800 IN NS f6.nstld.com. - // name. 10800 IN NS m6.nstld.com. - // name. 10800 IN NS m7.nstld.com. - // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. - // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. - // 0-g.name. 10800 IN MX 10 mx01.nic.name. - // 0-g.name. 10800 IN MX 10 mx02.nic.name. - // 0-g.name. 10800 IN MX 10 mx03.nic.name. - // 0-g.name. 10800 IN MX 10 mx04.nic.name. - // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. - // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. -} - -func ExampleHIP() { - h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs.example.com. )` - if hip, err := NewRR(h); err == nil { - fmt.Println(hip.String()) - } - // Output: - // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. -} - -func TestHIP(t *testing.T) { - h := `www.example.com. 
IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs1.example.com. - rvs2.example.com. )` - rr, err := NewRR(h) - if err != nil { - t.Fatalf("failed to parse RR: %v", err) - } - t.Logf("RR: %s", rr) - msg := new(Msg) - msg.Answer = []RR{rr, rr} - bytes, err := msg.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := msg.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(msg.Answer) != 2 { - t.Fatalf("2 answers expected: %v", msg) - } - for i, rr := range msg.Answer { - rr := rr.(*HIP) - t.Logf("RR: %s", rr) - if l := len(rr.RendezvousServers); l != 2 { - t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) - } - for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} { - if rr.RendezvousServers[j] != s { - t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg) - } - } - } -} - -func ExampleSOA() { - s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" - if soa, err := NewRR(s); err == nil { - fmt.Println(soa.String()) - } - // Output: - // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 -} - -func TestLineNumberError(t *testing.T) { - s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" - if _, err := NewRR(s); err != nil { - if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { - t.Error("not expecting this error: ", err) - } - } -} - -// Test with no known RR on the line -func TestLineNumberError2(t *testing.T) { - tests := map[string]string{ - "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21", - "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57", - "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60", - `example.com 1000 IN TALINK ( a.example.com. - bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18", - // This is a bug, it should report an error on line 1, but the new is already processed. - `example.com 1000 IN TALINK ( a.example.com. b...example.com. - )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"} - - for in, errStr := range tests { - _, err := NewRR(in) - if err == nil { - t.Error("err is nil") - } else { - if err.Error() != errStr { - t.Errorf("%s: error should be %s is %v", in, errStr, err) - } - } - } -} - -// Test if the calculations are correct -func TestRfc1982(t *testing.T) { - // If the current time and the timestamp are more than 68 years apart - // it means the date has wrapped. 
0 is 1970 - - // fall in the current 68 year span - strtests := []string{"20120525134203", "19700101000000", "20380119031408"} - for _, v := range strtests { - if x, _ := StringToTime(v); v != TimeToString(x) { - t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x) - } - } - - inttests := map[uint32]string{0: "19700101000000", - 1 << 31: "20380119031408", - 1<<32 - 1: "21060207062815", - } - for i, v := range inttests { - if TimeToString(i) != v { - t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i)) - } - } - - // Future tests, these dates get parsed to a date within the current 136 year span - future := map[string]string{"22680119031408": "20631123173144", - "19010101121212": "20370206184028", - "19210101121212": "20570206184028", - "19500101121212": "20860206184028", - "19700101000000": "19700101000000", - "19690101000000": "21050207062816", - "29210101121212": "21040522212236", - } - for from, to := range future { - x, _ := StringToTime(from) - y := TimeToString(x) - if y != to { - t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y) - } - } -} - -func TestEmpty(t *testing.T) { - for range ParseZone(strings.NewReader(""), "", "") { - t.Errorf("should be empty") - } -} - -func TestLowercaseTokens(t *testing.T) { - var testrecords = []string{ - "example.org. 300 IN a 1.2.3.4", - "example.org. 300 in A 1.2.3.4", - "example.org. 300 in a 1.2.3.4", - "example.org. 300 a 1.2.3.4", - "example.org. 300 A 1.2.3.4", - "example.org. IN a 1.2.3.4", - "example.org. in A 1.2.3.4", - "example.org. in a 1.2.3.4", - "example.org. a 1.2.3.4", - "example.org. A 1.2.3.4", - "example.org. a 1.2.3.4", - "$ORIGIN example.org.\n a 1.2.3.4", - "$Origin example.org.\n a 1.2.3.4", - "$origin example.org.\n a 1.2.3.4", - "example.org. Class1 Type1 1.2.3.4", - } - for _, testrr := range testrecords { - _, err := NewRR(testrr) - if err != nil { - t.Errorf("failed to parse %#v, got %v", testrr, err) - } - } -} - -func ExampleParseZone_generate() { - // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 - zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" - to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") - for x := range to { - if x.Error == nil { - fmt.Println(x.RR.String()) - } - } - // Output: - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. - // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. - // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. - // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. - // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. - // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. - // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. - // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. - // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. 
-} - -func TestSRVPacking(t *testing.T) { - msg := Msg{} - - things := []string{"1.2.3.4:8484", - "45.45.45.45:8484", - "84.84.84.84:8484", - } - - for i, n := range things { - h, p, err := net.SplitHostPort(n) - if err != nil { - continue - } - port, _ := strconv.ParseUint(p, 10, 16) - - rr := &SRV{ - Hdr: RR_Header{Name: "somename.", - Rrtype: TypeSRV, - Class: ClassINET, - Ttl: 5}, - Priority: uint16(i), - Weight: 5, - Port: uint16(port), - Target: h + ".", - } - - msg.Answer = append(msg.Answer, rr) - } - - _, err := msg.Pack() - if err != nil { - t.Fatalf("couldn't pack %v: %v", msg, err) - } -} - -func TestParseBackslash(t *testing.T) { - if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { - t.Errorf("could not create RR with \\ and \\@ in it") - } else { - t.Logf("parsed %s", r.String()) - } -} - -func TestILNP(t *testing.T) { - tests := []string{ - "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64", - "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65", - "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66", - "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0", - "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0", - "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0", - "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000", - "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000", - "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.", - "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.", - } - for _, t1 := range tests { - r, err := NewRR(t1) - if err != nil { - t.Fatalf("an error occurred: %v", err) - } else { - if t1 != r.String() { - t.Fatalf("strings should be equal %s %s", t1, r.String()) - } - } - } -} - -func TestGposEidNimloc(t *testing.T) { - dt := map[string]string{ - "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.", - "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0", - "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0", - "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920", - "VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPX(t *testing.T) { - dt := map[string]string{ - "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.", - "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. 
O-ab.PRMD-net2.ADMDb.C-it.", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestComment(t *testing.T) { - // Comments we must see - comments := map[string]bool{"; this is comment 1": true, - "; this is comment 4": true, "; this is comment 6": true, - "; this is comment 7": true, "; this is comment 8": true} - zone := ` -foo. IN A 10.0.0.1 ; this is comment 1 -foo. IN A ( - 10.0.0.2 ; this is comment2 -) -; this is comment3 -foo. IN A 10.0.0.3 -foo. IN A ( 10.0.0.4 ); this is comment 4 - -foo. IN A 10.0.0.5 -; this is comment5 - -foo. IN A 10.0.0.6 - -foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6 -foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7 -foo. IN TXT "THIS IS TEXT MAN"; this is comment 8 -` - for x := range ParseZone(strings.NewReader(zone), ".", "") { - if x.Error == nil { - if x.Comment != "" { - if _, ok := comments[x.Comment]; !ok { - t.Errorf("wrong comment %s", x.Comment) - } - } - } - } -} - -func TestEUIxx(t *testing.T) { - tests := map[string]string{ - "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a", - "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestUserRR(t *testing.T) { - tests := map[string]string{ - "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234", - "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556", - "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestTXT(t *testing.T) { - // Test single entry TXT record - rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`) - if err != nil { - t.Error("failed to parse single value TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT value:", len(rr.Txt)) - } else if rr.Txt[0] != "single value" { - t.Error("bad single value") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` { - t.Error("bad representation of TXT record:", rr.String()) - } - if rr.len() != 28+1+12 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test multi entries TXT record - rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`) - if err != nil { - t.Error("failed to parse multi-values TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 4 { - t.Error("bad size of TXT multi-value:", len(rr.Txt)) - } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" { - t.Error("bad values in TXT records") - } - if rr.String() != `_raop._tcp.local. 
60 IN TXT "a=1" "b=2" "c=3" "d=4"` { - t.Error("bad representation of TXT multi value record:", rr.String()) - } - if rr.len() != 28+1+3+1+3+1+3+1+3 { - t.Error("bad size of serialized multi value record:", rr.len()) - } - } - - // Test empty-string in TXT record - rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT empty-string value:", len(rr.Txt)) - } else if rr.Txt[0] != "" { - t.Error("bad value for empty-string TXT record") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT ""` { - t.Error("bad representation of empty-string TXT record:", rr.String()) - } - if rr.len() != 28+1 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - s += "b" - rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } - if rr.(*TXT).Txt[1] != "b" { - t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) - } - t.Log(rr.String()) -} - -func TestTypeXXXX(t *testing.T) { - _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd") - if err != nil { - t.Errorf("failed to parse TYPE1234 RR: %v", err) - } - _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd") - if err == nil { - t.Errorf("this should not work, for TYPE655341") - } - _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001") - if err == nil { - t.Errorf("this should not work") - } -} - -func TestPTR(t *testing.T) { - _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.") - if err != nil { - t.Error("failed to parse ", err) - } -} - -func TestDigit(t *testing.T) { - tests := map[string]byte{ - "miek\\000.nl. 100 IN TXT \"A\"": 0, - "miek\\001.nl. 100 IN TXT \"A\"": 1, - "miek\\254.nl. 100 IN TXT \"A\"": 254, - "miek\\255.nl. 100 IN TXT \"A\"": 255, - "miek\\256.nl. 100 IN TXT \"A\"": 0, - "miek\\257.nl. 100 IN TXT \"A\"": 1, - "miek\\004.nl. 100 IN TXT \"A\"": 4, - } - for s, i := range tests { - r, err := NewRR(s) - buf := make([]byte, 40) - if err != nil { - t.Fatalf("failed to parse %v", err) - } - PackRR(r, buf, 0, nil, false) - t.Log(buf) - if buf[5] != i { - t.Fatalf("5 pos must be %d, is %d", i, buf[5]) - } - r1, _, _ := UnpackRR(buf, 0) - if r1.Header().Ttl != 100 { - t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl) - } - } -} - -func TestParseRRSIGTimestamp(t *testing.T) { - tests := map[string]bool{ - `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. 
MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - } - for r := range tests { - _, err := NewRR(r) - if err != nil { - t.Error(err) - } - } -} - -func TestTxtEqual(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - rr1.Txt = []string{"a\"a", "\"", "b"} - rr2, _ := NewRR(rr1.String()) - if rr1.String() != rr2.String() { - // This is not an error, but keep this test. - t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) - } - t.Logf("%s\n%s", rr1.String(), rr2.String()) -} - -func TestTxtLong(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - // Make a long txt record, this breaks when sending the packet, - // but not earlier. - rr1.Txt = []string{"start-"} - for i := 0; i < 200; i++ { - rr1.Txt[0] += "start-" - } - str := rr1.String() - if len(str) < len(rr1.Txt[0]) { - t.Error("string conversion should work") - } -} - -// Basically, don't crash. -func TestMalformedPackets(t *testing.T) { - var packets = []string{ - "0021641c0000000100000000000078787878787878787878787303636f6d0000100001", - } - - // com = 63 6f 6d - for _, packet := range packets { - data, _ := hex.DecodeString(packet) - // for _, v := range data { - // t.Log(v) - // } - var msg Msg - msg.Unpack(data) - // println(msg.String()) - } -} - -type algorithm struct { - name uint8 - bits int -} - -func TestNewPrivateKey(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - algorithms := []algorithm{ - {ECDSAP256SHA256, 256}, - {ECDSAP384SHA384, 384}, - {RSASHA1, 1024}, - {RSASHA256, 2048}, - {DSA, 1024}, - } - - for _, algo := range algorithms { - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
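// A note on the DNSKEY fields set in this block: per RFC 4034, Flags 256
// sets only the Zone Key bit (a zone-signing key) and Protocol is always 3.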
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = algo.name - privkey, err := key.Generate(algo.bits) - if err != nil { - t.Fatal(err) - } - - newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) - if err != nil { - t.Error(key.String()) - t.Error(key.PrivateKeyString(privkey)) - t.Fatal(err) - } - - switch newPrivKey := newPrivKey.(type) { - case *rsa.PrivateKey: - newPrivKey.Precompute() - } - - if !reflect.DeepEqual(privkey, newPrivKey) { - t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey) - } - } -} - -// special input test -func TestNewRRSpecial(t *testing.T) { - var ( - rr RR - err error - expect string - ) - - rr, err = NewRR("; comment") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("$ORIGIN foo.") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR(" ") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("\n") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") - expect = "foo.\t3600\tIN\tA\t1.1.1.1" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr == nil || rr.String() != expect { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } -} - -func TestPrintfVerbsRdata(t *testing.T) { - x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.") - if Field(x, 1) != "20" { - t.Errorf("should be 20") - } - if Field(x, 2) != "mx.miek.nl." { - t.Errorf("should be mx.miek.nl.") - } - - x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") - if Field(x, 1) != "127.0.0.1" { - t.Errorf("should be 127.0.0.1") - } - - x, _ = NewRR("www.miek.nl. IN AAAA ::1") - if Field(x, 1) != "::1" { - t.Errorf("should be ::1") - } - - x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") - if Field(x, 1) != "a.miek.nl." { - t.Errorf("should be a.miek.nl.") - } - if Field(x, 2) != "A NS SOA MX AAAA" { - t.Errorf("should be A NS SOA MX AAAA") - } - - x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") - if Field(x, 1) != "first second" { - t.Errorf("should be first second") - } - if Field(x, 0) != "" { - t.Errorf("should be empty") - } -} - -func TestParseTokenOverflow(t *testing.T) { - _, err := NewRR("_443._tcp.example.org. 
IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f446967694365727453484132486967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797") - if err == nil { - t.Fatalf("token overflow should return an error") - } - t.Logf("err: %s\n", err) -} - -func TestParseTLSA(t *testing.T) { - lt := []string{ - "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 
69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2", - } - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseSMIMEA(t *testing.T) { - lt := map[string]string{ - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8 c26b251fa0c887ba4869f01 1a65f7e79967c2eb729f5b": "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseSSHFP(t *testing.T) { - lt := []string{ - "test.example.org.\t300\tSSHFP\t1 2 (\n" + - "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" + - "\t\t\t\t\tB6159E30723665DA95BB )", - "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )", - } - result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB" - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != result { - t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseHINFO(t *testing.T) { - dt := map[string]string{ - "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"", - "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"", - // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 
3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"", - // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html - // but effectively, even Bind would replace it to correctly formed text when you AXFR - // TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseCAA(t *testing.T) { - lt := map[string]string{ - "example.net. CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"", - "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"", - "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"", - "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPackCAA(t *testing.T) { - m := new(Msg) - record := new(CAA) - record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0} - record.Tag = "issue" - record.Value = "symantec.com" - record.Flag = 1 - - m.Answer = append(m.Answer, record) - bytes, err := m.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := m.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(m.Answer) != 1 { - t.Fatalf("incorrect number of answers unpacked") - } - rr := m.Answer[0].(*CAA) - if rr.Tag != "issue" { - t.Fatalf("invalid tag for unpacked answer") - } else if rr.Value != "symantec.com" { - t.Fatalf("invalid value for unpacked answer") - } else if rr.Flag != 1 { - t.Fatalf("invalid flag for unpacked answer") - } -} - -func TestParseURI(t *testing.T) { - lt := map[string]string{ - "_http._tcp. IN URI 10 1 \"http://www.example.com/path\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"http://www.example.com/path\"", - "_http._tcp. IN URI 10 1 \"\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"\"", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseAVC(t *testing.T) { - avcs := map[string]string{ - `example.org. IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`: `example.org. 
3600 IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`, - } - for avc, o := range avcs { - rr, err := NewRR(avc) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", avc, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestUnbalancedParens(t *testing.T) { - sig := `example.com. 3600 IN RRSIG MX 15 2 3600 ( - 1440021600 1438207200 3613 example.com. ( - oL9krJun7xfBOIWcGHi7mag5/hdZrKWw15jPGrHpjQeRAvTdszaPD+QLs3f - x8A4M3e23mRZ9VrbpMngwcrqNAg== )` - _, err := NewRR(sig) - if err == nil { - t.Fatalf("Failed to detect extra opening brace") - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr.go deleted file mode 100644 index 6b08e6e95..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr.go +++ /dev/null @@ -1,149 +0,0 @@ -package dns - -import ( - "fmt" - "strings" -) - -// PrivateRdata is an interface used for implementing "Private Use" RR types, see -// RFC 6895. This allows one to experiment with new RR types, without requesting an -// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. -type PrivateRdata interface { - // String returns the text presentaton of the Rdata of the Private RR. - String() string - // Parse parses the Rdata of the private RR. - Parse([]string) error - // Pack is used when packing a private RR into a buffer. - Pack([]byte) (int, error) - // Unpack is used when unpacking a private RR from a buffer. - // TODO(miek): diff. signature than Pack, see edns0.go for instance. - Unpack([]byte) (int, error) - // Copy copies the Rdata. - Copy(PrivateRdata) error - // Len returns the length in octets of the Rdata. - Len() int -} - -// PrivateRR represents an RR that uses a PrivateRdata user-defined type. -// It mocks normal RRs and implements dns.RR interface. -type PrivateRR struct { - Hdr RR_Header - Data PrivateRdata -} - -func mkPrivateRR(rrtype uint16) *PrivateRR { - // Panics if RR is not an instance of PrivateRR. - rrfunc, ok := TypeToRR[rrtype] - if !ok { - panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) - } - - anyrr := rrfunc() - switch rr := anyrr.(type) { - case *PrivateRR: - return rr - } - panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) -} - -// Header return the RR header of r. -func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } - -func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } - -// Private len and copy parts to satisfy RR interface. -func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } -func (r *PrivateRR) copy() RR { - // make new RR like this: - rr := mkPrivateRR(r.Hdr.Rrtype) - newh := r.Hdr.copyHeader() - rr.Hdr = *newh - - err := r.Data.Copy(rr.Data) - if err != nil { - panic("dns: got value that could not be used to copy Private rdata") - } - return rr -} -func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := r.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - n, err := r.Data.Pack(msg[off:]) - if err != nil { - return len(msg), err - } - off += n - r.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -// PrivateHandle registers a private resource record type. 
It requires -// string and numeric representation of private RR type and generator function as argument. -func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { - rtypestr = strings.ToUpper(rtypestr) - - TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } - TypeToString[rtype] = rtypestr - StringToType[rtypestr] = rtype - - typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { - if noRdata(h) { - return &h, off, nil - } - var err error - - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h - - off1, err := rr.Data.Unpack(msg[off:]) - off += off1 - if err != nil { - return rr, off, err - } - return rr, off, err - } - - setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h - - var l lex - text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 - Fetch: - for { - // TODO(miek): we could also be returning _QUOTE, this might or might not - // be an issue (basically parsing TXT becomes hard) - switch l = <-c; l.value { - case zNewline, zEOF: - break Fetch - case zString: - text = append(text, l.token) - } - } - - err := rr.Data.Parse(text) - if err != nil { - return nil, &ParseError{f, err.Error(), l}, "" - } - - return rr, nil, "" - } - - typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} -} - -// PrivateHandleRemove removes defenitions required to support private RR type. -func PrivateHandleRemove(rtype uint16) { - rtypestr, ok := TypeToString[rtype] - if ok { - delete(TypeToRR, rtype) - delete(TypeToString, rtype) - delete(typeToparserFunc, rtype) - delete(StringToType, rtypestr) - delete(typeToUnpack, rtype) - } - return -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr_test.go deleted file mode 100644 index 72ec8f5c0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/privaterr_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package dns_test - -import ( - "strings" - "testing" - - "github.com/miekg/dns" -) - -const TypeISBN uint16 = 0xFF00 - -// A crazy new RR type :) -type ISBN struct { - x string // rdata with 10 or 13 numbers, dashes or spaces allowed -} - -func NewISBN() dns.PrivateRdata { return &ISBN{""} } - -func (rd *ISBN) Len() int { return len([]byte(rd.x)) } -func (rd *ISBN) String() string { return rd.x } - -func (rd *ISBN) Parse(txt []string) error { - rd.x = strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *ISBN) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *ISBN) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *ISBN) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*ISBN) - if !ok { - return dns.ErrRdata - } - isbn.x = rd.x - return nil -} - -var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t") - -func TestPrivateText(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - if rr.String() != testrecord { - t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) - } else { - t.Log(rr.String()) - } -} - -func TestPrivateByteSlice(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, 
NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 100) - off, err := dns.PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("got error packing ISBN: %v", err) - } - - custrr := rr.(*dns.PrivateRR) - if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off { - t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln) - } - - rr1, off1, err := dns.UnpackRR(buf[:off], 0) - if err != nil { - t.Errorf("got error unpacking ISBN: %v", err) - return - } - - if off1 != off { - t.Errorf("offset after unpacking differs: %d != %d", off1, off) - } - - if rr1.String() != testrecord { - t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord) - } else { - t.Log(rr1.String()) - } -} - -const TypeVERSION uint16 = 0xFF01 - -type VERSION struct { - x string -} - -func NewVersion() dns.PrivateRdata { return &VERSION{""} } - -func (rd *VERSION) String() string { return rd.x } -func (rd *VERSION) Parse(txt []string) error { - rd.x = strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *VERSION) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *VERSION) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *VERSION) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*VERSION) - if !ok { - return dns.ErrRdata - } - isbn.x = rd.x - return nil -} - -func (rd *VERSION) Len() int { - return len([]byte(rd.x)) -} - -var smallzone = `$ORIGIN example.org. -@ SOA sns.dns.icann.org. noc.dns.icann.org. ( - 2014091518 7200 3600 1209600 3600 -) - A 1.2.3.4 -ok ISBN 1231-92110-12 -go VERSION ( - 1.3.1 ; comment -) -www ISBN 1231-92110-16 -* CNAME @ -` - -func TestPrivateZoneParser(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - dns.PrivateHandle("VERSION", TypeVERSION, NewVersion) - defer dns.PrivateHandleRemove(TypeISBN) - defer dns.PrivateHandleRemove(TypeVERSION) - - r := strings.NewReader(smallzone) - for x := range dns.ParseZone(r, ".", "") { - if err := x.Error; err != nil { - t.Fatal(err) - } - t.Log(x.RR) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/rawmsg.go deleted file mode 100644 index 6e21fba7e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/rawmsg.go +++ /dev/null @@ -1,49 +0,0 @@ -package dns - -import "encoding/binary" - -// rawSetRdlength sets the rdlength in the header of -// the RR. The offset 'off' must be positioned at the -// start of the header of the RR, 'end' must be the -// end of the RR. -func rawSetRdlength(msg []byte, off, end int) bool { - l := len(msg) -Loop: - for { - if off+1 > l { - return false - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // End of the domainname - break Loop - } - if off+c > l { - return false - } - off += c - - case 0xC0: - // pointer, next byte included, ends domainname - off++ - break Loop - } - } - // The domainname has been seen, we at the start of the fixed part in the header. - // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. 
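// Put differently: the fixed part after the owner name is TYPE(2) +
// CLASS(2) + TTL(4) = 8 octets, so msg[off:off+2] is the RDLENGTH slot
// and the rdata itself runs from off+2 up to 'end'.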
- off += 2 + 2 + 4 - if off+2 > l { - return false - } - //off+1 is the end of the header, 'end' is the end of the rr - //so 'end' - 'off+2' is the length of the rdata - rdatalen := end - (off + 2) - if rdatalen > 0xFFFF { - return false - } - binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) - return true -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/remote_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/remote_test.go deleted file mode 100644 index 4cf701fe4..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/remote_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package dns - -import "testing" - -const LinodeAddr = "176.58.119.54:53" - -func TestClientRemote(t *testing.T) { - m := new(Msg) - m.SetQuestion("go.dns.miek.nl.", TypeTXT) - - c := new(Client) - r, _, err := c.Exchange(m, LinodeAddr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/reverse.go deleted file mode 100644 index f6e7a47a6..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/reverse.go +++ /dev/null @@ -1,38 +0,0 @@ -package dns - -// StringToType is the reverse of TypeToString, needed for string parsing. -var StringToType = reverseInt16(TypeToString) - -// StringToClass is the reverse of ClassToString, needed for string parsing. -var StringToClass = reverseInt16(ClassToString) - -// StringToOpcode is a map of opcodes to strings. -var StringToOpcode = reverseInt(OpcodeToString) - -// StringToRcode is a map of rcodes to strings. -var StringToRcode = reverseInt(RcodeToString) - -// Reverse a map -func reverseInt8(m map[uint8]string) map[string]uint8 { - n := make(map[string]uint8, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt16(m map[uint16]string) map[string]uint16 { - n := make(map[string]uint16, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt(m map[int]string) map[string]int { - n := make(map[string]int, len(m)) - for u, s := range m { - n[s] = u - } - return n -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize.go deleted file mode 100644 index b489f3f05..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize.go +++ /dev/null @@ -1,84 +0,0 @@ -package dns - -// Dedup removes identical RRs from rrs. It preserves the original ordering. -// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies -// rrs. -// m is used to store the RRs temporay. If it is nil a new map will be allocated. -func Dedup(rrs []RR, m map[string]RR) []RR { - if m == nil { - m = make(map[string]RR) - } - // Save the keys, so we don't have to call normalizedString twice. - keys := make([]*string, 0, len(rrs)) - - for _, r := range rrs { - key := normalizedString(r) - keys = append(keys, &key) - if _, ok := m[key]; ok { - // Shortest TTL wins. - if m[key].Header().Ttl > r.Header().Ttl { - m[key].Header().Ttl = r.Header().Ttl - } - continue - } - - m[key] = r - } - // If the length of the result map equals the amount of RRs we got, - // it means they were all different. We can then just return the original rrset. 
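// (Where duplicates were found, the surviving RR's TTL has already been
// lowered in place through the map; the loop below only compacts rrs.)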
- if len(m) == len(rrs) { - return rrs - } - - j := 0 - for i, r := range rrs { - // If keys[i] lives in the map, we should copy and remove it. - if _, ok := m[*keys[i]]; ok { - delete(m, *keys[i]) - rrs[j] = r - j++ - } - - if len(m) == 0 { - break - } - } - - return rrs[:j] -} - -// normalizedString returns a normalized string from r. The TTL -// is removed and the domain name is lowercased. We go from this: -// DomainNameTTLCLASSTYPERDATA to: -// lowercasenameCLASSTYPE... -func normalizedString(r RR) string { - // A string Go DNS makes has: domainnameTTL... - b := []byte(r.String()) - - // find the first non-escaped tab, then another, so we capture where the TTL lives. - esc := false - ttlStart, ttlEnd := 0, 0 - for i := 0; i < len(b) && ttlEnd == 0; i++ { - switch { - case b[i] == '\\': - esc = !esc - case b[i] == '\t' && !esc: - if ttlStart == 0 { - ttlStart = i - continue - } - if ttlEnd == 0 { - ttlEnd = i - } - case b[i] >= 'A' && b[i] <= 'Z' && !esc: - b[i] += 32 - default: - esc = false - } - } - - // remove TTL. - copy(b[ttlStart:], b[ttlEnd:]) - cut := ttlEnd - ttlStart - return string(b[:len(b)-cut]) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize_test.go deleted file mode 100644 index 2ba3fe9a3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sanitize_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package dns - -import "testing" - -func TestDedup(t *testing.T) { - // make it []string - testcases := map[[3]RR][]string{ - [...]RR{ - newRR(t, "mIek.nl. IN A 127.0.0.1"), - newRR(t, "mieK.nl. IN A 127.0.0.1"), - newRR(t, "miek.Nl. IN A 127.0.0.1"), - }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"}, - [...]RR{ - newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"), - newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"), - newRR(t, "Miek.nL. 500 IN A 127.0.0.1"), - }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"}, - [...]RR{ - newRR(t, "miek.nl. IN A 127.0.0.1"), - newRR(t, "miek.nl. CH A 127.0.0.1"), - newRR(t, "miek.nl. IN A 127.0.0.1"), - }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1", - "miek.nl.\t3600\tCH\tA\t127.0.0.1", - }, - [...]RR{ - newRR(t, "miek.nl. CH A 127.0.0.1"), - newRR(t, "miek.nl. IN A 127.0.0.1"), - newRR(t, "miek.de. IN A 127.0.0.1"), - }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1", - "miek.nl.\t3600\tIN\tA\t127.0.0.1", - "miek.de.\t3600\tIN\tA\t127.0.0.1", - }, - [...]RR{ - newRR(t, "miek.de. IN A 127.0.0.1"), - newRR(t, "miek.nl. 200 IN A 127.0.0.1"), - newRR(t, "miek.nl. 300 IN A 127.0.0.1"), - }: {"miek.de.\t3600\tIN\tA\t127.0.0.1", - "miek.nl.\t200\tIN\tA\t127.0.0.1", - }, - } - - for rr, expected := range testcases { - out := Dedup([]RR{rr[0], rr[1], rr[2]}, nil) - for i, o := range out { - if o.String() != expected[i] { - t.Fatalf("expected %v, got %v", expected[i], o.String()) - } - } - } -} - -func BenchmarkDedup(b *testing.B) { - rrs := []RR{ - newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"), - newRR(nil, "mieK.Nl. 1000 IN A 127.0.0.1"), - newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"), - } - m := make(map[string]RR) - for i := 0; i < b.N; i++ { - Dedup(rrs, m) - } -} - -func TestNormalizedString(t *testing.T) { - tests := map[RR]string{ - newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", - newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", - newRR(t, "m\\\tIeK.nl. 
3600 in A 127.0.0.1"): "m\\009iek.nl.\tIN\tA\t127.0.0.1", - } - for tc, expected := range tests { - n := normalizedString(tc) - if n != expected { - t.Errorf("expected %s, got %s", expected, n) - } - } -} - -func newRR(t *testing.T, s string) RR { - r, err := NewRR(s) - if err != nil { - t.Logf("newRR: %v", err) - } - return r -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan.go deleted file mode 100644 index 8d4773c3e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan.go +++ /dev/null @@ -1,987 +0,0 @@ -package dns - -import ( - "io" - "log" - "os" - "strconv" - "strings" -) - -type debugging bool - -const debug debugging = false - -func (d debugging) Printf(format string, args ...interface{}) { - if d { - log.Printf(format, args...) - } -} - -const maxTok = 2048 // Largest token we can return. -const maxUint16 = 1<<16 - 1 - -// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: -// * Add ownernames if they are left blank; -// * Suppress sequences of spaces; -// * Make each RR fit on one line (_NEWLINE is send as last) -// * Handle comments: ; -// * Handle braces - anywhere. -const ( - // Zonefile - zEOF = iota - zString - zBlank - zQuote - zNewline - zRrtpe - zOwner - zClass - zDirOrigin // $ORIGIN - zDirTtl // $TTL - zDirInclude // $INCLUDE - zDirGenerate // $GENERATE - - // Privatekey file - zValue - zKey - - zExpectOwnerDir // Ownername - zExpectOwnerBl // Whitespace after the ownername - zExpectAny // Expect rrtype, ttl or class - zExpectAnyNoClass // Expect rrtype or ttl - zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS - zExpectAnyNoTtl // Expect rrtype or class - zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL - zExpectRrtype // Expect rrtype - zExpectRrtypeBl // Whitespace BEFORE rrtype - zExpectRdata // The first element of the rdata - zExpectDirTtlBl // Space after directive $TTL - zExpectDirTtl // Directive $TTL - zExpectDirOriginBl // Space after directive $ORIGIN - zExpectDirOrigin // Directive $ORIGIN - zExpectDirIncludeBl // Space after directive $INCLUDE - zExpectDirInclude // Directive $INCLUDE - zExpectDirGenerate // Directive $GENERATE - zExpectDirGenerateBl // Space after directive $GENERATE -) - -// ParseError is a parsing error. It contains the parse error and the location in the io.Reader -// where the error occurred. -type ParseError struct { - file string - err string - lex lex -} - -func (e *ParseError) Error() (s string) { - if e.file != "" { - s = e.file + ": " - } - s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + - strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) - return -} - -type lex struct { - token string // text of the token - tokenUpper string // uppercase text of the token - length int // length of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. - line int // line in the file - column int // column in the file - torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - comment string // any comment text seen -} - -// Token holds the token that are returned when a zone file is parsed. -type Token struct { - // The scanned resource record when error is not nil. - RR - // When an error occurred, this has the error specifics. - Error *ParseError - // A potential comment positioned after the RR and on the same line. 
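// Comments inside the RR, or on a line of their own, are discarded by the
// parser; only a trailing comment on the same line as the RR ends up here.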
- Comment string -} - -// NewRR reads the RR contained in the string s. Only the first RR is -// returned. If s contains no RR, return nil with no error. The class -// defaults to IN and TTL defaults to 3600. The full zone file syntax -// like $TTL, $ORIGIN, etc. is supported. All fields of the returned -// RR are set, except RR.Header().Rdlength which is set to 0. -func NewRR(s string) (RR, error) { - if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline - return ReadRR(strings.NewReader(s+"\n"), "") - } - return ReadRR(strings.NewReader(s), "") -} - -// ReadRR reads the RR contained in q. -// See NewRR for more documentation. -func ReadRR(q io.Reader, filename string) (RR, error) { - r := <-parseZoneHelper(q, ".", filename, 1) - if r == nil { - return nil, nil - } - - if r.Error != nil { - return nil, r.Error - } - return r.RR, nil -} - -// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the -// returned channel, which consist out the parsed RR, a potential comment or an error. -// If there is an error the RR is nil. The string file is only used -// in error reporting. The string origin is used as the initial origin, as -// if the file would start with: $ORIGIN origin . -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. -// The channel t is closed by ParseZone when the end of r is reached. -// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// for x := range dns.ParseZone(strings.NewReader(z), "", "") { -// if x.Error != nil { -// // log.Println(x.Error) -// } else { -// // Do something with x.RR -// } -// } -// -// Comments specified after an RR (and on the same line!) are returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned in Token.Comment. Comments inside the -// RR are discarded. Comments on a line by themselves are discarded too. -func ParseZone(r io.Reader, origin, file string) chan *Token { - return parseZoneHelper(r, origin, file, 10000) -} - -func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { - t := make(chan *Token, chansize) - go parseZone(r, origin, file, t, 0) - return t -} - -func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { - defer func() { - if include == 0 { - close(t) - } - }() - s := scanInit(r) - c := make(chan lex) - // Start the lexer - go zlexer(s, c) - // 6 possible beginnings of a line, _ is a space - // 0. zRRTYPE -> all omitted until the rrtype - // 1. zOwner _ zRrtype -> class/ttl omitted - // 2. zOwner _ zString _ zRrtype -> class omitted - // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class - // 4. zOwner _ zClass _ zRrtype -> ttl omitted - // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) - // After detecting these, we know the zRrtype so we can jump to functions - // handling the rdata for each of these types. - - if origin == "" { - origin = "." 
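// (An empty origin is treated as the root here; the Fqdn call just below
// then guarantees it carries a trailing dot either way.)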
- } - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} - return - } - - st := zExpectOwnerDir // initial state - var h RR_Header - var defttl uint32 = defaultTtl - var prevName string - for l := range c { - // Lexer spotted an error already - if l.err == true { - t <- &Token{Error: &ParseError{f, l.token, l}} - return - - } - switch st { - case zExpectOwnerDir: - // We can also expect a directive, like $TTL or $ORIGIN - h.Ttl = defttl - h.Class = ClassINET - switch l.value { - case zNewline: - st = zExpectOwnerDir - case zOwner: - h.Name = l.token - if l.token[0] == '@' { - h.Name = origin - prevName = h.Name - st = zExpectOwnerBl - break - } - if h.Name[l.length-1] != '.' { - h.Name = appendOrigin(h.Name, origin) - } - _, ok := IsDomainName(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "bad owner name", l}} - return - } - prevName = h.Name - st = zExpectOwnerBl - case zDirTtl: - st = zExpectDirTtlBl - case zDirOrigin: - st = zExpectDirOriginBl - case zDirInclude: - st = zExpectDirIncludeBl - case zDirGenerate: - st = zExpectDirGenerateBl - case zRrtpe: - h.Name = prevName - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Name = prevName - h.Class = l.torc - st = zExpectAnyNoClassBl - case zBlank: - // Discard, can happen when there is nothing on the - // line except the RR type - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // Don't about the defttl, we should take the $TTL value - // defttl = ttl - st = zExpectAnyNoTtlBl - - default: - t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} - return - } - case zExpectDirIncludeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} - return - } - st = zExpectDirInclude - case zExpectDirInclude: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} - return - } - neworigin := origin // There may be optionally a new origin set after the filename, if not use current one - l := <-c - switch l.value { - case zBlank: - l := <-c - if l.value == zString { - if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - // a new origin is specified. - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. endings - neworigin = l.token + "." 
+ origin - } else { - neworigin = l.token + origin - } - } else { - neworigin = l.token - } - } - case zNewline, zEOF: - // Ok - default: - t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} - return - } - // Start with the new file - r1, e1 := os.Open(l.token) - if e1 != nil { - t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} - return - } - if include+1 > 7 { - t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} - return - } - parseZone(r1, l.token, neworigin, t, include+1) - st = zExpectOwnerDir - case zExpectDirTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} - return - } - st = zExpectDirTtl - case zExpectDirTtl: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - return - } - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - defttl = ttl - st = zExpectOwnerDir - case zExpectDirOriginBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} - return - } - st = zExpectDirOrigin - case zExpectDirOrigin: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - } - if _, ok := IsDomainName(l.token); !ok { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. endings - origin = l.token + "." + origin - } else { - origin = l.token + origin - } - } else { - origin = l.token - } - st = zExpectOwnerDir - case zExpectDirGenerateBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} - return - } - st = zExpectDirGenerate - case zExpectDirGenerate: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} - return - } - if errMsg := generate(l, c, t, origin); errMsg != "" { - t <- &Token{Error: &ParseError{f, errMsg, l}} - return - } - st = zExpectOwnerDir - case zExpectOwnerBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after owner", l}} - return - } - st = zExpectAny - case zExpectAny: - switch l.value { - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Class = l.torc - st = zExpectAnyNoClassBl - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the defttl here - st = zExpectAnyNoTtlBl - default: - t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} - return - } - case zExpectAnyNoClassBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before class", l}} - return - } - st = zExpectAnyNoClass - case zExpectAnyNoTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} - return - } - st = zExpectAnyNoTtl - case zExpectAnyNoTtl: - switch l.value { - case zClass: - h.Class = l.torc - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} - return - } - case zExpectAnyNoClass: - switch l.value { - case zString: - ttl, ok := 
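The directive handling above is easiest to see end to end through ParseZone. A small sketch, again assuming the public miekg/dns API: the zone sets $ORIGIN and $TTL itself, relative names are completed against the origin, and the trailing same-line comment comes back in Token.Comment.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	z := `$ORIGIN example.org.
$TTL 30m
@    IN SOA ns1 hostmaster 2018021601 7200 3600 1209600 3600
www  IN A   192.0.2.10 ; front-end box
`
	for tok := range dns.ParseZone(strings.NewReader(z), "", "example.org.zone") {
		if tok.Error != nil {
			log.Println(tok.Error)
			continue
		}
		fmt.Printf("%v (comment: %q)\n", tok.RR, tok.Comment)
	}
}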
stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the def ttl anymore - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} - return - } - case zExpectRrtypeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} - return - } - st = zExpectRrtype - case zExpectRrtype: - if l.value != zRrtpe { - t <- &Token{Error: &ParseError{f, "unknown RR type", l}} - return - } - h.Rrtype = l.torc - st = zExpectRdata - case zExpectRdata: - r, e, c1 := setRR(h, c, origin, f) - if e != nil { - // If e.lex is nil than we have encounter a unknown RR type - // in that case we substitute our current lex token - if e.lex.token == "" && e.lex.value == 0 { - e.lex = l // Uh, dirty - } - t <- &Token{Error: e} - return - } - t <- &Token{RR: r, Comment: c1} - st = zExpectOwnerDir - } - } - // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this - // is not an error, because an empty zone file is still a zone file. -} - -// zlexer scans the sourcefile and returns tokens on the channel c. -func zlexer(s *scan, c chan lex) { - var l lex - str := make([]byte, maxTok) // Should be enough for any token - stri := 0 // Offset in str (0 means empty) - com := make([]byte, maxTok) // Hold comment text - comi := 0 - quote := false - escape := false - space := false - commt := false - rrtype := false - owner := true - brace := 0 - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - if stri >= maxTok { - l.token = "token length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - if comi >= maxTok { - l.token = "comment length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - - switch x { - case ' ', '\t': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if commt { - com[comi] = x - comi++ - break - } - if stri == 0 { - // Space directly in the beginning, handled in the grammar - } else if owner { - // If we have a string and its the first, make it an owner - l.value = zOwner - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - // escape $... 
start with a \ not a $, so this will work - switch l.tokenUpper { - case "$TTL": - l.value = zDirTtl - case "$ORIGIN": - l.value = zDirOrigin - case "$INCLUDE": - l.value = zDirInclude - case "$GENERATE": - l.value = zDirGenerate - } - debug.Printf("[7 %+v]", l.token) - c <- l - } else { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } else { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - c <- l - return - } - l.value = zRrtpe - l.torc = t - } - } - if t, ok := StringToClass[l.tokenUpper]; ok { - l.value = zClass - l.torc = t - } else { - if strings.HasPrefix(l.tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - c <- l - return - } - l.value = zClass - l.torc = t - } - } - } - debug.Printf("[6 %+v]", l.token) - c <- l - } - stri = 0 - // I reverse space stuff here - if !space && !commt { - l.value = zBlank - l.token = " " - l.length = 1 - debug.Printf("[5 %+v]", l.token) - c <- l - } - owner = false - space = true - case ';': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if stri > 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - debug.Printf("[4 %+v]", l.token) - c <- l - stri = 0 - } - commt = true - com[comi] = ';' - comi++ - case '\r': - escape = false - if quote { - str[stri] = x - stri++ - break - } - // discard if outside of quotes - case '\n': - escape = false - // Escaped newline - if quote { - str[stri] = x - stri++ - break - } - // inside quotes this is legal - if commt { - // Reset a comment - commt = false - rrtype = false - stri = 0 - // If not in a brace this ends the comment AND the RR - if brace == 0 { - owner = true - owner = true - l.value = zNewline - l.token = "\n" - l.tokenUpper = l.token - l.length = 1 - l.comment = string(com[:comi]) - debug.Printf("[3 %+v %+v]", l.token, l.comment) - c <- l - l.comment = "" - comi = 0 - break - } - com[comi] = ' ' // convert newline to space - comi++ - break - } - - if brace == 0 { - // If there is previous text, we should output it here - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } - } - debug.Printf("[2 %+v]", l.token) - c <- l - } - l.value = zNewline - l.token = "\n" - l.tokenUpper = l.token - l.length = 1 - debug.Printf("[1 %+v]", l.token) - c <- l - stri = 0 - commt = false - rrtype = false - owner = true - comi = 0 - } - case '\\': - // comments do not get escaped chars, everything is copied - if commt { - com[comi] = x - comi++ - break - } - // something already escaped must be in string - if escape { - str[stri] = x - stri++ - escape = false - break - } - // something escaped outside of string gets added to string - str[stri] = x - stri++ - escape = true - case '"': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - space = false - // send previous gathered text and the quote - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = 
strings.ToUpper(l.token) - l.length = stri - - debug.Printf("[%+v]", l.token) - c <- l - stri = 0 - } - - // send quote itself as separate token - l.value = zQuote - l.token = "\"" - l.tokenUpper = l.token - l.length = 1 - c <- l - quote = !quote - case '(', ')': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - if quote { - str[stri] = x - stri++ - break - } - switch x { - case ')': - brace-- - if brace < 0 { - l.token = "extra closing brace" - l.tokenUpper = l.token - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - case '(': - brace++ - } - default: - escape = false - if commt { - com[comi] = x - comi++ - break - } - str[stri] = x - stri++ - space = false - } - x, err = s.tokenText() - } - if stri > 0 { - // Send remainder - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - l.value = zString - debug.Printf("[%+v]", l.token) - c <- l - } - if brace != 0 { - l.token = "unbalanced brace" - l.tokenUpper = l.token - l.err = true - c <- l - } -} - -// Extract the class number from CLASSxx -func classToInt(token string) (uint16, bool) { - offset := 5 - if len(token) < offset+1 { - return 0, false - } - class, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(class), true -} - -// Extract the rr number from TYPExxx -func typeToInt(token string) (uint16, bool) { - offset := 4 - if len(token) < offset+1 { - return 0, false - } - typ, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(typ), true -} - -// Parse things like 2w, 2m, etc, Return the time in seconds. -func stringToTtl(token string) (uint32, bool) { - s := uint32(0) - i := uint32(0) - for _, c := range token { - switch c { - case 's', 'S': - s += i - i = 0 - case 'm', 'M': - s += i * 60 - i = 0 - case 'h', 'H': - s += i * 60 * 60 - i = 0 - case 'd', 'D': - s += i * 60 * 60 * 24 - i = 0 - case 'w', 'W': - s += i * 60 * 60 * 24 * 7 - i = 0 - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i *= 10 - i += uint32(c) - '0' - default: - return 0, false - } - } - return s + i, true -} - -// Parse LOC records' [.][mM] into a -// mantissa exponent format. Token should contain the entire -// string (i.e. no spaces allowed) -func stringToCm(token string) (e, m uint8, ok bool) { - if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { - token = token[0 : len(token)-1] - } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - case 0: - // huh? - return 0, 0, false - } - ok = true - if meters > 0 { - e = 2 - val = meters - } else { - e = 0 - val = cmeters - } - for val > 10 { - e++ - val /= 10 - } - if e > 9 { - ok = false - } - m = uint8(val) - return -} - -func appendOrigin(name, origin string) string { - if origin == "." { - return name + origin - } - return name + "." 
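The TTL shorthand accepted by stringToTtl above ("2w", "1h30m", ...) is plain unit arithmetic in seconds. A standalone sketch that mirrors that logic (it is not the vendored function itself):

package main

import "fmt"

// ttlSeconds mirrors the unit handling described above: digits accumulate
// into a value, and a trailing s/m/h/d/w multiplies it into seconds.
func ttlSeconds(tok string) (uint32, bool) {
	var total, cur uint32
	for _, c := range tok {
		switch {
		case c >= '0' && c <= '9':
			cur = cur*10 + uint32(c-'0')
		case c == 's' || c == 'S':
			total += cur
			cur = 0
		case c == 'm' || c == 'M':
			total += cur * 60
			cur = 0
		case c == 'h' || c == 'H':
			total += cur * 3600
			cur = 0
		case c == 'd' || c == 'D':
			total += cur * 86400
			cur = 0
		case c == 'w' || c == 'W':
			total += cur * 604800
			cur = 0
		default:
			return 0, false
		}
	}
	return total + cur, true
}

func main() {
	fmt.Println(ttlSeconds("1w2d")) // 777600 true
	fmt.Println(ttlSeconds("90"))   // 90 true
}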
+ origin -} - -// LOC record helper function -func locCheckNorth(token string, latitude uint32) (uint32, bool) { - switch token { - case "n", "N": - return LOC_EQUATOR + latitude, true - case "s", "S": - return LOC_EQUATOR - latitude, true - } - return latitude, false -} - -// LOC record helper function -func locCheckEast(token string, longitude uint32) (uint32, bool) { - switch token { - case "e", "E": - return LOC_EQUATOR + longitude, true - case "w", "W": - return LOC_EQUATOR - longitude, true - } - return longitude, false -} - -// "Eat" the rest of the "line". Return potential comments -func slurpRemainder(c chan lex, f string) (*ParseError, string) { - l := <-c - com := "" - switch l.value { - case zBlank: - l = <-c - com = l.comment - if l.value != zNewline && l.value != zEOF { - return &ParseError{f, "garbage after rdata", l}, "" - } - case zNewline: - com = l.comment - case zEOF: - default: - return &ParseError{f, "garbage after rdata", l}, "" - } - return nil, com -} - -// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" -// Used for NID and L64 record. -func stringToNodeID(l lex) (uint64, *ParseError) { - if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - // There must be three colons at fixes postitions, if not its a parse error - if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - return u, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan_rr.go deleted file mode 100644 index b8b18fd77..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scan_rr.go +++ /dev/null @@ -1,2184 +0,0 @@ -package dns - -import ( - "encoding/base64" - "net" - "strconv" - "strings" -) - -type parserFunc struct { - // Func defines the function that parses the tokens and returns the RR - // or an error. The last string contains any comments in the line as - // they returned by the lexer as well. - Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) - // Signals if the RR ending is of variable length, like TXT or records - // that have Hexadecimal or Base64 as their last element in the Rdata. Records - // that have a fixed ending or for instance A, AAAA, SOA and etc. - Variable bool -} - -// Parse the rdata of each rrtype. -// All data from the channel c is either zString or zBlank. -// After the rdata there may come a zBlank and then a zNewline -// or immediately a zNewline. If this is not the case we flag -// an *ParseError: garbage after rdata. 
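stringToNodeID above packs the NID/L64 "xxxx:xxxx:xxxx:xxxx" form into a uint64 by stripping the three fixed-position colons and parsing the rest as hex. A self-contained sketch of the same idea (note it checks each separator, whereas the deleted code's `&&` chain only rejects the token when all three are missing):

package main

import (
	"fmt"
	"strconv"
)

// nodeID64 sketches the NodeID/Locator64 parsing described above:
// "0014:4fff:ff20:ee64" -> 0x144fffff20ee64.
func nodeID64(s string) (uint64, error) {
	if len(s) != 19 || s[4] != ':' || s[9] != ':' || s[14] != ':' {
		return 0, fmt.Errorf("bad NodeID/Locator64 %q", s)
	}
	return strconv.ParseUint(s[0:4]+s[5:9]+s[10:14]+s[15:19], 16, 64)
}

func main() {
	id, err := nodeID64("0014:4fff:ff20:ee64")
	fmt.Printf("%#x %v\n", id, err) // 0x144fffff20ee64 <nil>
}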
-func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - parserfunc, ok := typeToparserFunc[h.Rrtype] - if ok { - r, e, cm := parserfunc.Func(h, c, o, f) - if parserfunc.Variable { - return r, e, cm - } - if e != nil { - return nil, e, "" - } - e, cm = slurpRemainder(c, f) - if e != nil { - return nil, e, "" - } - return r, nil, cm - } - // RFC3957 RR (Unknown RR handling) - return setRFC3597(h, c, o, f) -} - -// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) -// or an error -func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { - s := "" - l := <-c // zString - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - s += l.token - case zBlank: // Ok - default: - return "", &ParseError{f, errstr, l}, "" - } - l = <-c - } - return s, nil, l.comment -} - -// A remainder of the rdata with embedded spaces, split on unquoted whitespace -// and return the parsed string slice or an error -func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { - // Get the remaining data until we see a zNewline - l := <-c - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - - // Build the slice - s := make([]string, 0) - quote := false - empty := false - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 - } - s = append(s, sx...) - break - } - - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. - return nil, &ParseError{f, errstr, l}, "" - } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: - return nil, &ParseError{f, errstr, l}, "" - } - l = <-c - } - if quote { - return nil, &ParseError{f, errstr, l}, "" - } - return s, nil, l.comment -} - -func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(A) - rr.Hdr = h - - l := <-c - if l.length == 0 { // Dynamic updates. - return rr, nil, "" - } - rr.A = net.ParseIP(l.token) - if rr.A == nil || l.err { - return nil, &ParseError{f, "bad A A", l}, "" - } - return rr, nil, "" -} - -func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AAAA) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - rr.AAAA = net.ParseIP(l.token) - if rr.AAAA == nil || l.err { - return nil, &ParseError{f, "bad AAAA AAAA", l}, "" - } - return rr, nil, "" -} - -func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NS) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ns = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NS Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' { - rr.Ns = appendOrigin(rr.Ns, o) - } - return rr, nil, "" -} - -func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. 
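endingToTxtSlice above splits character-strings longer than 255 bytes into 255-byte chunks, the wire-format limit for a single <character-string>. A condensed sketch of that splitting step (the vendored loop additionally emits a trailing empty chunk at exact multiples of 255):

package main

import "fmt"

// chunk255 splits an over-long token into 255-byte pieces, as the
// TXT/SPF rdata handling above does.
func chunk255(s string) []string {
	var out []string
	for len(s) > 255 {
		out = append(out, s[:255])
		s = s[255:]
	}
	return append(out, s)
}

func main() {
	long := make([]byte, 300)
	for i := range long {
		long[i] = 'a'
	}
	parts := chunk255(string(long))
	fmt.Println(len(parts), len(parts[0]), len(parts[1])) // 2 255 45
}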
- return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSAPPTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RP) - rr.Hdr = h - - l := <-c - rr.Mbox = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RP Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' { - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - l = <-c - rr.Txt = l.token - if l.token == "@" { - rr.Txt = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RP Txt", l}, "" - } - if rr.Txt[l.length-1] != '.' { - rr.Txt = appendOrigin(rr.Txt, o) - } - return rr, nil, "" -} - -func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MR) - rr.Hdr = h - - l := <-c - rr.Mr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MR Mr", l}, "" - } - if rr.Mr[l.length-1] != '.' { - rr.Mr = appendOrigin(rr.Mr, o) - } - return rr, nil, "" -} - -func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MB) - rr.Hdr = h - - l := <-c - rr.Mb = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mb = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MB Mb", l}, "" - } - if rr.Mb[l.length-1] != '.' { - rr.Mb = appendOrigin(rr.Mb, o) - } - return rr, nil, "" -} - -func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MG) - rr.Hdr = h - - l := <-c - rr.Mg = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mg = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MG Mg", l}, "" - } - if rr.Mg[l.length-1] != '.' { - rr.Mg = appendOrigin(rr.Mg, o) - } - return rr, nil, "" -} - -func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HINFO) - rr.Hdr = h - - chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) - if e != nil { - return nil, e, c1 - } - - if ln := len(chunks); ln == 0 { - return rr, nil, "" - } else if ln == 1 { - // Can we split it? 
- if out := strings.Fields(chunks[0]); len(out) > 1 { - chunks = out - } else { - chunks = append(chunks, "") - } - } - - rr.Cpu = chunks[0] - rr.Os = strings.Join(chunks[1:], " ") - - return rr, nil, "" -} - -func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MINFO) - rr.Hdr = h - - l := <-c - rr.Rmail = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Rmail = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" - } - if rr.Rmail[l.length-1] != '.' { - rr.Rmail = appendOrigin(rr.Rmail, o) - } - } - <-c // zBlank - l = <-c - rr.Email = l.token - if l.token == "@" { - rr.Email = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MINFO Email", l}, "" - } - if rr.Email[l.length-1] != '.' { - rr.Email = appendOrigin(rr.Email, o) - } - return rr, nil, "" -} - -func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MF) - rr.Hdr = h - - l := <-c - rr.Mf = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mf = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MF Mf", l}, "" - } - if rr.Mf[l.length-1] != '.' { - rr.Mf = appendOrigin(rr.Mf, o) - } - return rr, nil, "" -} - -func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MD) - rr.Hdr = h - - l := <-c - rr.Md = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Md = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MD Md", l}, "" - } - if rr.Md[l.length-1] != '.' { - rr.Md = appendOrigin(rr.Md, o) - } - return rr, nil, "" -} - -func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad MX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Mx = l.token - if l.token == "@" { - rr.Mx = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MX Mx", l}, "" - } - if rr.Mx[l.length-1] != '.' { - rr.Mx = appendOrigin(rr.Mx, o) - } - return rr, nil, "" -} - -func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RT) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil { - return nil, &ParseError{f, "bad RT Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Host = l.token - if l.token == "@" { - rr.Host = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RT Host", l}, "" - } - if rr.Host[l.length-1] != '.' 
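The single-name setters above (NS, PTR, RP, MR, MB, MG, MINFO, MX, ...) repeat the same rdata-name handling: "@" means the current origin, and a name without a trailing dot gets the origin appended. A condensed sketch of that shared step, using the exported IsDomainName helper (the vendored code inlines this per field):

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func qualifyName(token, origin string) (string, bool) {
	if token == "@" {
		return origin, true
	}
	if _, ok := dns.IsDomainName(token); !ok {
		return "", false
	}
	if strings.HasSuffix(token, ".") {
		return token, true
	}
	if origin == "." { // avoid producing ".." endings
		return token + origin, true
	}
	return token + "." + origin, true
}

func main() {
	fmt.Println(qualifyName("www", "example.org.")) // www.example.org. true
	fmt.Println(qualifyName("@", "example.org."))   // example.org. true
}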
{ - rr.Host = appendOrigin(rr.Host, o) - } - return rr, nil, "" -} - -func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AFSDB) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" - } - rr.Subtype = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Hostname = l.token - if l.token == "@" { - rr.Hostname = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" - } - if rr.Hostname[l.length-1] != '.' { - rr.Hostname = appendOrigin(rr.Hostname, o) - } - return rr, nil, "" -} - -func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(X25) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.err { - return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" - } - rr.PSDNAddress = l.token - return rr, nil, "" -} - -func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(KX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad KX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Exchanger = l.token - if l.token == "@" { - rr.Exchanger = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad KX Exchanger", l}, "" - } - if rr.Exchanger[l.length-1] != '.' { - rr.Exchanger = appendOrigin(rr.Exchanger, o) - } - return rr, nil, "" -} - -func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(DNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SOA) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - <-c // zBlank - if l.token == "@" { - rr.Ns = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SOA Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' { - rr.Ns = appendOrigin(rr.Ns, o) - } - } - - l = <-c - rr.Mbox = l.token - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' 
{ - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - - var ( - v uint32 - ok bool - ) - for i := 0; i < 5; i++ { - l = <-c - if l.err { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { - if i == 0 { - // Serial should be a number - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if v, ok = stringToTtl(l.token); !ok { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - - } - } else { - v = uint32(j) - } - switch i { - case 0: - rr.Serial = v - <-c // zBlank - case 1: - rr.Refresh = v - <-c // zBlank - case 2: - rr.Retry = v - <-c // zBlank - case 3: - rr.Expire = v - <-c // zBlank - case 4: - rr.Minttl = v - } - } - return rr, nil, "" -} - -func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SRV) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Weight", l}, "" - } - rr.Weight = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Port", l}, "" - } - rr.Port = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Target = l.token - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SRV Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NAPTR) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Order", l}, "" - } - rr.Order = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Preference", l}, "" - } - rr.Preference = uint16(i) - // Flags - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Flags = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - } else if l.value == zQuote { - rr.Flags = "" - } else { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - - // Service - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Service = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - } else if l.value == zQuote { - rr.Service = "" - } else { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - - // Regexp - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Regexp = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - } 
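As the loop above shows, the SOA timing fields may be written either as plain integers or in TTL shorthand; only the serial must be numeric. A quick sketch via NewRR, assuming the public API:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. IN SOA ns1.example.org. hostmaster.example.org. 2018021601 2h 30m 2w 1h")
	if err != nil {
		log.Fatal(err)
	}
	soa := rr.(*dns.SOA)
	// 7200 1800 1209600 3600 -- the shorthand forms expanded to seconds.
	fmt.Println(soa.Refresh, soa.Retry, soa.Expire, soa.Minttl)
}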
else if l.value == zQuote { - rr.Regexp = "" - } else { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - // After quote no space?? - <-c // zBlank - l = <-c // zString - rr.Replacement = l.token - if l.token == "@" { - rr.Replacement = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" - } - if rr.Replacement[l.length-1] != '.' { - rr.Replacement = appendOrigin(rr.Replacement, o) - } - return rr, nil, "" -} - -func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TALINK) - rr.Hdr = h - - l := <-c - rr.PreviousName = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.PreviousName = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" - } - if rr.PreviousName[l.length-1] != '.' { - rr.PreviousName = appendOrigin(rr.PreviousName, o) - } - } - <-c // zBlank - l = <-c - rr.NextName = l.token - if l.token == "@" { - rr.NextName = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad TALINK NextName", l}, "" - } - if rr.NextName[l.length-1] != '.' { - rr.NextName = appendOrigin(rr.NextName, o) - } - return rr, nil, "" -} - -func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LOC) - rr.Hdr = h - // Non zero defaults for LOC record, see RFC 1876, Section 3. - rr.HorizPre = 165 // 10000 - rr.VertPre = 162 // 10 - rr.Size = 18 // 1 - ok := false - // North - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude", l}, "" - } - rr.Latitude = 1000 * 60 * 60 * uint32(i) - - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - i, e = strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" - } - rr.Latitude += 1000 * 60 * uint32(i) - - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" - } else { - rr.Latitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" - -East: - // East - <-c // zBlank - l = <-c - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude", l}, "" - } else { - rr.Longitude = 1000 * 60 * 60 * uint32(i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" - } else { - rr.Longitude += 1000 * 60 * uint32(i) - } - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" - } else { - rr.Longitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - 
goto Altitude - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" - -Altitude: - <-c // zBlank - l = <-c - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } - if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { - l.token = l.token[0 : len(l.token)-1] - } - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } else { - rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) - } - - // And now optionally the other values - l = <-c - count := 0 - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - switch count { - case 0: // Size - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC Size", l}, "" - } - rr.Size = (e & 0x0f) | (m << 4 & 0xf0) - case 1: // HorizPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC HorizPre", l}, "" - } - rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) - case 2: // VertPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC VertPre", l}, "" - } - rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) - } - count++ - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" - } - l = <-c - } - return rr, nil, "" -} - -func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HIP) - rr.Hdr = h - - // HitLength is not represented - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" - } - rr.PublicKeyAlgorithm = uint8(i) - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP Hit", l}, "" - } - rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. - rr.HitLength = uint8(len(rr.Hit)) / 2 - - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP PublicKey", l}, "" - } - rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) - - // RendezvousServers (if any) - l = <-c - var xs []string - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - if l.token == "@" { - xs = append(xs, o) - l = <-c - continue - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - if l.token[l.length-1] != '.' 
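The LOC Size/HorizPre/VertPre bytes filled in above pack a base-10 mantissa in the high nibble and an exponent in the low nibble, in centimetres (RFC 1876). A sketch decoding the defaults set earlier (18, 165, 162, i.e. 1 m, 10000 m and 10 m):

package main

import "fmt"

// locCm decodes the packed LOC size/precision byte described above:
// value = mantissa * 10^exponent centimetres.
func locCm(b uint8) uint64 {
	v := uint64(b >> 4) // mantissa
	for e := b & 0x0f; e > 0; e-- {
		v *= 10
	}
	return v
}

func main() {
	// Defaults used by setLOC above: Size=18, HorizPre=165, VertPre=162.
	fmt.Println(locCm(18), locCm(165), locCm(162)) // 100 1000000 1000 (cm)
}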
{ - l.token = appendOrigin(l.token, o) - } - xs = append(xs, l.token) - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - l = <-c - } - rr.RendezvousServers = xs - return rr, nil, l.comment -} - -func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CERT) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if v, ok := StringToCertType[l.token]; ok { - rr.Type = v - } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { - return nil, &ParseError{f, "bad CERT Type", l}, "" - } else { - rr.Type = uint16(i) - } - <-c // zBlank - l = <-c // zString - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad CERT KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c // zString - if v, ok := StringToAlgorithm[l.token]; ok { - rr.Algorithm = v - } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - return nil, &ParseError{f, "bad CERT Algorithm", l}, "" - } else { - rr.Algorithm = uint8(i) - } - s, e1, c1 := endingToString(c, "bad CERT Certificate", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(OPENPGPKEY) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setRRSIG(h, c, o, f) - if r != nil { - return &SIG{*r.(*RRSIG)}, e, s - } - return nil, e, s -} - -func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RRSIG) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if t, ok := StringToType[l.tokenUpper]; !ok { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok = typeToInt(l.tokenUpper) - if !ok { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - rr.TypeCovered = t - } else { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - } else { - rr.TypeCovered = t - } - <-c // zBlank - l = <-c - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Labels", l}, "" - } - rr.Labels = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.ParseUint(l.token, 10, 32) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" - } - rr.OrigTtl = uint32(i) - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below - rr.Expiration = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" - } - } else { - rr.Expiration = i - } - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - rr.Inception = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Inception", l}, "" - } - } else { - rr.Inception = i - } - <-c // zBlank - l = <-c - i, err = strconv.ParseUint(l.token, 10, 16) - if err != nil || l.err { - return nil, 
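setRRSIG above accepts the signature timestamps either in YYYYMMDDHHmmSS form (via StringToTime) or as a plain epoch integer. A sketch, assuming the public API and that the two spellings below denote the same instants:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Same record, once with YYYYMMDDHHmmSS timestamps and once with epoch
	// integers; the parser above accepts both forms.
	a, err := dns.NewRR("example.org. 3600 IN RRSIG A 8 2 3600 20180316000000 20180216000000 12345 example.org. dGVzdA==")
	if err != nil {
		log.Fatal(err)
	}
	b, err := dns.NewRR("example.org. 3600 IN RRSIG A 8 2 3600 1521158400 1518739200 12345 example.org. dGVzdA==")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.(*dns.RRSIG).Expiration == b.(*dns.RRSIG).Expiration) // true
}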
&ParseError{f, "bad RRSIG KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - rr.SignerName = l.token - if l.token == "@" { - rr.SignerName = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" - } - if rr.SignerName[l.length-1] != '.' { - rr.SignerName = appendOrigin(rr.SignerName, o) - } - } - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) - if e != nil { - return nil, e, c1 - } - rr.Signature = s - return rr, nil, c1 -} - -func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC) - rr.Hdr = h - - l := <-c - rr.NextDomain = l.token - if l.length == 0 { - return rr, nil, l.comment - } - if l.token == "@" { - rr.NextDomain = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" - } - if rr.NextDomain[l.length-1] != '.' { - rr.NextDomain = appendOrigin(rr.NextDomain, o) - } - } - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" - } - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - - <-c - l = <-c - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" - } - rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) - rr.NextDomain = l.token - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3PARAM) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, 
&ParseError{f, "bad NSEC3PARAM Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - return rr, nil, "" -} - -func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EUI48) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 17 || l.err { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - addr := make([]byte, 12) - dash := 0 - for i := 0; i < 10; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - } - addr[10] = l.token[15] - addr[11] = l.token[16] - - i, e := strconv.ParseUint(string(addr), 16, 48) - if e != nil { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - rr.Address = i - return rr, nil, "" -} - -func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EUI64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 23 || l.err { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - addr := make([]byte, 16) - dash := 0 - for i := 0; i < 14; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - } - addr[14] = l.token[21] - addr[15] = l.token[22] - - i, e := strconv.ParseUint(string(addr), 16, 64) - if e != nil { - return nil, &ParseError{f, "bad EUI68 Address", l}, "" - } - rr.Address = uint64(i) - return rr, nil, "" -} - -func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SSHFP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Type", l}, "" - } - rr.Type = uint8(i) - <-c // zBlank - s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) - if e1 != nil { - return nil, e1, c1 - } - rr.FingerPrint = s - return rr, nil, "" -} - -func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DNSKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" - } - rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "KEY") - if r != nil { - return &KEY{*r.(*DNSKEY)}, e, s - } - 
return nil, e, s -} - -func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") - return r, e, s -} - -func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") - if r != nil { - return &CDNSKEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Flags", l}, "" - } - rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EID) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad EID Endpoint", f) - if e != nil { - return nil, e, c1 - } - rr.Endpoint = s - return rr, nil, c1 -} - -func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NIMLOC) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) - if e != nil { - return nil, e, c1 - } - rr.Locator = s - return rr, nil, c1 -} - -func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GPOS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - _, e := strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Longitude", l}, "" - } - rr.Longitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Latitude", l}, "" - } - rr.Latitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Altitude", l}, "" - } - rr.Altitude = l.token - return rr, nil, "" -} - -func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DS") - return r, e, s -} - -func setDLV(h 
RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DLV") - if r != nil { - return &DLV{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "CDS") - if r != nil { - return &CDS{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad TA Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e, c1 := endingToString(c, "bad TA Digest", f) - if e != nil { - return nil, e.(*ParseError), c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TLSA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SMIMEA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. 
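In setDSs/setTA above the Algorithm field may be given as a number or as a mnemonic looked up in StringToAlgorithm, and DLV/CDS simply wrap the DS parser's result. A sketch, assuming the public API, showing the two spellings parse to the same algorithm:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	const digest = "49FD46E6C4B45C55D4AC69CBD3CD34AC1AFE51DE49FD46E6C4B45C55D4AC69CB"
	a, err := dns.NewRR("example.org. IN DS 12345 8 2 " + digest)
	if err != nil {
		log.Fatal(err)
	}
	b, err := dns.NewRR("example.org. IN DS 12345 RSASHA256 2 " + digest)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.(*dns.DS).Algorithm == b.(*dns.DS).Algorithm) // true (8)
}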
different than e), because...??t - s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RFC3597) - rr.Hdr = h - l := <-c - if l.token != "\\#" { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - <-c // zBlank - l = <-c - rdlength, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" - } - - s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) - if e1 != nil { - return nil, e1, c1 - } - if rdlength*2 != len(s) { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - rr.Rdata = s - return rr, nil, c1 -} - -func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SPF) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AVC) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TXT) - rr.Hdr = h - - // no zBlank reading here, because all this rdata is TXT - s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -// identical to setTXT -func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) - if e != nil { - return nil, e, "" - } - rr.ZSData = s - return rr, nil, c1 -} - -func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(URI) - rr.Hdr = h - - l := <-c - if l.length == 0 { // Dynamic updates. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Weight", l}, "" - } - rr.Weight = uint16(i) - - <-c // zBlank - s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) - if err != nil { - return nil, err, "" - } - if len(s) > 1 { - return nil, &ParseError{f, "bad URI Target", l}, "" - } - rr.Target = s[0] - return rr, nil, c1 -} - -func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - // awesome record to parse! 
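Types without a dedicated parser fall through to setRFC3597 above, which requires the RFC 3597 generic form: a literal \#, an rdlength, and exactly that many bytes of hex. A minimal sketch, assuming the public API:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// TYPE65534 has no specific parser, so the \# generic form is required;
	// the declared length (2) must match the hex data that follows.
	rr, err := dns.NewRR(`example.org. 3600 IN TYPE65534 \# 2 0a00`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T %s\n", rr, rr.(*dns.RFC3597).Rdata) // *dns.RFC3597 0a00
}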
- rr := new(DHCID) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad DHCID Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NID) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NID Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.NodeID = u - return rr, nil, "" -} - -func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L32) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad L32 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Locator32 = net.ParseIP(l.token) - if rr.Locator32 == nil || l.err { - return nil, &ParseError{f, "bad L32 Locator", l}, "" - } - return rr, nil, "" -} - -func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad LP Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Fqdn = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Fqdn = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad LP Fqdn", l}, "" - } - if rr.Fqdn[l.length-1] != '.' 
{ - rr.Fqdn = appendOrigin(rr.Fqdn, o) - } - return rr, nil, "" -} - -func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad L64 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.Locator64 = u - return rr, nil, "" -} - -func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad UID Uid", l}, "" - } - rr.Uid = uint32(i) - return rr, nil, "" -} - -func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad GID Gid", l}, "" - } - rr.Gid = uint32(i) - return rr, nil, "" -} - -func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UINFO) - rr.Hdr = h - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) - if e != nil { - return nil, e, c1 - } - if ln := len(s); ln == 0 { - return rr, nil, c1 - } - rr.Uinfo = s[0] // silently discard anything after the first character-string - return rr, nil, c1 -} - -func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad PX Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Map822 = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Map822 = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PX Map822", l}, "" - } - if rr.Map822[l.length-1] != '.' { - rr.Map822 = appendOrigin(rr.Map822, o) - } - <-c // zBlank - l = <-c // zString - rr.Mapx400 = l.token - if l.token == "@" { - rr.Mapx400 = o - return rr, nil, "" - } - _, ok = IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PX Mapx400", l}, "" - } - if rr.Mapx400[l.length-1] != '.' 
{ - rr.Mapx400 = appendOrigin(rr.Mapx400, o) - } - return rr, nil, "" -} - -func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CAA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad CAA Flag", l}, "" - } - rr.Flag = uint8(i) - - <-c // zBlank - l = <-c // zString - if l.value != zString { - return nil, &ParseError{f, "bad CAA Tag", l}, "" - } - rr.Tag = l.token - - <-c // zBlank - s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) - if e != nil { - return nil, e, "" - } - if len(s) > 1 { - return nil, &ParseError{f, "bad CAA Value", l}, "" - } - rr.Value = s[0] - return rr, nil, c1 -} - -var typeToparserFunc = map[uint16]parserFunc{ - TypeAAAA: {setAAAA, false}, - TypeAFSDB: {setAFSDB, false}, - TypeA: {setA, false}, - TypeCAA: {setCAA, true}, - TypeCDS: {setCDS, true}, - TypeCDNSKEY: {setCDNSKEY, true}, - TypeCERT: {setCERT, true}, - TypeCNAME: {setCNAME, false}, - TypeDHCID: {setDHCID, true}, - TypeDLV: {setDLV, true}, - TypeDNAME: {setDNAME, false}, - TypeKEY: {setKEY, true}, - TypeDNSKEY: {setDNSKEY, true}, - TypeDS: {setDS, true}, - TypeEID: {setEID, true}, - TypeEUI48: {setEUI48, false}, - TypeEUI64: {setEUI64, false}, - TypeGID: {setGID, false}, - TypeGPOS: {setGPOS, false}, - TypeHINFO: {setHINFO, true}, - TypeHIP: {setHIP, true}, - TypeKX: {setKX, false}, - TypeL32: {setL32, false}, - TypeL64: {setL64, false}, - TypeLOC: {setLOC, true}, - TypeLP: {setLP, false}, - TypeMB: {setMB, false}, - TypeMD: {setMD, false}, - TypeMF: {setMF, false}, - TypeMG: {setMG, false}, - TypeMINFO: {setMINFO, false}, - TypeMR: {setMR, false}, - TypeMX: {setMX, false}, - TypeNAPTR: {setNAPTR, false}, - TypeNID: {setNID, false}, - TypeNIMLOC: {setNIMLOC, true}, - TypeNINFO: {setNINFO, true}, - TypeNSAPPTR: {setNSAPPTR, false}, - TypeNSEC3PARAM: {setNSEC3PARAM, false}, - TypeNSEC3: {setNSEC3, true}, - TypeNSEC: {setNSEC, true}, - TypeNS: {setNS, false}, - TypeOPENPGPKEY: {setOPENPGPKEY, true}, - TypePTR: {setPTR, false}, - TypePX: {setPX, false}, - TypeSIG: {setSIG, true}, - TypeRKEY: {setRKEY, true}, - TypeRP: {setRP, false}, - TypeRRSIG: {setRRSIG, true}, - TypeRT: {setRT, false}, - TypeSMIMEA: {setSMIMEA, true}, - TypeSOA: {setSOA, false}, - TypeSPF: {setSPF, true}, - TypeAVC: {setAVC, true}, - TypeSRV: {setSRV, false}, - TypeSSHFP: {setSSHFP, true}, - TypeTALINK: {setTALINK, false}, - TypeTA: {setTA, true}, - TypeTLSA: {setTLSA, true}, - TypeTXT: {setTXT, true}, - TypeUID: {setUID, false}, - TypeUINFO: {setUINFO, true}, - TypeURI: {setURI, true}, - TypeX25: {setX25, false}, -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scanner.go deleted file mode 100644 index c29bc2f38..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/scanner.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -// Implement a simple scanner, return a byte stream from an io reader. 
- -import ( - "bufio" - "io" - "text/scanner" -) - -type scan struct { - src *bufio.Reader - position scanner.Position - eof bool // Have we just seen a eof -} - -func scanInit(r io.Reader) *scan { - s := new(scan) - s.src = bufio.NewReader(r) - s.position.Line = 1 - return s -} - -// tokenText returns the next byte from the input -func (s *scan) tokenText() (byte, error) { - c, err := s.src.ReadByte() - if err != nil { - return c, err - } - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. - if s.eof == true { - s.position.Line++ - s.position.Column = 0 - s.eof = false - } - if c == '\n' { - s.eof = true - return c, nil - } - s.position.Column++ - return c, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server.go deleted file mode 100644 index 0ca6e008c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server.go +++ /dev/null @@ -1,734 +0,0 @@ -// DNS server implementation. - -package dns - -import ( - "bytes" - "crypto/tls" - "encoding/binary" - "io" - "net" - "sync" - "time" -) - -// Maximum number of TCP queries before we close the socket. -const maxTCPQueries = 128 - -// Handler is implemented by any value that implements ServeDNS. -type Handler interface { - ServeDNS(w ResponseWriter, r *Msg) -} - -// A ResponseWriter interface is used by an DNS handler to -// construct an DNS response. -type ResponseWriter interface { - // LocalAddr returns the net.Addr of the server - LocalAddr() net.Addr - // RemoteAddr returns the net.Addr of the client that sent the current request. - RemoteAddr() net.Addr - // WriteMsg writes a reply back to the client. - WriteMsg(*Msg) error - // Write writes a raw buffer back to the client. - Write([]byte) (int, error) - // Close closes the connection. - Close() error - // TsigStatus returns the status of the Tsig. - TsigStatus() error - // TsigTimersOnly sets the tsig timers only boolean. - TsigTimersOnly(bool) - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the DNS package will not do anything with the connection. - Hijack() -} - -type response struct { - hijacked bool // connection has been hijacked by handler - tsigStatus error - tsigTimersOnly bool - tsigRequestMAC string - tsigSecret map[string]string // the tsig secrets - udp *net.UDPConn // i/o connection if UDP was used - tcp net.Conn // i/o connection if TCP was used - udpSession *SessionUDP // oob data to get egress interface right - remoteAddr net.Addr // address of the client - writer Writer // writer to output the raw DNS bits -} - -// ServeMux is an DNS request multiplexer. It matches the -// zone name of each incoming request against a list of -// registered patterns add calls the handler for the pattern -// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning -// that queries for the DS record are redirected to the parent zone (if that -// is also registered), otherwise the child gets the query. -// ServeMux is also safe for concurrent access from multiple goroutines. -type ServeMux struct { - z map[string]Handler - m *sync.RWMutex -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } - -// DefaultServeMux is the default ServeMux used by Serve. 
-var DefaultServeMux = NewServeMux() - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as DNS handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Msg) - -// ServeDNS calls f(w, r). -func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { - f(w, r) -} - -// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. -func HandleFailed(w ResponseWriter, r *Msg) { - m := new(Msg) - m.SetRcode(r, RcodeServerFailure) - // does not matter if this write fails - w.WriteMsg(m) -} - -func failedHandler() Handler { return HandlerFunc(HandleFailed) } - -// ListenAndServe Starts a server on address and network specified Invoke handler -// for incoming queries. -func ListenAndServe(addr string, network string, handler Handler) error { - server := &Server{Addr: addr, Net: network, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in -// http://golang.org/pkg/net/http/#ListenAndServeTLS -func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - server := &Server{ - Addr: addr, - Net: "tcp-tls", - TLSConfig: &config, - Handler: handler, - } - - return server.ListenAndServe() -} - -// ActivateAndServe activates a server with a listener from systemd, -// l and p should not both be non-nil. -// If both l and p are not nil only p will be used. -// Invoke handler for incoming queries. -func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { - server := &Server{Listener: l, PacketConn: p, Handler: handler} - return server.ActivateAndServe() -} - -func (mux *ServeMux) match(q string, t uint16) Handler { - mux.m.RLock() - defer mux.m.RUnlock() - var handler Handler - b := make([]byte, len(q)) // worst case, one label of length q - off := 0 - end := false - for { - l := len(q[off:]) - for i := 0; i < l; i++ { - b[i] = q[off+i] - if b[i] >= 'A' && b[i] <= 'Z' { - b[i] |= ('a' - 'A') - } - } - if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key - if t != TypeDS { - return h - } - // Continue for DS to see if we have a parent too, if so delegeate to the parent - handler = h - } - off, end = NextLabel(q, off) - if end { - break - } - } - // Wildcard match, if we have found nothing try the root zone as a last resort. - if h, ok := mux.z["."]; ok { - return h - } - return handler -} - -// Handle adds a handler to the ServeMux for pattern. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - mux.z[Fqdn(pattern)] = handler - mux.m.Unlock() -} - -// HandleFunc adds a handler function to the ServeMux for pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// HandleRemove deregistrars the handler specific for pattern from the ServeMux. -func (mux *ServeMux) HandleRemove(pattern string) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - delete(mux.z, Fqdn(pattern)) - mux.m.Unlock() -} - -// ServeDNS dispatches the request to the handler whose -// pattern most closely matches the request message. 
If DefaultServeMux -// is used the correct thing for DS queries is done: a possible parent -// is sought. -// If no handler is found a standard SERVFAIL message is returned -// If the request message does not have exactly one question in the -// question section a SERVFAIL is returned, unlesss Unsafe is true. -func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { - var h Handler - if len(request.Question) < 1 { // allow more than one question - h = failedHandler() - } else { - if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { - h = failedHandler() - } - } - h.ServeDNS(w, request) -} - -// Handle registers the handler with the given pattern -// in the DefaultServeMux. The documentation for -// ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleRemove deregisters the handle with the given pattern -// in the DefaultServeMux. -func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } - -// HandleFunc registers the handler function with the given pattern -// in the DefaultServeMux. -func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - DefaultServeMux.HandleFunc(pattern, handler) -} - -// Writer writes raw DNS messages; each call to Write should send an entire message. -type Writer interface { - io.Writer -} - -// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. -type Reader interface { - // ReadTCP reads a raw message from a TCP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) - // ReadUDP reads a raw message from a UDP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) -} - -// defaultReader is an adapter for the Server struct that implements the Reader interface -// using the readTCP and readUDP func of the embedded Server. -type defaultReader struct { - *Server -} - -func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - return dr.readTCP(conn, timeout) -} - -func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - return dr.readUDP(conn, timeout) -} - -// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. -// Implementations should never return a nil Reader. -type DecorateReader func(Reader) Reader - -// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. -// Implementations should never return a nil Writer. -type DecorateWriter func(Writer) Writer - -// A Server defines parameters for running an DNS server. -type Server struct { - // Address to listen on, ":dns" if empty. - Addr string - // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one - Net string - // TCP Listener to use, this is to aid in systemd's socket activation. - Listener net.Listener - // TLS connection configuration - TLSConfig *tls.Config - // UDP "Listener" to use, this is to aid in systemd's socket activation. - PacketConn net.PacketConn - // Handler to invoke, dns.DefaultServeMux if nil. - Handler Handler - // Default buffer size to use to read incoming UDP messages. If not set - // it defaults to MinMsgSize (512 B). 
- UDPSize int - // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. - ReadTimeout time.Duration - // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. - WriteTimeout time.Duration - // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). - IdleTimeout func() time.Duration - // Secret(s) for Tsig map[]. - TsigSecret map[string]string - // Unsafe instructs the server to disregard any sanity checks and directly hand the message to - // the handler. It will specifically not check if the query has the QR bit not set. - Unsafe bool - // If NotifyStartedFunc is set it is called once the server has started listening. - NotifyStartedFunc func() - // DecorateReader is optional, allows customization of the process that reads raw DNS messages. - DecorateReader DecorateReader - // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. - DecorateWriter DecorateWriter - - // Graceful shutdown handling - - inFlight sync.WaitGroup - - lock sync.RWMutex - started bool -} - -// ListenAndServe starts a nameserver on the configured address in *Server. -func (srv *Server) ListenAndServe() error { - srv.lock.Lock() - defer srv.lock.Unlock() - if srv.started { - return &Error{err: "server already started"} - } - addr := srv.Addr - if addr == "" { - addr = ":domain" - } - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - switch srv.Net { - case "tcp", "tcp4", "tcp6": - a, err := net.ResolveTCPAddr(srv.Net, addr) - if err != nil { - return err - } - l, err := net.ListenTCP(srv.Net, a) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - srv.lock.Unlock() - err = srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - case "tcp-tls", "tcp4-tls", "tcp6-tls": - network := "tcp" - if srv.Net == "tcp4-tls" { - network = "tcp4" - } else if srv.Net == "tcp6-tls" { - network = "tcp6" - } - - l, err := tls.Listen(network, addr, srv.TLSConfig) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - srv.lock.Unlock() - err = srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - case "udp", "udp4", "udp6": - a, err := net.ResolveUDPAddr(srv.Net, addr) - if err != nil { - return err - } - l, err := net.ListenUDP(srv.Net, a) - if err != nil { - return err - } - if e := setUDPSocketOptions(l); e != nil { - return e - } - srv.PacketConn = l - srv.started = true - srv.lock.Unlock() - err = srv.serveUDP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - } - return &Error{err: "bad network"} -} - -// ActivateAndServe starts a nameserver with the PacketConn or Listener -// configured in *Server. Its main use is to start a server from systemd. 
-func (srv *Server) ActivateAndServe() error { - srv.lock.Lock() - defer srv.lock.Unlock() - if srv.started { - return &Error{err: "server already started"} - } - pConn := srv.PacketConn - l := srv.Listener - if pConn != nil { - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - // Check PacketConn interface's type is valid and value - // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { - if e := setUDPSocketOptions(t); e != nil { - return e - } - srv.started = true - srv.lock.Unlock() - e := srv.serveUDP(t) - srv.lock.Lock() // to satisfy the defer at the top - return e - } - } - if l != nil { - srv.started = true - srv.lock.Unlock() - e := srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return e - } - return &Error{err: "bad listeners"} -} - -// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and -// ActivateAndServe will return. All in progress queries are completed before the server -// is taken down. If the Shutdown is taking longer than the reading timeout an error -// is returned. -func (srv *Server) Shutdown() error { - srv.lock.Lock() - if !srv.started { - srv.lock.Unlock() - return &Error{err: "server not started"} - } - srv.started = false - srv.lock.Unlock() - - if srv.PacketConn != nil { - srv.PacketConn.Close() - } - if srv.Listener != nil { - srv.Listener.Close() - } - - fin := make(chan bool) - go func() { - srv.inFlight.Wait() - fin <- true - }() - - select { - case <-time.After(srv.getReadTimeout()): - return &Error{err: "server shutdown is pending"} - case <-fin: - return nil - } -} - -// getReadTimeout is a helper func to use system timeout if server did not intend to change it. -func (srv *Server) getReadTimeout() time.Duration { - rtimeout := dnsTimeout - if srv.ReadTimeout != 0 { - rtimeout = srv.ReadTimeout - } - return rtimeout -} - -// serveTCP starts a TCP listener for the server. -// Each request is handled in a separate goroutine. -func (srv *Server) serveTCP(l net.Listener) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux - } - rtimeout := srv.getReadTimeout() - // deadline is not used here - for { - rw, err := l.Accept() - if err != nil { - if neterr, ok := err.(net.Error); ok && neterr.Temporary() { - continue - } - return err - } - m, err := reader.ReadTCP(rw, rtimeout) - srv.lock.RLock() - if !srv.started { - srv.lock.RUnlock() - return nil - } - srv.lock.RUnlock() - if err != nil { - continue - } - srv.inFlight.Add(1) - go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) - } -} - -// serveUDP starts a UDP listener for the server. -// Each request is handled in a separate goroutine. 
-func (srv *Server) serveUDP(l *net.UDPConn) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux - } - rtimeout := srv.getReadTimeout() - // deadline is not used here - for { - m, s, err := reader.ReadUDP(l, rtimeout) - srv.lock.RLock() - if !srv.started { - srv.lock.RUnlock() - return nil - } - srv.lock.RUnlock() - if err != nil { - continue - } - srv.inFlight.Add(1) - go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) - } -} - -// Serve a new connection. -func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { - defer srv.inFlight.Done() - - w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} - if srv.DecorateWriter != nil { - w.writer = srv.DecorateWriter(w) - } else { - w.writer = w - } - - q := 0 // counter for the amount of TCP queries we get - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } -Redo: - req := new(Msg) - err := req.Unpack(m) - if err != nil { // Send a FormatError back - x := new(Msg) - x.SetRcodeFormatError(req) - w.WriteMsg(x) - goto Exit - } - if !srv.Unsafe && req.Response { - goto Exit - } - - w.tsigStatus = nil - if w.tsigSecret != nil { - if t := req.IsTsig(); t != nil { - secret := t.Hdr.Name - if _, ok := w.tsigSecret[secret]; !ok { - w.tsigStatus = ErrKeyAlg - } - w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) - w.tsigTimersOnly = false - w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC - } - } - h.ServeDNS(w, req) // Writes back to the client - -Exit: - if w.tcp == nil { - return - } - // TODO(miek): make this number configurable? - if q > maxTCPQueries { // close socket after this many queries - w.Close() - return - } - - if w.hijacked { - return // client calls Close() - } - if u != nil { // UDP, "close" and return - w.Close() - return - } - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - m, err = reader.ReadTCP(w.tcp, idleTimeout) - if err == nil { - q++ - goto Redo - } - w.Close() - return -} - -func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - l := make([]byte, 2) - n, err := conn.Read(l) - if err != nil || n != 2 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - length := binary.BigEndian.Uint16(l) - if length == 0 { - return nil, ErrShortRead - } - m := make([]byte, int(length)) - n, err = conn.Read(m[:int(length)]) - if err != nil || n == 0 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - i := n - for i < int(length) { - j, err := conn.Read(m[i:int(length)]) - if err != nil { - return nil, err - } - i += j - } - n = i - m = m[:n] - return m, nil -} - -func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - m := make([]byte, srv.UDPSize) - n, s, err := ReadFromSessionUDP(conn, m) - if err != nil || n == 0 { - if err != nil { - return nil, nil, err - } - return nil, nil, ErrShortRead - } - m = m[:n] - return m, s, nil -} - -// WriteMsg implements the ResponseWriter.WriteMsg method. 
-func (w *response) WriteMsg(m *Msg) (err error) { - var data []byte - if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) - if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err - } - } - data, err = m.Pack() - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err -} - -// Write implements the ResponseWriter.Write method. -func (w *response) Write(m []byte) (int, error) { - switch { - case w.udp != nil: - n, err := WriteToSessionUDP(w.udp, m, w.udpSession) - return n, err - case w.tcp != nil: - lm := len(m) - if lm < 2 { - return 0, io.ErrShortBuffer - } - if lm > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, 2+lm) - binary.BigEndian.PutUint16(l, uint16(lm)) - m = append(l, m...) - - n, err := io.Copy(w.tcp, bytes.NewReader(m)) - return int(n), err - } - panic("not reached") -} - -// LocalAddr implements the ResponseWriter.LocalAddr method. -func (w *response) LocalAddr() net.Addr { - if w.tcp != nil { - return w.tcp.LocalAddr() - } - return w.udp.LocalAddr() -} - -// RemoteAddr implements the ResponseWriter.RemoteAddr method. -func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } - -// TsigStatus implements the ResponseWriter.TsigStatus method. -func (w *response) TsigStatus() error { return w.tsigStatus } - -// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. -func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } - -// Hijack implements the ResponseWriter.Hijack method. -func (w *response) Hijack() { w.hijacked = true } - -// Close implements the ResponseWriter.Close method -func (w *response) Close() error { - // Can't close the udp conn, as that is actually the listener. 
- if w.tcp != nil { - e := w.tcp.Close() - w.tcp = nil - return e - } - return nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server_test.go deleted file mode 100644 index f17a2f90f..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/server_test.go +++ /dev/null @@ -1,719 +0,0 @@ -package dns - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "runtime" - "sync" - "testing" - "time" -) - -func HelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func HelloServerBadID(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Id++ - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func AnotherHelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}} - w.WriteMsg(m) -} - -func RunLocalUDPServer(laddr string) (*Server, string, error) { - server, l, _, err := RunLocalUDPServerWithFinChan(laddr) - - return server, l, err -} - -func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", nil, err - } - server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - fin := make(chan struct{}, 0) - - go func() { - server.ActivateAndServe() - close(fin) - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), fin, nil -} - -func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &Server{PacketConn: pc, Unsafe: true, - ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} - -func RunLocalTCPServer(laddr string) (*Server, string, error) { - l, err := net.Listen("tcp", laddr) - if err != nil { - return nil, "", err - } - - server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) { - l, err := tls.Listen("tcp", laddr, config) - if err != nil { - return nil, "", err - } - - server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -func TestServing(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - 
HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("miek.nl.") - defer HandleRemove("example.com.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatal("failed to exchange miek.nl", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello world" { - t.Error("unexpected result for miek.nl", txt, "!= Hello world") - } - - m.SetQuestion("example.com.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - - // Test Mixes cased as noticed by Ask. - m.SetQuestion("eXaMplE.cOm.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Error("failed to exchange eXaMplE.cOm", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } -} - -func TestServingTLS(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("miek.nl.") - defer HandleRemove("example.com.") - - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - c.Net = "tcp-tls" - c.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatal("failed to exchange miek.nl", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello world" { - t.Error("unexpected result for miek.nl", txt, "!= Hello world") - } - - m.SetQuestion("example.com.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - - // Test Mixes cased as noticed by Ask. 
- m.SetQuestion("eXaMplE.cOm.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Error("failed to exchange eXaMplE.cOm", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } -} - -func BenchmarkServe(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func benchmarkServe6(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("[::1]:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func HelloServerCompress(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - m.Compress = true - w.WriteMsg(m) -} - -func BenchmarkServeCompress(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServerCompress) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func TestDotAsCatchAllWildcard(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - mux.Handle("example.com.", HandlerFunc(AnotherHelloServer)) - - handler := mux.match("www.miek.nl.", TypeTXT) - if handler == nil { - t.Error("wildcard match failed") - } - - handler = mux.match("www.example.com.", TypeTXT) - if handler == nil { - t.Error("example.com match failed") - } - - handler = mux.match("a.www.example.com.", TypeTXT) - if handler == nil { - t.Error("a.www.example.com match failed") - } - - handler = mux.match("boe.", TypeTXT) - if handler == nil { - t.Error("boe. 
match failed") - } -} - -func TestCaseFolding(t *testing.T) { - mux := NewServeMux() - mux.Handle("_udp.example.com.", HandlerFunc(HelloServer)) - - handler := mux.match("_dns._udp.example.com.", TypeSRV) - if handler == nil { - t.Error("case sensitive characters folded") - } - - handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV) - if handler == nil { - t.Error("case insensitive characters not folded") - } -} - -func TestRootServer(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - - handler := mux.match(".", TypeNS) - if handler == nil { - t.Error("root match failed") - } -} - -type maxRec struct { - max int - sync.RWMutex -} - -var M = new(maxRec) - -func HelloServerLargeResponse(resp ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Authoritative = true - m1 := 0 - M.RLock() - m1 = M.max - M.RUnlock() - for i := 0; i < m1; i++ { - aRec := &A{ - Hdr: RR_Header{ - Name: req.Question[0].Name, - Rrtype: TypeA, - Class: ClassINET, - Ttl: 0, - }, - A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(), - } - m.Answer = append(m.Answer, aRec) - } - resp.WriteMsg(m) -} - -func TestServingLargeResponses(t *testing.T) { - HandleFunc("example.", HelloServerLargeResponse) - defer HandleRemove("example.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - // Create request - m := new(Msg) - m.SetQuestion("web.service.example.", TypeANY) - - c := new(Client) - c.Net = "udp" - M.Lock() - M.max = 2 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - // This must fail - M.Lock() - M.max = 20 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Error("failed to fail exchange, this should generate packet error") - } - // But this must work again - c.UDPSize = 7000 - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } -} - -func TestServingResponse(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - HandleFunc("miek.nl.", HelloServer) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - m.Response = false - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange", err) - } - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Fatal("exchanged response message") - } - - s.Shutdown() - s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("could exchanged response message in Unsafe mode") - } -} - -func TestShutdownTCP(t *testing.T) { - s, _, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test TCP server, %v", err) - } -} - -func TestShutdownTLS(t *testing.T) { - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, _, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - 
err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test TLS server, %v", err) - } -} - -type trigger struct { - done bool - sync.RWMutex -} - -func (t *trigger) Set() { - t.Lock() - defer t.Unlock() - t.done = true -} -func (t *trigger) Get() bool { - t.RLock() - defer t.RUnlock() - return t.done -} - -func TestHandlerCloseTCP(t *testing.T) { - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - addr := ln.Addr().String() - - server := &Server{Addr: addr, Net: "tcp", Listener: ln} - - hname := "testhandlerclosetcp." - triggered := &trigger{} - HandleFunc(hname, func(w ResponseWriter, r *Msg) { - triggered.Set() - w.Close() - }) - defer HandleRemove(hname) - - go func() { - defer server.Shutdown() - c := &Client{Net: "tcp"} - m := new(Msg).SetQuestion(hname, 1) - tries := 0 - exchange: - _, _, err := c.Exchange(m, addr) - if err != nil && err != io.EOF { - t.Logf("exchange failed: %s\n", err) - if tries == 3 { - return - } - time.Sleep(time.Second / 10) - tries++ - goto exchange - } - }() - server.ActivateAndServe() - if !triggered.Get() { - t.Fatalf("handler never called") - } -} - -func TestShutdownUDP(t *testing.T) { - s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test UDP server, %v", err) - } - select { - case <-fin: - case <-time.After(2 * time.Second): - t.Error("Could not shutdown test UDP server. Gave up waiting") - } -} - -type ExampleFrameLengthWriter struct { - Writer -} - -func (e *ExampleFrameLengthWriter) Write(m []byte) (int, error) { - fmt.Println("writing raw DNS message of length", len(m)) - return e.Writer.Write(m) -} - -func ExampleDecorateWriter() { - // instrument raw DNS message writing - wf := DecorateWriter(func(w Writer) Writer { - return &ExampleFrameLengthWriter{w} - }) - - // simple UDP server - pc, err := net.ListenPacket("udp", "127.0.0.1:0") - if err != nil { - fmt.Println(err.Error()) - return - } - server := &Server{ - PacketConn: pc, - DecorateWriter: wf, - ReadTimeout: time.Hour, WriteTimeout: time.Hour, - } - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - defer server.Shutdown() - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - - HandleFunc("miek.nl.", HelloServer) - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - _, _, err = c.Exchange(m, pc.LocalAddr().String()) - if err != nil { - fmt.Println("failed to exchange", err.Error()) - return - } - // Output: writing raw DNS message of length 56 -} - -var ( - // CertPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) - CertPEMBlock = []byte(`-----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIRAJFYMkcn+b8dpU15wjf++GgwDQYJKoZIhvcNAQELBQAw -EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjAxMDgxMjAzNTNaFw0xNzAxMDcxMjAz -NTNaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQDXjqO6skvP03k58CNjQggd9G/mt+Wa+xRU+WXiKCCHttawM8x+slq5 -yfsHCwxlwsGn79HmJqecNqgHb2GWBXAvVVokFDTcC1hUP4+gp2gu9Ny27UHTjlLm -O0l/xZ5MN8tfKyYlFw18tXu3fkaPyHj8v/D1RDkuo4ARdFvGSe8TqisbhLk2+9ow -xfIGbEM9Fdiw8qByC2+d+FfvzIKz3GfQVwn0VoRom8L6NBIANq1IGrB5JefZB6nv -DnfuxkBmY7F1513HKuEJ8KsLWWZWV9OPU4j4I4Rt+WJNlKjbD2srHxyrS2RDsr91 -8nCkNoWVNO3sZq0XkWKecdc921vL4ginAgMBAAGjVDBSMA4GA1UdDwEB/wQEAwIC -pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBoGA1UdEQQT 
-MBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAGcU3iyLBIVZj -aDzSvEDHUd1bnLBl1C58Xu/CyKlPqVU7mLfK0JcgEaYQTSX6fCJVNLbbCrcGLsPJ -fbjlBbyeLjTV413fxPVuona62pBFjqdtbli2Qe8FRH2KBdm41JUJGdo+SdsFu7nc -BFOcubdw6LLIXvsTvwndKcHWx1rMX709QU1Vn1GAIsbJV/DWI231Jyyb+lxAUx/C -8vce5uVxiKcGS+g6OjsN3D3TtiEQGSXLh013W6Wsih8td8yMCMZ3w8LQ38br1GUe -ahLIgUJ9l6HDguM17R7kGqxNvbElsMUHfTtXXP7UDQUiYXDakg8xDP6n9DCDhJ8Y -bSt7OLB7NQ== ------END CERTIFICATE-----`) - - // KeyPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) - KeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA146jurJLz9N5OfAjY0IIHfRv5rflmvsUVPll4iggh7bWsDPM -frJaucn7BwsMZcLBp+/R5iannDaoB29hlgVwL1VaJBQ03AtYVD+PoKdoLvTctu1B -045S5jtJf8WeTDfLXysmJRcNfLV7t35Gj8h4/L/w9UQ5LqOAEXRbxknvE6orG4S5 -NvvaMMXyBmxDPRXYsPKgcgtvnfhX78yCs9xn0FcJ9FaEaJvC+jQSADatSBqweSXn -2Qep7w537sZAZmOxdeddxyrhCfCrC1lmVlfTj1OI+COEbfliTZSo2w9rKx8cq0tk -Q7K/dfJwpDaFlTTt7GatF5FinnHXPdtby+IIpwIDAQABAoIBAAJK4RDmPooqTJrC -JA41MJLo+5uvjwCT9QZmVKAQHzByUFw1YNJkITTiognUI0CdzqNzmH7jIFs39ZeG -proKusO2G6xQjrNcZ4cV2fgyb5g4QHStl0qhs94A+WojduiGm2IaumAgm6Mc5wDv -ld6HmknN3Mku/ZCyanVFEIjOVn2WB7ZQLTBs6ZYaebTJG2Xv6p9t2YJW7pPQ9Xce -s9ohAWohyM4X/OvfnfnLtQp2YLw/BxwehBsCR5SXM3ibTKpFNtxJC8hIfTuWtxZu -2ywrmXShYBRB1WgtZt5k04bY/HFncvvcHK3YfI1+w4URKtwdaQgPUQRbVwDwuyBn -flfkCJECgYEA/eWt01iEyE/lXkGn6V9lCocUU7lCU6yk5UT8VXVUc5If4KZKPfCk -p4zJDOqwn2eM673aWz/mG9mtvAvmnugaGjcaVCyXOp/D/GDmKSoYcvW5B/yjfkLy -dK6Yaa5LDRVYlYgyzcdCT5/9Qc626NzFwKCZNI4ncIU8g7ViATRxWJ8CgYEA2Ver -vZ0M606sfgC0H3NtwNBxmuJ+lIF5LNp/wDi07lDfxRR1rnZMX5dnxjcpDr/zvm8J -WtJJX3xMgqjtHuWKL3yKKony9J5ZPjichSbSbhrzfovgYIRZLxLLDy4MP9L3+CX/ -yBXnqMWuSnFX+M5fVGxdDWiYF3V+wmeOv9JvavkCgYEAiXAPDFzaY+R78O3xiu7M -r0o3wqqCMPE/wav6O/hrYrQy9VSO08C0IM6g9pEEUwWmzuXSkZqhYWoQFb8Lc/GI -T7CMXAxXQLDDUpbRgG79FR3Wr3AewHZU8LyiXHKwxcBMV4WGmsXGK3wbh8fyU1NO -6NsGk+BvkQVOoK1LBAPzZ1kCgYEAsBSmD8U33T9s4dxiEYTrqyV0lH3g/SFz8ZHH -pAyNEPI2iC1ONhyjPWKlcWHpAokiyOqeUpVBWnmSZtzC1qAydsxYB6ShT+sl9BHb -RMix/QAauzBJhQhUVJ3OIys0Q1UBDmqCsjCE8SfOT4NKOUnA093C+YT+iyrmmktZ -zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb -7+a1OMhLwWbuz+pl64wFKrbSUyimMOYQpjVE/1vk/kb99pxbgol27hdKyTH1d+ov -kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA= ------END RSA PRIVATE KEY-----`) -) - -func testShutdownBindPort(t *testing.T, protocol string, port string) { - handler := NewServeMux() - handler.HandleFunc(".", func(w ResponseWriter, r *Msg) {}) - startedCh := make(chan struct{}) - s := &Server{ - Addr: net.JoinHostPort("127.0.0.1", port), - Net: protocol, - Handler: handler, - NotifyStartedFunc: func() { - startedCh <- struct{}{} - }, - } - go func() { - if err := s.ListenAndServe(); err != nil { - t.Log(err) - } - }() - <-startedCh - t.Logf("DNS server is started on: %s", s.Addr) - if err := s.Shutdown(); err != nil { - t.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - go func() { - if err := s.ListenAndServe(); err != nil { - t.Fatal(err) - } - }() - <-startedCh - t.Logf("DNS server is started on: %s", s.Addr) -} - -func TestShutdownBindPortUDP(t *testing.T) { - testShutdownBindPort(t, "udp", "1153") -} - -func TestShutdownBindPortTCP(t *testing.T) { - testShutdownBindPort(t, "tcp", "1154") -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0.go deleted file mode 100644 index f31e9e684..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0.go +++ /dev/null @@ -1,218 +0,0 @@ -package dns - -import ( - "crypto" - 
"crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "encoding/binary" - "math/big" - "strings" - "time" -) - -// Sign signs a dns.Msg. It fills the signature with the appropriate data. -// The SIG record should have the SignerName, KeyTag, Algorithm, Inception -// and Expiration set. -func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { - if k == nil { - return nil, ErrPrivKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return nil, ErrKey - } - rr.Header().Rrtype = TypeSIG - rr.Header().Class = ClassANY - rr.Header().Ttl = 0 - rr.Header().Name = "." - rr.OrigTtl = 0 - rr.TypeCovered = 0 - rr.Labels = 0 - - buf := make([]byte, m.Len()+rr.len()) - mbuf, err := m.PackBuffer(buf) - if err != nil { - return nil, err - } - if &buf[0] != &mbuf[0] { - return nil, ErrBuf - } - off, err := PackRR(rr, buf, len(mbuf), nil, false) - if err != nil { - return nil, err - } - buf = buf[:off:cap(buf)] - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return nil, ErrAlg - } - - hasher := hash.New() - // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) - // Write message - hasher.Write(buf[:len(mbuf)]) - - signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) - if err != nil { - return nil, err - } - - rr.Signature = toBase64(signature) - - buf = append(buf, signature...) - if len(buf) > int(^uint16(0)) { - return nil, ErrBuf - } - // Adjust sig data length - rdoff := len(mbuf) + 1 + 2 + 2 + 4 - rdlen := binary.BigEndian.Uint16(buf[rdoff:]) - rdlen += uint16(len(signature)) - binary.BigEndian.PutUint16(buf[rdoff:], rdlen) - // Adjust additional count - adc := binary.BigEndian.Uint16(buf[10:]) - adc++ - binary.BigEndian.PutUint16(buf[10:], adc) - return buf, nil -} - -// Verify validates the message buf using the key k. -// It's assumed that buf is a valid message from which rr was unpacked. 
-func (rr *SIG) Verify(k *KEY, buf []byte) error { - if k == nil { - return ErrKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := hash.New() - - buflen := len(buf) - qdc := binary.BigEndian.Uint16(buf[4:]) - anc := binary.BigEndian.Uint16(buf[6:]) - auc := binary.BigEndian.Uint16(buf[8:]) - adc := binary.BigEndian.Uint16(buf[10:]) - offset := 12 - var err error - for i := uint16(0); i < qdc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type and Class - offset += 2 + 2 - } - for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type, Class and TTL - offset += 2 + 2 + 4 - if offset+1 >= buflen { - continue - } - var rdlen uint16 - rdlen = binary.BigEndian.Uint16(buf[offset:]) - offset += 2 - offset += int(rdlen) - } - if offset >= buflen { - return &Error{err: "overflowing unpacking signed message"} - } - - // offset should be just prior to SIG - bodyend := offset - // owner name SHOULD be root - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip Type, Class, TTL, RDLen - offset += 2 + 2 + 4 + 2 - sigstart := offset - // Skip Type Covered, Algorithm, Labels, Original TTL - offset += 2 + 1 + 1 + 4 - if offset+4+4 >= buflen { - return &Error{err: "overflow unpacking signed message"} - } - expire := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - incept := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - now := uint32(time.Now().Unix()) - if now < incept || now > expire { - return ErrTime - } - // Skip key tag - offset += 2 - var signername string - signername, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // If key has come from the DNS name compression might - // have mangled the case of the name - if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { - return &Error{err: "signer name doesn't match key name"} - } - sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ - byte((adc - 1) << 8), - byte(adc - 1), - }) - hasher.Write(buf[12:bodyend]) - - hashed := hasher.Sum(nil) - sig := buf[sigend:] - switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - case RSASHA1, RSASHA256, RSASHA512: - pk := k.publicKeyRSA() - if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) - } - case ECDSAP256SHA256, ECDSAP384SHA384: - pk := k.publicKeyECDSA() - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if ecdsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - } - return ErrKeyAlg -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0_test.go deleted file mode 100644 index 122de6a8e..000000000 --- 
a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/sig0_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package dns - -import ( - "crypto" - "testing" - "time" -) - -func TestSIG0(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - m := new(Msg) - m.SetQuestion("example.org.", TypeSOA) - for _, alg := range []uint8{ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { - algstr := AlgorithmToString[alg] - keyrr := new(KEY) - keyrr.Hdr.Name = algstr + "." - keyrr.Hdr.Rrtype = TypeKEY - keyrr.Hdr.Class = ClassINET - keyrr.Algorithm = alg - keysize := 1024 - switch alg { - case ECDSAP256SHA256: - keysize = 256 - case ECDSAP384SHA384: - keysize = 384 - } - pk, err := keyrr.Generate(keysize) - if err != nil { - t.Errorf("failed to generate key for “%s”: %v", algstr, err) - continue - } - now := uint32(time.Now().Unix()) - sigrr := new(SIG) - sigrr.Hdr.Name = "." - sigrr.Hdr.Rrtype = TypeSIG - sigrr.Hdr.Class = ClassANY - sigrr.Algorithm = alg - sigrr.Expiration = now + 300 - sigrr.Inception = now - 300 - sigrr.KeyTag = keyrr.KeyTag() - sigrr.SignerName = keyrr.Hdr.Name - mb, err := sigrr.Sign(pk.(crypto.Signer), m) - if err != nil { - t.Errorf("failed to sign message using “%s”: %v", algstr, err) - continue - } - m := new(Msg) - if err := m.Unpack(mb); err != nil { - t.Errorf("failed to unpack message signed using “%s”: %v", algstr, err) - continue - } - if len(m.Extra) != 1 { - t.Errorf("missing SIG for message signed using “%s”", algstr) - continue - } - var sigrrwire *SIG - switch rr := m.Extra[0].(type) { - case *SIG: - sigrrwire = rr - default: - t.Errorf("expected SIG RR, instead: %v", rr) - continue - } - for _, rr := range []*SIG{sigrr, sigrrwire} { - id := "sigrr" - if rr == sigrrwire { - id = "sigrrwire" - } - if err := rr.Verify(keyrr, mb); err != nil { - t.Errorf("failed to verify “%s” signed SIG(%s): %v", algstr, id, err) - continue - } - } - mb[13]++ - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("verify succeeded on an altered message using “%s”", algstr) - continue - } - sigrr.Expiration = 2 - sigrr.Inception = 1 - mb, _ = sigrr.Sign(pk.(crypto.Signer), m) - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("verify succeeded on an expired message using “%s”", algstr) - continue - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index 9573c7d0b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. - -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time.
If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - g.Lock() - delete(g.m, key) - g.Unlock() - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/smimea.go deleted file mode 100644 index 4e7ded4b3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/smimea.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/x509" - "encoding/hex" -) - -// Sign creates a SMIMEA record from an SSL certificate. -func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeSMIMEA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a SMIMEA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *SMIMEA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// SMIMEAName returns the ownername of a SMIMEA resource record as per the -// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 -func SMIMEAName(email, domain string) (string, error) { - hasher := sha256.New() - hasher.Write([]byte(email)) - - // RFC Section 3: "The local-part is hashed using the SHA2-256 - // algorithm with the hash truncated to 28 octets and - // represented in its hexadecimal representation to become the - // left-most label in the prepared domain name" - return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tlsa.go deleted file mode 100644 index 431e2fb5a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tlsa.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/x509" - "net" - "strconv" -) - -// Sign creates a TLSA record from an SSL certificate. -func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeTLSA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a TLSA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *TLSA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? 
- } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// TLSAName returns the ownername of a TLSA resource record as per the -// rules specified in RFC 6698, Section 3. -func TLSAName(name, service, network string) (string, error) { - if !IsFqdn(name) { - return "", ErrFqdn - } - p, err := net.LookupPort(network, service) - if err != nil { - return "", err - } - return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig.go deleted file mode 100644 index 24013096b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig.go +++ /dev/null @@ -1,383 +0,0 @@ -package dns - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/binary" - "encoding/hex" - "hash" - "strconv" - "strings" - "time" -) - -// HMAC hashing codes. These are transmitted as domain names. -const ( - HmacMD5 = "hmac-md5.sig-alg.reg.int." - HmacSHA1 = "hmac-sha1." - HmacSHA256 = "hmac-sha256." - HmacSHA512 = "hmac-sha512." -) - -// TSIG is the RR the holds the transaction signature of a message. -// See RFC 2845 and RFC 4635. -type TSIG struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` - OrigId uint16 - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TSIG has no official presentation format, but this will suffice. - -func (rr *TSIG) String() string { - s := "\n;; TSIG PSEUDOSECTION:\n" - s += rr.Hdr.String() + - " " + rr.Algorithm + - " " + tsigTimeToString(rr.TimeSigned) + - " " + strconv.Itoa(int(rr.Fudge)) + - " " + strconv.Itoa(int(rr.MACSize)) + - " " + strings.ToUpper(rr.MAC) + - " " + strconv.Itoa(int(rr.OrigId)) + - " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR - " " + strconv.Itoa(int(rr.OtherLen)) + - " " + rr.OtherData - return s -} - -// The following values must be put in wireformat, so that the MAC can be calculated. -// RFC 2845, section 3.4.2. TSIG Variables. -type tsigWireFmt struct { - // From RR_Header - Name string `dns:"domain-name"` - Class uint16 - Ttl uint32 - // Rdata of the TSIG - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - // MACSize, MAC and OrigId excluded - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC -type macWireFmt struct { - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` -} - -// 3.3. Time values used in TSIG calculations -type timerWireFmt struct { - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 -} - -// TsigGenerate fills out the TSIG record attached to the message. -// The message should contain -// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), -// time fudge (defaults to 300 seconds) and the current time -// The TSIG MAC is saved in that Tsig RR. -// When TsigGenerate is called for the first time requestMAC is set to the empty string and -// timersOnly is false. -// If something goes wrong an error is returned, otherwise it is nil. 
-func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - if m.IsTsig() == nil { - panic("dns: TSIG not last RR in additional") - } - // If we barf here, the caller is to blame - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return nil, "", err - } - - rr := m.Extra[len(m.Extra)-1].(*TSIG) - m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg - mbuf, err := m.Pack() - if err != nil { - return nil, "", err - } - buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) - - t := new(TSIG) - var h hash.Hash - switch strings.ToLower(rr.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, []byte(rawsecret)) - case HmacSHA1: - h = hmac.New(sha1.New, []byte(rawsecret)) - case HmacSHA256: - h = hmac.New(sha256.New, []byte(rawsecret)) - case HmacSHA512: - h = hmac.New(sha512.New, []byte(rawsecret)) - default: - return nil, "", ErrKeyAlg - } - h.Write(buf) - t.MAC = hex.EncodeToString(h.Sum(nil)) - t.MACSize = uint16(len(t.MAC) / 2) // Size is half! - - t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} - t.Fudge = rr.Fudge - t.TimeSigned = rr.TimeSigned - t.Algorithm = rr.Algorithm - t.OrigId = m.Id - - tbuf := make([]byte, t.len()) - if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { - tbuf = tbuf[:off] // reset to actual size used - } else { - return nil, "", err - } - mbuf = append(mbuf, tbuf...) - // Update the ArCount directly in the buffer. - binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) - - return mbuf, t.MAC, nil -} - -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. -func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return err - } - // Strip the TSIG from the incoming msg - stripped, tsig, err := stripTsig(msg) - if err != nil { - return err - } - - msgMAC, err := hex.DecodeString(tsig.MAC) - if err != nil { - return err - } - - buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - - // Fudge factor works both ways. A message can arrive before it was signed because - // of clock skew. - now := uint64(time.Now().Unix()) - ti := now - tsig.TimeSigned - if now < tsig.TimeSigned { - ti = tsig.TimeSigned - now - } - if uint64(tsig.Fudge) < ti { - return ErrTime - } - - var h hash.Hash - switch strings.ToLower(tsig.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return ErrKeyAlg - } - h.Write(buf) - if !hmac.Equal(h.Sum(nil), msgMAC) { - return ErrSig - } - return nil -} - -// Create a wiredata buffer for the MAC calculation. -func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { - var buf []byte - if rr.TimeSigned == 0 { - rr.TimeSigned = uint64(time.Now().Unix()) - } - if rr.Fudge == 0 { - rr.Fudge = 300 // Standard (RFC) default. 
- } - - if requestMAC != "" { - m := new(macWireFmt) - m.MACSize = uint16(len(requestMAC) / 2) - m.MAC = requestMAC - buf = make([]byte, len(requestMAC)) // long enough - n, _ := packMacWire(m, buf) - buf = buf[:n] - } - - tsigvar := make([]byte, DefaultMsgSize) - if timersOnly { - tsig := new(timerWireFmt) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - n, _ := packTimerWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } else { - tsig := new(tsigWireFmt) - tsig.Name = strings.ToLower(rr.Hdr.Name) - tsig.Class = ClassANY - tsig.Ttl = rr.Hdr.Ttl - tsig.Algorithm = strings.ToLower(rr.Algorithm) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - tsig.Error = rr.Error - tsig.OtherLen = rr.OtherLen - tsig.OtherData = rr.OtherData - n, _ := packTsigWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } - - if requestMAC != "" { - x := append(buf, msgbuf...) - buf = append(x, tsigvar...) - } else { - buf = append(msgbuf, tsigvar...) - } - return buf -} - -// Strip the TSIG from the raw message. -func stripTsig(msg []byte) ([]byte, *TSIG, error) { - // Copied from msg.go's Unpack() Header, but modified. - var ( - dh Header - err error - ) - off, tsigoff := 0, 0 - - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return nil, nil, err - } - if dh.Arcount == 0 { - return nil, nil, ErrNoSig - } - - // Rcode, see msg.go Unpack() - if int(dh.Bits&0xF) == RcodeNotAuth { - return nil, nil, ErrAuth - } - - for i := 0; i < int(dh.Qdcount); i++ { - _, off, err = unpackQuestion(msg, off) - if err != nil { - return nil, nil, err - } - } - - _, off, err = unpackRRslice(int(dh.Ancount), msg, off) - if err != nil { - return nil, nil, err - } - _, off, err = unpackRRslice(int(dh.Nscount), msg, off) - if err != nil { - return nil, nil, err - } - - rr := new(TSIG) - var extra RR - for i := 0; i < int(dh.Arcount); i++ { - tsigoff = off - extra, off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - if extra.Header().Rrtype == TypeTSIG { - rr = extra.(*TSIG) - // Adjust Arcount. - arcount := binary.BigEndian.Uint16(msg[10:]) - binary.BigEndian.PutUint16(msg[10:], arcount-1) - break - } - } - if rr == nil { - return nil, nil, ErrNoSig - } - return msg[:tsigoff], rr, nil -} - -// Translate the TSIG time signed into a date. There is no -// need for RFC1982 calculations as this date is 48 bits. 
-func tsigTimeToString(t uint64) string { - ti := time.Unix(int64(t), 0).UTC() - return ti.Format("20060102150405") -} - -func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go TSIG packing - // RR_Header - off, err := PackDomainName(tw.Name, msg, 0, nil, false) - if err != nil { - return off, err - } - off, err = packUint16(tw.Class, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(tw.Ttl, msg, off) - if err != nil { - return off, err - } - - off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) - if err != nil { - return off, err - } - off, err = packUint48(tw.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - - off, err = packUint16(tw.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(tw.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packMacWire(mw *macWireFmt, msg []byte) (int, error) { - off, err := packUint16(mw.MACSize, msg, 0) - if err != nil { - return off, err - } - off, err = packStringHex(mw.MAC, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { - off, err := packUint48(tw.TimeSigned, msg, 0) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig_test.go deleted file mode 100644 index 48b9988b6..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/tsig_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package dns - -import ( - "testing" - "time" -) - -func newTsig(algo string) *Msg { - m := new(Msg) - m.SetQuestion("example.org.", TypeA) - m.SetTsig("example.", algo, 300, time.Now().Unix()) - return m -} - -func TestTsig(t *testing.T) { - m := newTsig(HmacMD5) - buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } -} - -func TestTsigCase(t *testing.T) { - m := newTsig("HmAc-mD5.sig-ALg.rEg.int.") // HmacMD5 - buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types.go deleted file mode 100644 index 53da4755c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types.go +++ /dev/null @@ -1,1287 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" -) - -type ( - // Type is a DNS type. - Type uint16 - // Class is a DNS class. - Class uint16 - // Name is a DNS domain name. - Name string -) - -// Packet formats - -// Wire constants and supported types. 
-const ( - // valid RR_Header.Rrtype and Question.qtype - - TypeNone uint16 = 0 - TypeA uint16 = 1 - TypeNS uint16 = 2 - TypeMD uint16 = 3 - TypeMF uint16 = 4 - TypeCNAME uint16 = 5 - TypeSOA uint16 = 6 - TypeMB uint16 = 7 - TypeMG uint16 = 8 - TypeMR uint16 = 9 - TypeNULL uint16 = 10 - TypePTR uint16 = 12 - TypeHINFO uint16 = 13 - TypeMINFO uint16 = 14 - TypeMX uint16 = 15 - TypeTXT uint16 = 16 - TypeRP uint16 = 17 - TypeAFSDB uint16 = 18 - TypeX25 uint16 = 19 - TypeISDN uint16 = 20 - TypeRT uint16 = 21 - TypeNSAPPTR uint16 = 23 - TypeSIG uint16 = 24 - TypeKEY uint16 = 25 - TypePX uint16 = 26 - TypeGPOS uint16 = 27 - TypeAAAA uint16 = 28 - TypeLOC uint16 = 29 - TypeNXT uint16 = 30 - TypeEID uint16 = 31 - TypeNIMLOC uint16 = 32 - TypeSRV uint16 = 33 - TypeATMA uint16 = 34 - TypeNAPTR uint16 = 35 - TypeKX uint16 = 36 - TypeCERT uint16 = 37 - TypeDNAME uint16 = 39 - TypeOPT uint16 = 41 // EDNS - TypeDS uint16 = 43 - TypeSSHFP uint16 = 44 - TypeRRSIG uint16 = 46 - TypeNSEC uint16 = 47 - TypeDNSKEY uint16 = 48 - TypeDHCID uint16 = 49 - TypeNSEC3 uint16 = 50 - TypeNSEC3PARAM uint16 = 51 - TypeTLSA uint16 = 52 - TypeSMIMEA uint16 = 53 - TypeHIP uint16 = 55 - TypeNINFO uint16 = 56 - TypeRKEY uint16 = 57 - TypeTALINK uint16 = 58 - TypeCDS uint16 = 59 - TypeCDNSKEY uint16 = 60 - TypeOPENPGPKEY uint16 = 61 - TypeSPF uint16 = 99 - TypeUINFO uint16 = 100 - TypeUID uint16 = 101 - TypeGID uint16 = 102 - TypeUNSPEC uint16 = 103 - TypeNID uint16 = 104 - TypeL32 uint16 = 105 - TypeL64 uint16 = 106 - TypeLP uint16 = 107 - TypeEUI48 uint16 = 108 - TypeEUI64 uint16 = 109 - TypeURI uint16 = 256 - TypeCAA uint16 = 257 - TypeAVC uint16 = 258 - - TypeTKEY uint16 = 249 - TypeTSIG uint16 = 250 - - // valid Question.Qtype only - TypeIXFR uint16 = 251 - TypeAXFR uint16 = 252 - TypeMAILB uint16 = 253 - TypeMAILA uint16 = 254 - TypeANY uint16 = 255 - - TypeTA uint16 = 32768 - TypeDLV uint16 = 32769 - TypeReserved uint16 = 65535 - - // valid Question.Qclass - ClassINET = 1 - ClassCSNET = 2 - ClassCHAOS = 3 - ClassHESIOD = 4 - ClassNONE = 254 - ClassANY = 255 - - // Message Response Codes. - RcodeSuccess = 0 - RcodeFormatError = 1 - RcodeServerFailure = 2 - RcodeNameError = 3 - RcodeNotImplemented = 4 - RcodeRefused = 5 - RcodeYXDomain = 6 - RcodeYXRrset = 7 - RcodeNXRrset = 8 - RcodeNotAuth = 9 - RcodeNotZone = 10 - RcodeBadSig = 16 // TSIG - RcodeBadVers = 16 // EDNS0 - RcodeBadKey = 17 - RcodeBadTime = 18 - RcodeBadMode = 19 // TKEY - RcodeBadName = 20 - RcodeBadAlg = 21 - RcodeBadTrunc = 22 // TSIG - RcodeBadCookie = 23 // DNS Cookies - - // Message Opcodes. There is no 3. - OpcodeQuery = 0 - OpcodeIQuery = 1 - OpcodeStatus = 2 - OpcodeNotify = 4 - OpcodeUpdate = 5 -) - -// Header is the wire format for the DNS packet header. -type Header struct { - Id uint16 - Bits uint16 - Qdcount, Ancount, Nscount, Arcount uint16 -} - -const ( - headerSize = 12 - - // Header.Bits - _QR = 1 << 15 // query/response (response=1) - _AA = 1 << 10 // authoritative - _TC = 1 << 9 // truncated - _RD = 1 << 8 // recursion desired - _RA = 1 << 7 // recursion available - _Z = 1 << 6 // Z - _AD = 1 << 5 // authticated data - _CD = 1 << 4 // checking disabled - - LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. - LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
- - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - - LOC_ALTITUDEBASE = 100000 -) - -// Different Certificate Types, see RFC 4398, Section 2.1 -const ( - CertPKIX = 1 + iota - CertSPKI - CertPGP - CertIPIX - CertISPKI - CertIPGP - CertACPKIX - CertIACPKIX - CertURI = 253 - CertOID = 254 -) - -// CertTypeToString converts the Cert Type to its string representation. -// See RFC 4398 and RFC 6944. -var CertTypeToString = map[uint16]string{ - CertPKIX: "PKIX", - CertSPKI: "SPKI", - CertPGP: "PGP", - CertIPIX: "IPIX", - CertISPKI: "ISPKI", - CertIPGP: "IPGP", - CertACPKIX: "ACPKIX", - CertIACPKIX: "IACPKIX", - CertURI: "URI", - CertOID: "OID", -} - -// StringToCertType is the reverseof CertTypeToString. -var StringToCertType = reverseInt16(CertTypeToString) - -//go:generate go run types_generate.go - -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. -type Question struct { - Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) - Qtype uint16 - Qclass uint16 -} - -func (q *Question) len() int { - return len(q.Name) + 1 + 2 + 2 -} - -func (q *Question) String() (s string) { - // prefix with ; (as in dig) - s = ";" + sprintName(q.Name) + "\t" - s += Class(q.Qclass).String() + "\t" - s += " " + Type(q.Qtype).String() - return s -} - -// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY -// is named "*" there. -type ANY struct { - Hdr RR_Header - // Does not have any rdata -} - -func (rr *ANY) String() string { return rr.Hdr.String() } - -type CNAME struct { - Hdr RR_Header - Target string `dns:"cdomain-name"` -} - -func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } - -type HINFO struct { - Hdr RR_Header - Cpu string - Os string -} - -func (rr *HINFO) String() string { - return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) -} - -type MB struct { - Hdr RR_Header - Mb string `dns:"cdomain-name"` -} - -func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } - -type MG struct { - Hdr RR_Header - Mg string `dns:"cdomain-name"` -} - -func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } - -type MINFO struct { - Hdr RR_Header - Rmail string `dns:"cdomain-name"` - Email string `dns:"cdomain-name"` -} - -func (rr *MINFO) String() string { - return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) -} - -type MR struct { - Hdr RR_Header - Mr string `dns:"cdomain-name"` -} - -func (rr *MR) String() string { - return rr.Hdr.String() + sprintName(rr.Mr) -} - -type MF struct { - Hdr RR_Header - Mf string `dns:"cdomain-name"` -} - -func (rr *MF) String() string { - return rr.Hdr.String() + sprintName(rr.Mf) -} - -type MD struct { - Hdr RR_Header - Md string `dns:"cdomain-name"` -} - -func (rr *MD) String() string { - return rr.Hdr.String() + sprintName(rr.Md) -} - -type MX struct { - Hdr RR_Header - Preference uint16 - Mx string `dns:"cdomain-name"` -} - -func (rr *MX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) -} - -type AFSDB struct { - Hdr RR_Header - Subtype uint16 - Hostname string `dns:"cdomain-name"` -} - -func (rr *AFSDB) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) -} - -type X25 struct { - Hdr RR_Header - PSDNAddress string -} - -func (rr *X25) String() string { - return rr.Hdr.String() + rr.PSDNAddress -} - -type RT struct { - Hdr RR_Header - 
Preference uint16 - Host string `dns:"cdomain-name"` -} - -func (rr *RT) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) -} - -type NS struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` -} - -func (rr *NS) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) -} - -type PTR struct { - Hdr RR_Header - Ptr string `dns:"cdomain-name"` -} - -func (rr *PTR) String() string { - return rr.Hdr.String() + sprintName(rr.Ptr) -} - -type RP struct { - Hdr RR_Header - Mbox string `dns:"domain-name"` - Txt string `dns:"domain-name"` -} - -func (rr *RP) String() string { - return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) -} - -type SOA struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` - Mbox string `dns:"cdomain-name"` - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - Minttl uint32 -} - -func (rr *SOA) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + - " " + strconv.FormatInt(int64(rr.Serial), 10) + - " " + strconv.FormatInt(int64(rr.Refresh), 10) + - " " + strconv.FormatInt(int64(rr.Retry), 10) + - " " + strconv.FormatInt(int64(rr.Expire), 10) + - " " + strconv.FormatInt(int64(rr.Minttl), 10) -} - -type TXT struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func sprintName(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - dst = appendDomainNameByte(dst, b) - } - i += n - } - } - return string(dst) -} - -func sprintTxtOctet(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - dst = append(dst, '"') - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - if b < ' ' || b > '~' { - dst = appendByte(dst, b) - } else { - dst = append(dst, b) - } - } - i += n - } - } - dst = append(dst, '"') - return string(dst) -} - -func sprintTxt(txt []string) string { - var out []byte - for i, s := range txt { - if i > 0 { - out = append(out, ` "`...) 
- } else { - out = append(out, '"') - } - bs := []byte(s) - for j := 0; j < len(bs); { - b, n := nextByte(bs, j) - if n == 0 { - break - } - out = appendTXTStringByte(out, b) - j += n - } - out = append(out, '"') - } - return string(out) -} - -func appendDomainNameByte(s []byte, b byte) []byte { - switch b { - case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - return append(s, '\\', b) - } - return appendTXTStringByte(s, b) -} - -func appendTXTStringByte(s []byte, b byte) []byte { - switch b { - case '"', '\\': - return append(s, '\\', b) - } - if b < ' ' || b > '~' { - return appendByte(s, b) - } - return append(s, b) -} - -func appendByte(s []byte, b byte) []byte { - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - return s -} - -func nextByte(b []byte, offset int) (byte, int) { - if offset >= len(b) { - return 0, 0 - } - if b[offset] != '\\' { - // not an escape sequence - return b[offset], 1 - } - switch len(b) - offset { - case 1: // dangling escape - return 0, 0 - case 2, 3: // too short to be \ddd - default: // maybe \ddd - if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { - return dddToByte(b[offset+1:]), 4 - } - } - // not \ddd, just an RFC 1035 "quoted" character - return b[offset+1], 2 -} - -type SPF struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -type AVC struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -type SRV struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Port uint16 - Target string `dns:"domain-name"` -} - -func (rr *SRV) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Priority)) + " " + - strconv.Itoa(int(rr.Weight)) + " " + - strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) -} - -type NAPTR struct { - Hdr RR_Header - Order uint16 - Preference uint16 - Flags string - Service string - Regexp string - Replacement string `dns:"domain-name"` -} - -func (rr *NAPTR) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Order)) + " " + - strconv.Itoa(int(rr.Preference)) + " " + - "\"" + rr.Flags + "\" " + - "\"" + rr.Service + "\" " + - "\"" + rr.Regexp + "\" " + - rr.Replacement -} - -// The CERT resource record, see RFC 4398. -type CERT struct { - Hdr RR_Header - Type uint16 - KeyTag uint16 - Algorithm uint8 - Certificate string `dns:"base64"` -} - -func (rr *CERT) String() string { - var ( - ok bool - certtype, algorithm string - ) - if certtype, ok = CertTypeToString[rr.Type]; !ok { - certtype = strconv.Itoa(int(rr.Type)) - } - if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { - algorithm = strconv.Itoa(int(rr.Algorithm)) - } - return rr.Hdr.String() + certtype + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + algorithm + - " " + rr.Certificate -} - -// The DNAME resource record, see RFC 2672. 
-type DNAME struct { - Hdr RR_Header - Target string `dns:"domain-name"` -} - -func (rr *DNAME) String() string { - return rr.Hdr.String() + sprintName(rr.Target) -} - -type A struct { - Hdr RR_Header - A net.IP `dns:"a"` -} - -func (rr *A) String() string { - if rr.A == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.A.String() -} - -type AAAA struct { - Hdr RR_Header - AAAA net.IP `dns:"aaaa"` -} - -func (rr *AAAA) String() string { - if rr.AAAA == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.AAAA.String() -} - -type PX struct { - Hdr RR_Header - Preference uint16 - Map822 string `dns:"domain-name"` - Mapx400 string `dns:"domain-name"` -} - -func (rr *PX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) -} - -type GPOS struct { - Hdr RR_Header - Longitude string - Latitude string - Altitude string -} - -func (rr *GPOS) String() string { - return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude -} - -type LOC struct { - Hdr RR_Header - Version uint8 - Size uint8 - HorizPre uint8 - VertPre uint8 - Latitude uint32 - Longitude uint32 - Altitude uint32 -} - -// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent -// format and returns a string in m (two decimals for the cm) -func cmToM(m, e uint8) string { - if e < 2 { - if e == 1 { - m *= 10 - } - - return fmt.Sprintf("0.%02d", m) - } - - s := fmt.Sprintf("%d", m) - for e > 2 { - s += "0" - e-- - } - return s -} - -func (rr *LOC) String() string { - s := rr.Hdr.String() - - lat := rr.Latitude - ns := "N" - if lat > LOC_EQUATOR { - lat = lat - LOC_EQUATOR - } else { - ns = "S" - lat = LOC_EQUATOR - lat - } - h := lat / LOC_DEGREES - lat = lat % LOC_DEGREES - m := lat / LOC_HOURS - lat = lat % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) - - lon := rr.Longitude - ew := "E" - if lon > LOC_PRIMEMERIDIAN { - lon = lon - LOC_PRIMEMERIDIAN - } else { - ew = "W" - lon = LOC_PRIMEMERIDIAN - lon - } - h = lon / LOC_DEGREES - lon = lon % LOC_DEGREES - m = lon / LOC_HOURS - lon = lon % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) - - var alt = float64(rr.Altitude) / 100 - alt -= LOC_ALTITUDEBASE - if rr.Altitude%100 != 0 { - s += fmt.Sprintf("%.2fm ", alt) - } else { - s += fmt.Sprintf("%.0fm ", alt) - } - - s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " - s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " - s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" - - return s -} - -// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
-type SIG struct { - RRSIG -} - -type RRSIG struct { - Hdr RR_Header - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - Signature string `dns:"base64"` -} - -func (rr *RRSIG) String() string { - s := rr.Hdr.String() - s += Type(rr.TypeCovered).String() - s += " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Labels)) + - " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + - " " + TimeToString(rr.Expiration) + - " " + TimeToString(rr.Inception) + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + sprintName(rr.SignerName) + - " " + rr.Signature - return s -} - -type NSEC struct { - Hdr RR_Header - NextDomain string `dns:"domain-name"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC) String() string { - s := rr.Hdr.String() + sprintName(rr.NextDomain) - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC) len() int { - l := rr.Hdr.len() + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type DLV struct { - DS -} - -type CDS struct { - DS -} - -type DS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *DS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type KX struct { - Hdr RR_Header - Preference uint16 - Exchanger string `dns:"domain-name"` -} - -func (rr *KX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + sprintName(rr.Exchanger) -} - -type TA struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *TA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type TALINK struct { - Hdr RR_Header - PreviousName string `dns:"domain-name"` - NextName string `dns:"domain-name"` -} - -func (rr *TALINK) String() string { - return rr.Hdr.String() + - sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) -} - -type SSHFP struct { - Hdr RR_Header - Algorithm uint8 - Type uint8 - FingerPrint string `dns:"hex"` -} - -func (rr *SSHFP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Type)) + - " " + strings.ToUpper(rr.FingerPrint) -} - -type KEY struct { - DNSKEY -} - -type CDNSKEY struct { - DNSKEY -} - -type DNSKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *DNSKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type RKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *RKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type NSAPPTR struct { - Hdr RR_Header - Ptr string 
`dns:"domain-name"` -} - -func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } - -type NSEC3 struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` - HashLength uint8 - NextDomain string `dns:"size-base32:HashLength"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC3) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) + - " " + rr.NextDomain - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC3) len() int { - l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type NSEC3PARAM struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` -} - -func (rr *NSEC3PARAM) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) - return s -} - -type TKEY struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - Inception uint32 - Expiration uint32 - Mode uint16 - Error uint16 - KeySize uint16 - Key string - OtherLen uint16 - OtherData string -} - -func (rr *TKEY) String() string { - // It has no presentation format - return "" -} - -// RFC3597 represents an unknown/generic RR. -type RFC3597 struct { - Hdr RR_Header - Rdata string `dns:"hex"` -} - -func (rr *RFC3597) String() string { - // Let's call it a hack - s := rfc3597Header(rr.Hdr) - - s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata - return s -} - -func rfc3597Header(h RR_Header) string { - var s string - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" - s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" - return s -} - -type URI struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Target string `dns:"octet"` -} - -func (rr *URI) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + - " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) -} - -type DHCID struct { - Hdr RR_Header - Digest string `dns:"base64"` -} - -func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } - -type TLSA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *TLSA) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) + - " " + rr.Certificate -} - -type SMIMEA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *SMIMEA) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) - - // Every Nth char needs a space on this output. If we output - // this as one giant line, we can't read it can in because in some cases - // the cert length overflows scan.maxTok (2048). 
- sx := splitN(rr.Certificate, 1024) // conservative value here - s += " " + strings.Join(sx, " ") - return s -} - -type HIP struct { - Hdr RR_Header - HitLength uint8 - PublicKeyAlgorithm uint8 - PublicKeyLength uint16 - Hit string `dns:"size-hex:HitLength"` - PublicKey string `dns:"size-base64:PublicKeyLength"` - RendezvousServers []string `dns:"domain-name"` -} - -func (rr *HIP) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.PublicKeyAlgorithm)) + - " " + rr.Hit + - " " + rr.PublicKey - for _, d := range rr.RendezvousServers { - s += " " + sprintName(d) - } - return s -} - -type NINFO struct { - Hdr RR_Header - ZSData []string `dns:"txt"` -} - -func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } - -type NID struct { - Hdr RR_Header - Preference uint16 - NodeID uint64 -} - -func (rr *NID) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16x", rr.NodeID) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type L32 struct { - Hdr RR_Header - Preference uint16 - Locator32 net.IP `dns:"a"` -} - -func (rr *L32) String() string { - if rr.Locator32 == nil { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - } - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + rr.Locator32.String() -} - -type L64 struct { - Hdr RR_Header - Preference uint16 - Locator64 uint64 -} - -func (rr *L64) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16X", rr.Locator64) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type LP struct { - Hdr RR_Header - Preference uint16 - Fqdn string `dns:"domain-name"` -} - -func (rr *LP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) -} - -type EUI48 struct { - Hdr RR_Header - Address uint64 `dns:"uint48"` -} - -func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } - -type EUI64 struct { - Hdr RR_Header - Address uint64 -} - -func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } - -type CAA struct { - Hdr RR_Header - Flag uint8 - Tag string - Value string `dns:"octet"` -} - -func (rr *CAA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) -} - -type UID struct { - Hdr RR_Header - Uid uint32 -} - -func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } - -type GID struct { - Hdr RR_Header - Gid uint32 -} - -func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } - -type UINFO struct { - Hdr RR_Header - Uinfo string -} - -func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } - -type EID struct { - Hdr RR_Header - Endpoint string `dns:"hex"` -} - -func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } - -type NIMLOC struct { - Hdr RR_Header - Locator string `dns:"hex"` -} - -func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } - -type OPENPGPKEY struct { - Hdr RR_Header - PublicKey string `dns:"base64"` -} - -func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } - -// TimeToString translates the RRSIG's incep. and expir. times to the -// string representation used when printing the record. 
-// It takes serial arithmetic (RFC 1982) into account. -func TimeToString(t uint32) string { - mod := ((int64(t) - time.Now().Unix()) / year68) - 1 - if mod < 0 { - mod = 0 - } - ti := time.Unix(int64(t)-(mod*year68), 0).UTC() - return ti.Format("20060102150405") -} - -// StringToTime translates the RRSIG's incep. and expir. times from -// string values like "20110403154150" to an 32 bit integer. -// It takes serial arithmetic (RFC 1982) into account. -func StringToTime(s string) (uint32, error) { - t, err := time.Parse("20060102150405", s) - if err != nil { - return 0, err - } - mod := (t.Unix() / year68) - 1 - if mod < 0 { - mod = 0 - } - return uint32(t.Unix() - (mod * year68)), nil -} - -// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. -func saltToString(s string) string { - if len(s) == 0 { - return "-" - } - return strings.ToUpper(s) -} - -func euiToString(eui uint64, bits int) (hex string) { - switch bits { - case 64: - hex = fmt.Sprintf("%16.16x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] - case 48: - hex = fmt.Sprintf("%12.12x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] - } - return -} - -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p -} - -// SplitN splits a string into N sized string chunks. -// This might become an exported function once. -func splitN(s string, n int) []string { - if len(s) < n { - return []string{s} - } - sx := []string{} - p, i := 0, n - for { - if i <= len(s) { - sx = append(sx, s[p:i]) - } else { - sx = append(sx, s[p:]) - break - - } - p, i = p+n, i+n - } - - return sx -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index dd1310942..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,271 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, -} - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -// Header() functions -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len()\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: - o("l += len(rr.%s) + 1\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"*rr.Hdr.copyHeader()"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_test.go deleted file mode 100644 index c117cfbc7..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/types_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package dns - -import ( - "testing" -) - -func TestCmToM(t *testing.T) { - s := cmToM(0, 0) - if s != "0.00" { - t.Error("0, 0") - } - - s = cmToM(1, 0) - if s != "0.01" { - t.Error("1, 0") - } - - s = cmToM(3, 1) - if s != "0.30" { - t.Error("3, 1") - } - - s = cmToM(4, 2) - if s != "4" { - t.Error("4, 2") - } - - s = cmToM(5, 3) - if s != "50" { - t.Error("5, 3") - } - - s = cmToM(7, 5) - if s != "7000" { - t.Error("7, 5") - } - - s = cmToM(9, 9) - if s != "90000000" { - t.Error("9, 9") - } -} - -func TestSplitN(t *testing.T) { - xs := splitN("abc", 5) - if len(xs) != 1 && xs[0] != "abc" { - t.Errorf("Failure to split abc") - } - - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - - xs = splitN(s, 255) - if len(xs) != 1 && xs[0] != s { - t.Errorf("failure to split 255 char long string") - } - - s += "b" - xs = splitN(s, 255) - if len(xs) != 2 || xs[1] != "b" { - t.Errorf("failure to split 256 char long string: %d", len(xs)) - } - - // Make s longer - for i := 0; i < 255; i++ { - s += "a" - } - xs = splitN(s, 255) - if len(xs) != 3 || xs[2] != "a" { - t.Errorf("failure to split 
510 char long string: %d", len(xs)) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp.go deleted file mode 100644 index af111b9a8..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package dns - -import ( - "net" -) - -// SessionUDP holds the remote address and the associated -// out-of-band data. -type SessionUDP struct { - raddr *net.UDPAddr - context []byte -} - -// RemoteAddr returns the remote network address. -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - oob := make([]byte, 40) - n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) - if err != nil { - return n, nil, err - } - return n, &SessionUDP{raddr, oob[:oobn]}, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. -func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) - return n, err -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_linux.go deleted file mode 100644 index 033df4239..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_linux.go +++ /dev/null @@ -1,105 +0,0 @@ -// +build linux,!appengine - -package dns - -// See: -// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and -// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ -// -// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing -// interface, this might not always be the correct one. This code will make sure the egress -// packet's interface matched the ingress' one. - -import ( - "net" - "syscall" -) - -// setUDPSocketOptions sets the UDP socket options. -// This function is implemented on a per platform basis. See udp_*.go for more details -func setUDPSocketOptions(conn *net.UDPConn) error { - sa, err := getUDPSocketName(conn) - if err != nil { - return err - } - switch sa.(type) { - case *syscall.SockaddrInet6: - v6only, err := getUDPSocketOptions6Only(conn) - if err != nil { - return err - } - setUDPSocketOptions6(conn) - if !v6only { - setUDPSocketOptions4(conn) - } - case *syscall.SockaddrInet4: - setUDPSocketOptions4(conn) - } - return nil -} - -// setUDPSocketOptions4 prepares the v4 socket for sessions. -func setUDPSocketOptions4(conn *net.UDPConn) error { - file, err := conn.File() - if err != nil { - return err - } - if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { - file.Close() - return err - } - // Calling File() above results in the connection becoming blocking, we must fix that. - // See https://github.com/miekg/dns/issues/279 - err = syscall.SetNonblock(int(file.Fd()), true) - if err != nil { - file.Close() - return err - } - file.Close() - return nil -} - -// setUDPSocketOptions6 prepares the v6 socket for sessions. 
-func setUDPSocketOptions6(conn *net.UDPConn) error { - file, err := conn.File() - if err != nil { - return err - } - if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { - file.Close() - return err - } - err = syscall.SetNonblock(int(file.Fd()), true) - if err != nil { - file.Close() - return err - } - file.Close() - return nil -} - -// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined -// (dualstack). -func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { - file, err := conn.File() - if err != nil { - return false, err - } - // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections - v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) - if err != nil { - file.Close() - return false, err - } - file.Close() - return v6only == 1, nil -} - -func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { - file, err := conn.File() - if err != nil { - return nil, err - } - defer file.Close() - return syscall.Getsockname(int(file.Fd())) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_other.go deleted file mode 100644 index 488a282b2..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux appengine - -package dns - -import ( - "net" -) - -// These do nothing. See udp_linux.go for an example of how to implement this. - -// We tried to adhire to some kind of naming scheme. -func setUDPSocketOptions(conn *net.UDPConn) error { return nil } -func setUDPSocketOptions4(conn *net.UDPConn) error { return nil } -func setUDPSocketOptions6(conn *net.UDPConn) error { return nil } -func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil } diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_windows.go deleted file mode 100644 index 51e532ac2..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/udp_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build windows - -package dns - -import "net" - -type SessionUDP struct { - raddr *net.UDPAddr -} - -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - n, raddr, err := conn.ReadFrom(b) - if err != nil { - return n, nil, err - } - session := &SessionUDP{raddr.(*net.UDPAddr)} - return n, session, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. -func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - n, err := conn.WriteTo(b, session.raddr) - return n, err -} - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update.go deleted file mode 100644 index e90c5c968..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update.go +++ /dev/null @@ -1,106 +0,0 @@ -package dns - -// NameUsed sets the RRs in the prereq section to -// "Name is in use" RRs. RFC 2136 section 2.4.4. 
-func (u *Msg) NameUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// NameNotUsed sets the RRs in the prereq section to -// "Name is in not use" RRs. RFC 2136 section 2.4.5. -func (u *Msg) NameNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) - } -} - -// Used sets the RRs in the prereq section to -// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. -func (u *Msg) Used(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Answer = append(u.Answer, r) - } -} - -// RRsetUsed sets the RRs in the prereq section to -// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. -func (u *Msg) RRsetUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) - } -} - -// RRsetNotUsed sets the RRs in the prereq section to -// "RRset does not exist" RRs. RFC 2136 section 2.4.3. -func (u *Msg) RRsetNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) - } -} - -// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. -func (u *Msg) Insert(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Ns = append(u.Ns, r) - } -} - -// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. 
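The prerequisite and update helpers above map directly onto RFC 2136 sections 2.4 and 2.5. A minimal sketch of composing and sending such a dynamic update through the package's public API follows; the zone, record, and server address are placeholders.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.") // the zone goes into the question section

	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}

	// Prerequisite: the name must not already exist (RFC 2136 section 2.4.5).
	m.NameNotUsed([]dns.RR{rr})
	// Update: add the RRset (RFC 2136 section 2.5.1).
	m.Insert([]dns.RR{rr})

	c := new(dns.Client)
	reply, _, err := c.Exchange(m, "192.0.2.53:53")
	if err != nil {
		log.Fatal(err)
	}
	if reply.Rcode != dns.RcodeSuccess {
		log.Fatalf("update refused: %s", dns.RcodeToString[reply.Rcode])
	}
}

SetUpdate fills the question section, which Insert and Used rely on to stamp the record class onto each RR.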
-func (u *Msg) RemoveRRset(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) - } -} - -// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 -func (u *Msg) RemoveName(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 -func (u *Msg) Remove(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = ClassNONE - r.Header().Ttl = 0 - u.Ns = append(u.Ns, r) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update_test.go deleted file mode 100644 index 12760a1ee..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/update_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package dns - -import ( - "bytes" - "testing" -) - -func TestDynamicUpdateParsing(t *testing.T) { - prefix := "example.com. IN " - for _, typ := range TypeToString { - if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || - typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" || - typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" { - continue - } - r, err := NewRR(prefix + typ) - if err != nil { - t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) - } else { - t.Logf("parsed: %s", r.String()) - } - } -} - -func TestDynamicUpdateUnpack(t *testing.T) { - // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 - // It should be an update message for the zone "example.", - // deleting the A RRset "example." and then adding an A record at "example.". - // class ANY, TYPE A - buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} - msg := new(Msg) - err := msg.Unpack(buf) - if err != nil { - t.Errorf("failed to unpack: %v\n%s", err, msg.String()) - } -} - -func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { - m := new(Msg) - rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} - m.Answer = []RR{rr, rr, rr, rr, rr} - m.Ns = m.Answer - for n, s := range TypeToString { - rr.Rrtype = n - bytes, err := m.Pack() - if err != nil { - t.Errorf("failed to pack %s: %v", s, err) - continue - } - if err := new(Msg).Unpack(bytes); err != nil { - t.Errorf("failed to unpack %s: %v", s, err) - } - } -} - -func TestRemoveRRset(t *testing.T) { - // Should add a zero data RR in Class ANY with a TTL of 0 - // for each set mentioned in the RRs provided to it. - rr, err := NewRR(". 
100 IN A 127.0.0.1") - if err != nil { - t.Fatalf("error constructing RR: %v", err) - } - m := new(Msg) - m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} - expectstr := m.String() - expect, err := m.Pack() - if err != nil { - t.Fatalf("error packing expected msg: %v", err) - } - - m.Ns = nil - m.RemoveRRset([]RR{rr}) - actual, err := m.Pack() - if err != nil { - t.Fatalf("error packing actual msg: %v", err) - } - if !bytes.Equal(actual, expect) { - tmp := new(Msg) - if err := tmp.Unpack(actual); err != nil { - t.Fatalf("error unpacking actual msg: %v\nexpected: %v\ngot: %v\n", err, expect, actual) - } - t.Errorf("expected msg:\n%s", expectstr) - t.Errorf("actual msg:\n%v", tmp) - } -} - -func TestPreReqAndRemovals(t *testing.T) { - // Build a list of multiple prereqs and then somes removes followed by an insert. - // We should be able to add multiple prereqs and updates. - m := new(Msg) - m.SetUpdate("example.org.") - m.Id = 1234 - - // Use a full set of RRs each time, so we are sure the rdata is stripped. - rrName1, _ := NewRR("name_used. 3600 IN A 127.0.0.1") - rrName2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1") - rrRemove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1") - rrRemove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1") - rrRemove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1") - rrInsert, _ := NewRR("insert. 3600 IN A 127.0.0.1") - rrRrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1") - rrRrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1") - rrRrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1") - - // Handle the prereqs. - m.NameUsed([]RR{rrName1}) - m.NameNotUsed([]RR{rrName2}) - m.RRsetUsed([]RR{rrRrset1}) - m.Used([]RR{rrRrset2}) - m.RRsetNotUsed([]RR{rrRrset3}) - - // and now the updates. - m.RemoveName([]RR{rrRemove1}) - m.RemoveRRset([]RR{rrRemove2}) - m.Remove([]RR{rrRemove3}) - m.Insert([]RR{rrInsert}) - - // This test function isn't a Example function because we print these RR with tabs at the - // end and the Example function trim these, thus they never match. - // TODO(miek): don't print these tabs and make this into an Example function. - expect := `;; opcode: UPDATE, status: NOERROR, id: 1234 -;; flags:; QUERY: 1, ANSWER: 5, AUTHORITY: 4, ADDITIONAL: 0 - -;; QUESTION SECTION: -;example.org. IN SOA - -;; ANSWER SECTION: -name_used. 0 ANY ANY -name_not_used. 0 NONE ANY -rrset_used1. 0 ANY A -rrset_used2. 3600 IN A 127.0.0.1 -rrset_not_used. 0 NONE A - -;; AUTHORITY SECTION: -remove1. 0 ANY ANY -remove2. 0 ANY A -remove3. 0 NONE A 127.0.0.1 -insert. 3600 IN A 127.0.0.1 -` - - if m.String() != expect { - t.Errorf("expected msg:\n%s", expect) - t.Errorf("actual msg:\n%v", m.String()) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr.go deleted file mode 100644 index 576c5590a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr.go +++ /dev/null @@ -1,255 +0,0 @@ -package dns - -import ( - "fmt" - "time" -) - -// Envelope is used when doing a zone transfer with a remote server. -type Envelope struct { - RR []RR // The set of RRs in the answer section of the xfr reply message. - Error error // If something went wrong, this contains the error. -} - -// A Transfer defines parameters that are used during a zone transfer. 
-type Transfer struct { - *Conn - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified - tsigTimersOnly bool -} - -// Think we need to away to stop the transfer - -// In performs an incoming transfer with the server in a. -// If you would like to set the source IP, or some other attribute -// of a Dialer for a Transfer, you can do so by specifying the attributes -// in the Transfer.Conn: -// -// d := net.Dialer{LocalAddr: transfer_source} -// con, err := d.Dial("tcp", master) -// dnscon := &dns.Conn{Conn:con} -// transfer = &dns.Transfer{Conn: dnscon} -// channel, err := transfer.In(message, master) -// -func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { - timeout := dnsTimeout - if t.DialTimeout != 0 { - timeout = t.DialTimeout - } - if t.Conn == nil { - t.Conn, err = DialTimeout("tcp", a, timeout) - if err != nil { - return nil, err - } - } - if err := t.WriteMsg(q); err != nil { - return nil, err - } - env = make(chan *Envelope) - go func() { - if q.Question[0].Qtype == TypeAXFR { - go t.inAxfr(q.Id, env) - return - } - if q.Question[0].Qtype == TypeIXFR { - go t.inIxfr(q.Id, env) - return - } - }() - return env, nil -} - -func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.Conn.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - first = !first - // only one answer that is SOA, receive more - if len(in.Answer) == 1 { - t.tsigTimersOnly = true - c <- &Envelope{in.Answer, nil} - continue - } - } - - if !first { - t.tsigTimersOnly = true // Subsequent envelopes use this. 
- if isSOALast(in) { - c <- &Envelope{in.Answer, nil} - return - } - c <- &Envelope{in.Answer, nil} - } - } -} - -func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { - serial := uint32(0) // The first serial seen is the current server serial - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - // A single SOA RR signals "no changes" - if len(in.Answer) == 1 && isSOAFirst(in) { - c <- &Envelope{in.Answer, nil} - return - } - - // Check if the returned answer is ok - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - // This serial is important - serial = in.Answer[0].(*SOA).Serial - first = !first - } - - // Now we need to check each message for SOA records, to see what we need to do - if !first { - t.tsigTimersOnly = true - // If the last record in the IXFR contains the servers' SOA, we should quit - if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { - if v.Serial == serial { - c <- &Envelope{in.Answer, nil} - return - } - } - c <- &Envelope{in.Answer, nil} - } - } -} - -// Out performs an outgoing transfer with the client connecting in w. -// Basic use pattern: -// -// ch := make(chan *dns.Envelope) -// tr := new(dns.Transfer) -// go tr.Out(w, r, ch) -// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} -// close(ch) -// w.Hijack() -// // w.Close() // Client closes connection -// -// The server is responsible for sending the correct sequence of RRs through the -// channel ch. -func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - for x := range ch { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return err - } - } - w.TsigTimersOnly(true) - return nil -} - -// ReadMsg reads a message from the transfer connection t. -func (t *Transfer) ReadMsg() (*Msg, error) { - m := new(Msg) - p := make([]byte, MaxMsgSize) - n, err := t.Read(p) - if err != nil && n == 0 { - return nil, err - } - p = p[:n] - if err := m.Unpack(p); err != nil { - return nil, err - } - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - t.tsigRequestMAC = ts.MAC - } - return m, err -} - -// WriteMsg writes a message through the transfer connection t. 
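Transfer.In above drives both AXFR and IXFR and streams the results back as Envelope values on a channel. A minimal sketch of a full zone transfer with it; the zone and server address are placeholders, and the server must permit AXFR from the caller.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.") // question type AXFR

	tr := new(dns.Transfer)
	env, err := tr.In(m, "192.0.2.53:53")
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error) // e.g. "dns: bad xfr rcode: ..."
		}
		for _, rr := range e.RR {
			log.Println(rr.String())
		}
	}
}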
-func (t *Transfer) WriteMsg(m *Msg) (err error) { - var out []byte - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return ErrSecret - } - out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - if _, err = t.Write(out); err != nil { - return err - } - return nil -} - -func isSOAFirst(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[0].Header().Rrtype == TypeSOA - } - return false -} - -func isSOALast(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA - } - return false -} - -const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr_test.go deleted file mode 100644 index a478963a3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/xfr_test.go +++ /dev/null @@ -1,184 +0,0 @@ -// +build net - -package dns - -import ( - "net" - "strings" - "testing" - "time" -) - -func getIP(s string) string { - a, err := net.LookupAddr(s) - if err != nil { - return "" - } - return a[0] -} - -// flaky, need to setup local server and test from that. -func TestAXFR_Miek(t *testing.T) { - // This test runs against a server maintained by Miek - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("miek.nl.") - - server := getIP("linode.atoom.net") - - tr := new(Transfer) - - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} - -// fails. 
-func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) { - // This test runs against a server maintained by NLnet Labs - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("nlnetlabs.nl.") - - server := getIP("open.nlnetlabs.nl.") - - tr := new(Transfer) - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatalf("failed to setup axfr %v for server: %v", err, server) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - } - } -} - -func TestAXFR_Miek_Tsig(t *testing.T) { - // This test runs against a server maintained by Miek - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("example.nl.") - m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) - - tr := new(Transfer) - tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - - if a, err := tr.In(m, "176.58.119.54:53"); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} - -func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") } -func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) } -func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) } -func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) } - -func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") } -func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) } -func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) } -func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) } - -func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") } -func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) } -func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) } -func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) } - -func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") } -func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) } -func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) } -func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) } - -func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") } -func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) } -func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) } -func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) } - -func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") } -func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) } -func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) } -func TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) } - -func testAXFRSIDN(t *testing.T, host, alg string) { - // This tests run against a server maintained by SIDN labs, see: - // https://workbench.sidnlabs.nl/ - if testing.Short() { - return - } - x := new(Transfer) - x.TsigSecret = map[string]string{ - "wb_md5.": "Wu/utSasZUkoeCNku152Zw==", - "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==", - "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=", - } - 
keyname := map[string]string{ - HmacMD5: "wb_md5.", - HmacSHA1: "wb_sha1_longkey.", - HmacSHA256: "wb_sha256.", - }[alg] - - m := new(Msg) - m.SetAxfr("types.wb.sidnlabs.nl.") - if keyname != "" { - m.SetTsig(keyname, alg, 300, time.Now().Unix()) - } - c, err := x.In(m, host+".sidnlabs.nl:53") - if err != nil { - t.Fatal(err) - } - for e := range c { - if e.Error != nil { - t.Fatal(e.Error) - } - } -} - -func TestAXFRFailNotAuth(t *testing.T) { - // This tests run against a server maintained by SIDN labs, see: - // https://workbench.sidnlabs.nl/ - if testing.Short() { - return - } - x := new(Transfer) - - m := new(Msg) - m.SetAxfr("sidnlabs.nl.") - c, err := x.In(m, "yadifa.sidnlabs.nl:53") - if err != nil { - t.Fatal(err) - } - for e := range c { - if e.Error != nil { - if !strings.HasPrefix(e.Error.Error(), "dns: bad xfr rcode:") { - t.Fatal(e.Error) - } - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zcompress.go deleted file mode 100644 index b277978b9..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zcompress.go +++ /dev/null @@ -1,119 +0,0 @@ -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from compress_generate.go - -package dns - -func compressionLenHelperType(c map[string]int, r RR) { - switch x := r.(type) { - case *PTR: - compressionLenHelper(c, x.Ptr) - case *SOA: - compressionLenHelper(c, x.Ns) - compressionLenHelper(c, x.Mbox) - case *AFSDB: - compressionLenHelper(c, x.Hostname) - case *HIP: - for i := range x.RendezvousServers { - compressionLenHelper(c, x.RendezvousServers[i]) - } - case *LP: - compressionLenHelper(c, x.Fqdn) - case *CNAME: - compressionLenHelper(c, x.Target) - case *MB: - compressionLenHelper(c, x.Mb) - case *RP: - compressionLenHelper(c, x.Mbox) - compressionLenHelper(c, x.Txt) - case *RRSIG: - compressionLenHelper(c, x.SignerName) - case *MF: - compressionLenHelper(c, x.Mf) - case *MINFO: - compressionLenHelper(c, x.Rmail) - compressionLenHelper(c, x.Email) - case *SIG: - compressionLenHelper(c, x.SignerName) - case *SRV: - compressionLenHelper(c, x.Target) - case *TSIG: - compressionLenHelper(c, x.Algorithm) - case *KX: - compressionLenHelper(c, x.Exchanger) - case *MG: - compressionLenHelper(c, x.Mg) - case *NSAPPTR: - compressionLenHelper(c, x.Ptr) - case *PX: - compressionLenHelper(c, x.Map822) - compressionLenHelper(c, x.Mapx400) - case *DNAME: - compressionLenHelper(c, x.Target) - case *MR: - compressionLenHelper(c, x.Mr) - case *MX: - compressionLenHelper(c, x.Mx) - case *TKEY: - compressionLenHelper(c, x.Algorithm) - case *NSEC: - compressionLenHelper(c, x.NextDomain) - case *TALINK: - compressionLenHelper(c, x.PreviousName) - compressionLenHelper(c, x.NextName) - case *MD: - compressionLenHelper(c, x.Md) - case *NAPTR: - compressionLenHelper(c, x.Replacement) - case *NS: - compressionLenHelper(c, x.Ns) - case *RT: - compressionLenHelper(c, x.Host) - } -} - -func compressionLenSearchType(c map[string]int, r RR) (int, bool) { - switch x := r.(type) { - case *MG: - k1, ok1 := compressionLenSearch(c, x.Mg) - return k1, ok1 - case *PTR: - k1, ok1 := compressionLenSearch(c, x.Ptr) - return k1, ok1 - case *AFSDB: - k1, ok1 := compressionLenSearch(c, x.Hostname) - return k1, ok1 - case *MB: - k1, ok1 := compressionLenSearch(c, x.Mb) - return k1, ok1 - case *MD: - k1, ok1 := compressionLenSearch(c, x.Md) - return k1, ok1 - case *MF: - k1, ok1 := compressionLenSearch(c, x.Mf) - return k1, ok1 - case 
*NS: - k1, ok1 := compressionLenSearch(c, x.Ns) - return k1, ok1 - case *RT: - k1, ok1 := compressionLenSearch(c, x.Host) - return k1, ok1 - case *SOA: - k1, ok1 := compressionLenSearch(c, x.Ns) - k2, ok2 := compressionLenSearch(c, x.Mbox) - return k1 + k2, ok1 && ok2 - case *CNAME: - k1, ok1 := compressionLenSearch(c, x.Target) - return k1, ok1 - case *MINFO: - k1, ok1 := compressionLenSearch(c, x.Rmail) - k2, ok2 := compressionLenSearch(c, x.Email) - return k1 + k2, ok1 && ok2 - case *MR: - k1, ok1 := compressionLenSearch(c, x.Mr) - return k1, ok1 - case *MX: - k1, ok1 := compressionLenSearch(c, x.Mx) - return k1, ok1 - } - return 0, false -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zmsg.go deleted file mode 100644 index 418fb1fe3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/zmsg.go +++ /dev/null @@ -1,3565 +0,0 @@ -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -// pack*() functions - -func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packDataA(rr.A, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packDataAAAA(rr.AAAA, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Subtype, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Flag, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Tag, msg, off) - if err != nil { - return off, err - } - off, err = packStringOctet(rr.Value, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CDNSKEY) 
pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Type, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Certificate, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringBase64(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - 
off, err = PackDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringHex(rr.Endpoint, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint48(rr.Address, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint64(rr.Address, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint32(rr.Gid, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.Longitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Latitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Altitude, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *HINFO) pack(msg 
[]byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.Cpu, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Os, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.HitLength, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.PublicKeyLength, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Hit, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDataA(rr.Locator32, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packUint64(rr.Locator64, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return 
off, err - } - headerEnd := off - off, err = packUint8(rr.Version, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Size, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.HorizPre, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.VertPre, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Latitude, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Longitude, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Altitude, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mb, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Md, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mf, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mg, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Email, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mr, msg, off, 
compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Mx, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Order, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Service, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Regexp, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Replacement, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packUint64(rr.NodeID, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringHex(rr.Locator, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringTxt(rr.ZSData, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ptr, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.NextDomain, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Hash, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return off, err - } - // Only pack salt if value is not "-", i.e. empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } - } - off, err = packUint8(rr.HashLength, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase32(rr.NextDomain, msg, off) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Hash, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return off, err - } - // Only pack salt if value is not "-", i.e. 
empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packDataOpt(rr.Option, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Map822, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringHex(rr.Rdata, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mbox, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Txt, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Host, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ns, msg, off, compression, 
compress) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Refresh, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Retry, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expire, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Minttl, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Port, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Type, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.FingerPrint, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.NextName, msg, off, compression, false) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, 
compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Mode, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeySize, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Key, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.OtherData, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packUint48(rr.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Fudge, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.MACSize, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.MAC, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.OrigId, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.OtherData, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint32(rr.Uid, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := 
rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.Uinfo, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return off, err - } - off, err = packStringOctet(rr.Target, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.PSDNAddress, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -// unpack*() functions - -func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(A) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.A, off, err = unpackDataA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AAAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.AAAA, off, err = unpackDataAAAA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AFSDB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Subtype, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Hostname, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(ANY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - return rr, off, err -} - -func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AVC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flag, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Tag, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Value, off, err = unpackStringOctet(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil 
- } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CERT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Type, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DHCID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DLV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil 
{ - return rr, off, err - } - return rr, off, err -} - -func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI48) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint48(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI64) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Gid, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GPOS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Longitude, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Latitude, off, 
err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Altitude, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(HINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Cpu, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Os, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(HIP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.HitLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKeyLength, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) - if err != nil { - return rr, off, err - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) - if err != nil { - return rr, off, err - } - rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Exchanger, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L32) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Locator32, off, err = unpackDataA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L64) - rr.Hdr = h - if noRdata(h) { - return rr, off, 
nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Locator64, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Version, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Size, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.HorizPre, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.VertPre, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Latitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Longitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Altitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Fqdn, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mb, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MD) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Md, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MF) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mf, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mg, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Rmail, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Email, off, err = UnpackDomainName(msg, off) - 
if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mr, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mx, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NAPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Order, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Service, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Regexp, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Replacement, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NodeID, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NIMLOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.ZSData, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSAPPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ptr, off, err = UnpackDomainName(msg, off) - if err != nil { - 
return rr, off, err - } - return rr, off, err -} - -func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.NextDomain, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) - if err != nil { - return rr, off, err - } - rr.HashLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) - if err != nil { - return rr, off, err - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3PARAM) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPENPGPKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Option, off, err = unpackDataOpt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = 
rdStart - - rr.Ptr, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Map822, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mapx400, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RFC3597) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Txt, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RRSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if 
err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Host, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SMIMEA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SOA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) 
{ - return rr, off, nil - } - rr.Refresh, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Retry, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expire, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Minttl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SPF) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SRV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Port, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SSHFP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Type, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TALINK) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PreviousName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NextName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr 
:= new(TKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mode, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeySize, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Key, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherData, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TLSA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TimeSigned, off, err = unpackUint48(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Fudge, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MACSize, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) - if err != nil { - return rr, off, err - } - rr.OrigId, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return rr, off, 
err - } - return rr, off, err -} - -func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TXT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uid, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uinfo, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(URI) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Target, off, err = unpackStringOctet(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(X25) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PSDNAddress, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ - TypeA: unpackA, - TypeAAAA: unpackAAAA, - TypeAFSDB: unpackAFSDB, - TypeANY: unpackANY, - TypeAVC: unpackAVC, - TypeCAA: unpackCAA, - TypeCDNSKEY: unpackCDNSKEY, - TypeCDS: unpackCDS, - TypeCERT: unpackCERT, - TypeCNAME: unpackCNAME, - TypeDHCID: unpackDHCID, - TypeDLV: unpackDLV, - TypeDNAME: unpackDNAME, - TypeDNSKEY: unpackDNSKEY, - TypeDS: unpackDS, - TypeEID: unpackEID, - TypeEUI48: unpackEUI48, - TypeEUI64: unpackEUI64, - TypeGID: unpackGID, - TypeGPOS: unpackGPOS, - TypeHINFO: unpackHINFO, - TypeHIP: unpackHIP, - TypeKEY: unpackKEY, - TypeKX: unpackKX, - TypeL32: unpackL32, - TypeL64: unpackL64, - TypeLOC: unpackLOC, - TypeLP: unpackLP, - TypeMB: unpackMB, - TypeMD: unpackMD, - TypeMF: unpackMF, - TypeMG: unpackMG, - TypeMINFO: unpackMINFO, - TypeMR: unpackMR, - TypeMX: unpackMX, - TypeNAPTR: unpackNAPTR, - TypeNID: unpackNID, - TypeNIMLOC: unpackNIMLOC, - TypeNINFO: unpackNINFO, - TypeNS: unpackNS, - TypeNSAPPTR: unpackNSAPPTR, - TypeNSEC: unpackNSEC, - TypeNSEC3: unpackNSEC3, - TypeNSEC3PARAM: unpackNSEC3PARAM, - TypeOPENPGPKEY: unpackOPENPGPKEY, - TypeOPT: unpackOPT, - TypePTR: unpackPTR, - TypePX: unpackPX, - TypeRKEY: unpackRKEY, - TypeRP: unpackRP, - TypeRRSIG: unpackRRSIG, - TypeRT: unpackRT, - TypeSIG: unpackSIG, - TypeSMIMEA: unpackSMIMEA, - TypeSOA: unpackSOA, - TypeSPF: unpackSPF, - TypeSRV: unpackSRV, - TypeSSHFP: unpackSSHFP, - TypeTA: unpackTA, - TypeTALINK: unpackTALINK, - TypeTKEY: unpackTKEY, - TypeTLSA: unpackTLSA, - TypeTSIG: unpackTSIG, - TypeTXT: unpackTXT, - TypeUID: unpackUID, - TypeUINFO: unpackUINFO, - TypeURI: unpackURI, - TypeX25: 
unpackX25, -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/ztypes.go deleted file mode 100644 index 3e534f12e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/miekg/dns/ztypes.go +++ /dev/null @@ -1,857 +0,0 @@ -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ - TypeA: func() RR { return new(A) }, - TypeAAAA: func() RR { return new(AAAA) }, - TypeAFSDB: func() RR { return new(AFSDB) }, - TypeANY: func() RR { return new(ANY) }, - TypeAVC: func() RR { return new(AVC) }, - TypeCAA: func() RR { return new(CAA) }, - TypeCDNSKEY: func() RR { return new(CDNSKEY) }, - TypeCDS: func() RR { return new(CDS) }, - TypeCERT: func() RR { return new(CERT) }, - TypeCNAME: func() RR { return new(CNAME) }, - TypeDHCID: func() RR { return new(DHCID) }, - TypeDLV: func() RR { return new(DLV) }, - TypeDNAME: func() RR { return new(DNAME) }, - TypeDNSKEY: func() RR { return new(DNSKEY) }, - TypeDS: func() RR { return new(DS) }, - TypeEID: func() RR { return new(EID) }, - TypeEUI48: func() RR { return new(EUI48) }, - TypeEUI64: func() RR { return new(EUI64) }, - TypeGID: func() RR { return new(GID) }, - TypeGPOS: func() RR { return new(GPOS) }, - TypeHINFO: func() RR { return new(HINFO) }, - TypeHIP: func() RR { return new(HIP) }, - TypeKEY: func() RR { return new(KEY) }, - TypeKX: func() RR { return new(KX) }, - TypeL32: func() RR { return new(L32) }, - TypeL64: func() RR { return new(L64) }, - TypeLOC: func() RR { return new(LOC) }, - TypeLP: func() RR { return new(LP) }, - TypeMB: func() RR { return new(MB) }, - TypeMD: func() RR { return new(MD) }, - TypeMF: func() RR { return new(MF) }, - TypeMG: func() RR { return new(MG) }, - TypeMINFO: func() RR { return new(MINFO) }, - TypeMR: func() RR { return new(MR) }, - TypeMX: func() RR { return new(MX) }, - TypeNAPTR: func() RR { return new(NAPTR) }, - TypeNID: func() RR { return new(NID) }, - TypeNIMLOC: func() RR { return new(NIMLOC) }, - TypeNINFO: func() RR { return new(NINFO) }, - TypeNS: func() RR { return new(NS) }, - TypeNSAPPTR: func() RR { return new(NSAPPTR) }, - TypeNSEC: func() RR { return new(NSEC) }, - TypeNSEC3: func() RR { return new(NSEC3) }, - TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, - TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, - TypeOPT: func() RR { return new(OPT) }, - TypePTR: func() RR { return new(PTR) }, - TypePX: func() RR { return new(PX) }, - TypeRKEY: func() RR { return new(RKEY) }, - TypeRP: func() RR { return new(RP) }, - TypeRRSIG: func() RR { return new(RRSIG) }, - TypeRT: func() RR { return new(RT) }, - TypeSIG: func() RR { return new(SIG) }, - TypeSMIMEA: func() RR { return new(SMIMEA) }, - TypeSOA: func() RR { return new(SOA) }, - TypeSPF: func() RR { return new(SPF) }, - TypeSRV: func() RR { return new(SRV) }, - TypeSSHFP: func() RR { return new(SSHFP) }, - TypeTA: func() RR { return new(TA) }, - TypeTALINK: func() RR { return new(TALINK) }, - TypeTKEY: func() RR { return new(TKEY) }, - TypeTLSA: func() RR { return new(TLSA) }, - TypeTSIG: func() RR { return new(TSIG) }, - TypeTXT: func() RR { return new(TXT) }, - TypeUID: func() RR { return new(UID) }, - TypeUINFO: func() RR { return new(UINFO) }, - TypeURI: func() RR { return new(URI) }, - TypeX25: func() RR { return new(X25) }, -} - -// 
TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ - TypeA: "A", - TypeAAAA: "AAAA", - TypeAFSDB: "AFSDB", - TypeANY: "ANY", - TypeATMA: "ATMA", - TypeAVC: "AVC", - TypeAXFR: "AXFR", - TypeCAA: "CAA", - TypeCDNSKEY: "CDNSKEY", - TypeCDS: "CDS", - TypeCERT: "CERT", - TypeCNAME: "CNAME", - TypeDHCID: "DHCID", - TypeDLV: "DLV", - TypeDNAME: "DNAME", - TypeDNSKEY: "DNSKEY", - TypeDS: "DS", - TypeEID: "EID", - TypeEUI48: "EUI48", - TypeEUI64: "EUI64", - TypeGID: "GID", - TypeGPOS: "GPOS", - TypeHINFO: "HINFO", - TypeHIP: "HIP", - TypeISDN: "ISDN", - TypeIXFR: "IXFR", - TypeKEY: "KEY", - TypeKX: "KX", - TypeL32: "L32", - TypeL64: "L64", - TypeLOC: "LOC", - TypeLP: "LP", - TypeMAILA: "MAILA", - TypeMAILB: "MAILB", - TypeMB: "MB", - TypeMD: "MD", - TypeMF: "MF", - TypeMG: "MG", - TypeMINFO: "MINFO", - TypeMR: "MR", - TypeMX: "MX", - TypeNAPTR: "NAPTR", - TypeNID: "NID", - TypeNIMLOC: "NIMLOC", - TypeNINFO: "NINFO", - TypeNS: "NS", - TypeNSEC: "NSEC", - TypeNSEC3: "NSEC3", - TypeNSEC3PARAM: "NSEC3PARAM", - TypeNULL: "NULL", - TypeNXT: "NXT", - TypeNone: "None", - TypeOPENPGPKEY: "OPENPGPKEY", - TypeOPT: "OPT", - TypePTR: "PTR", - TypePX: "PX", - TypeRKEY: "RKEY", - TypeRP: "RP", - TypeRRSIG: "RRSIG", - TypeRT: "RT", - TypeReserved: "Reserved", - TypeSIG: "SIG", - TypeSMIMEA: "SMIMEA", - TypeSOA: "SOA", - TypeSPF: "SPF", - TypeSRV: "SRV", - TypeSSHFP: "SSHFP", - TypeTA: "TA", - TypeTALINK: "TALINK", - TypeTKEY: "TKEY", - TypeTLSA: "TLSA", - TypeTSIG: "TSIG", - TypeTXT: "TXT", - TypeUID: "UID", - TypeUINFO: "UINFO", - TypeUNSPEC: "UNSPEC", - TypeURI: "URI", - TypeX25: "X25", - TypeNSAPPTR: "NSAP-PTR", -} - -// Header() functions -func (rr *A) Header() *RR_Header { return &rr.Hdr } -func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } -func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } -func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *AVC) Header() *RR_Header { return &rr.Hdr } -func (rr *CAA) Header() *RR_Header { return &rr.Hdr } -func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *CDS) Header() *RR_Header { return &rr.Hdr } -func (rr *CERT) Header() *RR_Header { return &rr.Hdr } -func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } -func (rr *DLV) Header() *RR_Header { return &rr.Hdr } -func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *DS) Header() *RR_Header { return &rr.Hdr } -func (rr *EID) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } -func (rr *GID) Header() *RR_Header { return &rr.Hdr } -func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } -func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *HIP) Header() *RR_Header { return &rr.Hdr } -func (rr *KEY) Header() *RR_Header { return &rr.Hdr } -func (rr *KX) Header() *RR_Header { return &rr.Hdr } -func (rr *L32) Header() *RR_Header { return &rr.Hdr } -func (rr *L64) Header() *RR_Header { return &rr.Hdr } -func (rr *LOC) Header() *RR_Header { return &rr.Hdr } -func (rr *LP) Header() *RR_Header { return &rr.Hdr } -func (rr *MB) Header() *RR_Header { return &rr.Hdr } -func (rr *MD) Header() *RR_Header { return &rr.Hdr } -func (rr *MF) Header() *RR_Header { return &rr.Hdr } -func (rr *MG) Header() *RR_Header { return &rr.Hdr } -func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *MR) Header() *RR_Header { 
return &rr.Hdr } -func (rr *MX) Header() *RR_Header { return &rr.Hdr } -func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NID) Header() *RR_Header { return &rr.Hdr } -func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } -func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *NS) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } -func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *OPT) Header() *RR_Header { return &rr.Hdr } -func (rr *PTR) Header() *RR_Header { return &rr.Hdr } -func (rr *PX) Header() *RR_Header { return &rr.Hdr } -func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } -func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *RP) Header() *RR_Header { return &rr.Hdr } -func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *RT) Header() *RR_Header { return &rr.Hdr } -func (rr *SIG) Header() *RR_Header { return &rr.Hdr } -func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } -func (rr *SOA) Header() *RR_Header { return &rr.Hdr } -func (rr *SPF) Header() *RR_Header { return &rr.Hdr } -func (rr *SRV) Header() *RR_Header { return &rr.Hdr } -func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } -func (rr *TA) Header() *RR_Header { return &rr.Hdr } -func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } -func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } -func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *TXT) Header() *RR_Header { return &rr.Hdr } -func (rr *UID) Header() *RR_Header { return &rr.Hdr } -func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *URI) Header() *RR_Header { return &rr.Hdr } -func (rr *X25) Header() *RR_Header { return &rr.Hdr } - -// len() functions -func (rr *A) len() int { - l := rr.Hdr.len() - l += net.IPv4len // A - return l -} -func (rr *AAAA) len() int { - l := rr.Hdr.len() - l += net.IPv6len // AAAA - return l -} -func (rr *AFSDB) len() int { - l := rr.Hdr.len() - l += 2 // Subtype - l += len(rr.Hostname) + 1 - return l -} -func (rr *ANY) len() int { - l := rr.Hdr.len() - return l -} -func (rr *AVC) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *CAA) len() int { - l := rr.Hdr.len() - l++ // Flag - l += len(rr.Tag) + 1 - l += len(rr.Value) - return l -} -func (rr *CERT) len() int { - l := rr.Hdr.len() - l += 2 // Type - l += 2 // KeyTag - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) - return l -} -func (rr *CNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 - return l -} -func (rr *DHCID) len() int { - l := rr.Hdr.len() - l += base64.StdEncoding.DecodedLen(len(rr.Digest)) - return l -} -func (rr *DNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 - return l -} -func (rr *DNSKEY) len() int { - l := rr.Hdr.len() - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *DS) len() int { - l := rr.Hdr.len() - l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *EID) len() int { - l := rr.Hdr.len() - l += len(rr.Endpoint)/2 + 1 - return l -} -func (rr *EUI48) len() int { - l := rr.Hdr.len() - l += 6 // Address - return 
l -} -func (rr *EUI64) len() int { - l := rr.Hdr.len() - l += 8 // Address - return l -} -func (rr *GID) len() int { - l := rr.Hdr.len() - l += 4 // Gid - return l -} -func (rr *GPOS) len() int { - l := rr.Hdr.len() - l += len(rr.Longitude) + 1 - l += len(rr.Latitude) + 1 - l += len(rr.Altitude) + 1 - return l -} -func (rr *HINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Cpu) + 1 - l += len(rr.Os) + 1 - return l -} -func (rr *HIP) len() int { - l := rr.Hdr.len() - l++ // HitLength - l++ // PublicKeyAlgorithm - l += 2 // PublicKeyLength - l += len(rr.Hit)/2 + 1 - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - for _, x := range rr.RendezvousServers { - l += len(x) + 1 - } - return l -} -func (rr *KX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Exchanger) + 1 - return l -} -func (rr *L32) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += net.IPv4len // Locator32 - return l -} -func (rr *L64) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += 8 // Locator64 - return l -} -func (rr *LOC) len() int { - l := rr.Hdr.len() - l++ // Version - l++ // Size - l++ // HorizPre - l++ // VertPre - l += 4 // Latitude - l += 4 // Longitude - l += 4 // Altitude - return l -} -func (rr *LP) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Fqdn) + 1 - return l -} -func (rr *MB) len() int { - l := rr.Hdr.len() - l += len(rr.Mb) + 1 - return l -} -func (rr *MD) len() int { - l := rr.Hdr.len() - l += len(rr.Md) + 1 - return l -} -func (rr *MF) len() int { - l := rr.Hdr.len() - l += len(rr.Mf) + 1 - return l -} -func (rr *MG) len() int { - l := rr.Hdr.len() - l += len(rr.Mg) + 1 - return l -} -func (rr *MINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Rmail) + 1 - l += len(rr.Email) + 1 - return l -} -func (rr *MR) len() int { - l := rr.Hdr.len() - l += len(rr.Mr) + 1 - return l -} -func (rr *MX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Mx) + 1 - return l -} -func (rr *NAPTR) len() int { - l := rr.Hdr.len() - l += 2 // Order - l += 2 // Preference - l += len(rr.Flags) + 1 - l += len(rr.Service) + 1 - l += len(rr.Regexp) + 1 - l += len(rr.Replacement) + 1 - return l -} -func (rr *NID) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += 8 // NodeID - return l -} -func (rr *NIMLOC) len() int { - l := rr.Hdr.len() - l += len(rr.Locator)/2 + 1 - return l -} -func (rr *NINFO) len() int { - l := rr.Hdr.len() - for _, x := range rr.ZSData { - l += len(x) + 1 - } - return l -} -func (rr *NS) len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 - return l -} -func (rr *NSAPPTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 - return l -} -func (rr *NSEC3PARAM) len() int { - l := rr.Hdr.len() - l++ // Hash - l++ // Flags - l += 2 // Iterations - l++ // SaltLength - l += len(rr.Salt)/2 + 1 - return l -} -func (rr *OPENPGPKEY) len() int { - l := rr.Hdr.len() - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *PTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 - return l -} -func (rr *PX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Map822) + 1 - l += len(rr.Mapx400) + 1 - return l -} -func (rr *RFC3597) len() int { - l := rr.Hdr.len() - l += len(rr.Rdata)/2 + 1 - return l -} -func (rr *RKEY) len() int { - l := rr.Hdr.len() - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *RP) len() int { - l := rr.Hdr.len() - l += len(rr.Mbox) + 1 - l += len(rr.Txt) + 
1 - return l -} -func (rr *RRSIG) len() int { - l := rr.Hdr.len() - l += 2 // TypeCovered - l++ // Algorithm - l++ // Labels - l += 4 // OrigTtl - l += 4 // Expiration - l += 4 // Inception - l += 2 // KeyTag - l += len(rr.SignerName) + 1 - l += base64.StdEncoding.DecodedLen(len(rr.Signature)) - return l -} -func (rr *RT) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Host) + 1 - return l -} -func (rr *SMIMEA) len() int { - l := rr.Hdr.len() - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *SOA) len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 - l += len(rr.Mbox) + 1 - l += 4 // Serial - l += 4 // Refresh - l += 4 // Retry - l += 4 // Expire - l += 4 // Minttl - return l -} -func (rr *SPF) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *SRV) len() int { - l := rr.Hdr.len() - l += 2 // Priority - l += 2 // Weight - l += 2 // Port - l += len(rr.Target) + 1 - return l -} -func (rr *SSHFP) len() int { - l := rr.Hdr.len() - l++ // Algorithm - l++ // Type - l += len(rr.FingerPrint)/2 + 1 - return l -} -func (rr *TA) len() int { - l := rr.Hdr.len() - l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *TALINK) len() int { - l := rr.Hdr.len() - l += len(rr.PreviousName) + 1 - l += len(rr.NextName) + 1 - return l -} -func (rr *TKEY) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 - l += 4 // Inception - l += 4 // Expiration - l += 2 // Mode - l += 2 // Error - l += 2 // KeySize - l += len(rr.Key) + 1 - l += 2 // OtherLen - l += len(rr.OtherData) + 1 - return l -} -func (rr *TLSA) len() int { - l := rr.Hdr.len() - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *TSIG) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 - l += 6 // TimeSigned - l += 2 // Fudge - l += 2 // MACSize - l += len(rr.MAC)/2 + 1 - l += 2 // OrigId - l += 2 // Error - l += 2 // OtherLen - l += len(rr.OtherData)/2 + 1 - return l -} -func (rr *TXT) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *UID) len() int { - l := rr.Hdr.len() - l += 4 // Uid - return l -} -func (rr *UINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Uinfo) + 1 - return l -} -func (rr *URI) len() int { - l := rr.Hdr.len() - l += 2 // Priority - l += 2 // Weight - l += len(rr.Target) - return l -} -func (rr *X25) len() int { - l := rr.Hdr.len() - l += len(rr.PSDNAddress) + 1 - return l -} - -// copy() functions -func (rr *A) copy() RR { - return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} -} -func (rr *AAAA) copy() RR { - return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} -} -func (rr *AFSDB) copy() RR { - return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} -} -func (rr *ANY) copy() RR { - return &ANY{*rr.Hdr.copyHeader()} -} -func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{*rr.Hdr.copyHeader(), Txt} -} -func (rr *CAA) copy() RR { - return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} -} -func (rr *CERT) copy() RR { - return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} -} -func (rr *CNAME) copy() RR { - return &CNAME{*rr.Hdr.copyHeader(), rr.Target} -} -func (rr *DHCID) copy() RR { - return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} -} -func (rr *DNAME) copy() RR { - return &DNAME{*rr.Hdr.copyHeader(), rr.Target} -} 
-func (rr *DNSKEY) copy() RR { - return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *DS) copy() RR { - return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *EID) copy() RR { - return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} -} -func (rr *EUI48) copy() RR { - return &EUI48{*rr.Hdr.copyHeader(), rr.Address} -} -func (rr *EUI64) copy() RR { - return &EUI64{*rr.Hdr.copyHeader(), rr.Address} -} -func (rr *GID) copy() RR { - return &GID{*rr.Hdr.copyHeader(), rr.Gid} -} -func (rr *GPOS) copy() RR { - return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} -} -func (rr *HINFO) copy() RR { - return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} -} -func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} -} -func (rr *KX) copy() RR { - return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} -} -func (rr *L32) copy() RR { - return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} -} -func (rr *L64) copy() RR { - return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} -} -func (rr *LOC) copy() RR { - return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} -} -func (rr *LP) copy() RR { - return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} -} -func (rr *MB) copy() RR { - return &MB{*rr.Hdr.copyHeader(), rr.Mb} -} -func (rr *MD) copy() RR { - return &MD{*rr.Hdr.copyHeader(), rr.Md} -} -func (rr *MF) copy() RR { - return &MF{*rr.Hdr.copyHeader(), rr.Mf} -} -func (rr *MG) copy() RR { - return &MG{*rr.Hdr.copyHeader(), rr.Mg} -} -func (rr *MINFO) copy() RR { - return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} -} -func (rr *MR) copy() RR { - return &MR{*rr.Hdr.copyHeader(), rr.Mr} -} -func (rr *MX) copy() RR { - return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} -} -func (rr *NAPTR) copy() RR { - return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} -} -func (rr *NID) copy() RR { - return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} -} -func (rr *NIMLOC) copy() RR { - return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} -} -func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{*rr.Hdr.copyHeader(), ZSData} -} -func (rr *NS) copy() RR { - return &NS{*rr.Hdr.copyHeader(), rr.Ns} -} -func (rr *NSAPPTR) copy() RR { - return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} -} -func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} -} -func (rr *OPENPGPKEY) copy() RR { - return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} -} -func (rr *OPT) copy() RR { - Option := make([]EDNS0, len(rr.Option)) - copy(Option, rr.Option) - return &OPT{*rr.Hdr.copyHeader(), Option} -} -func 
(rr *PTR) copy() RR { - return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} -} -func (rr *PX) copy() RR { - return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} -} -func (rr *RFC3597) copy() RR { - return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} -} -func (rr *RKEY) copy() RR { - return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *RP) copy() RR { - return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} -} -func (rr *RRSIG) copy() RR { - return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} -} -func (rr *RT) copy() RR { - return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} -} -func (rr *SMIMEA) copy() RR { - return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *SOA) copy() RR { - return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} -} -func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{*rr.Hdr.copyHeader(), Txt} -} -func (rr *SRV) copy() RR { - return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} -} -func (rr *SSHFP) copy() RR { - return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} -} -func (rr *TA) copy() RR { - return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *TALINK) copy() RR { - return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} -} -func (rr *TKEY) copy() RR { - return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} -} -func (rr *TLSA) copy() RR { - return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *TSIG) copy() RR { - return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} -} -func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{*rr.Hdr.copyHeader(), Txt} -} -func (rr *UID) copy() RR { - return &UID{*rr.Hdr.copyHeader(), rr.Uid} -} -func (rr *UINFO) copy() RR { - return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} -} -func (rr *URI) copy() RR { - return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} -} -func (rr *X25) copy() RR { - return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitcookies.enc b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitcookies.enc deleted file mode 100644 index 09c303c94..000000000 Binary files a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitcookies.enc and /dev/null differ diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitignore b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitignore deleted file mode 100644 index 74d32f0ab..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -lego.exe -lego -.lego -.idea diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.travis.yml b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.travis.yml deleted file mode 100644 index ff9ae963a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/.travis.yml +++ /dev/null @@ -1,16 
+0,0 @@ -language: go -go: -- 1.7 -- 1.8 -- tip -services: - - memcached -env: - - MEMCACHED_HOSTS=localhost:11211 -install: -- go get -t ./... -script: -- go vet ./... -- go test -v ./... -before_install: -- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_26c593b079d9_key -iv $encrypted_26c593b079d9_iv -in .gitcookies.enc -out .gitcookies -d || true' diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CHANGELOG.md b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CHANGELOG.md deleted file mode 100644 index c43c4a936..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CHANGELOG.md +++ /dev/null @@ -1,94 +0,0 @@ -# Changelog - -## [0.3.1] - 2016-04-19 - -### Added: -- lib: A new DNS provider for Vultr. - -### Fixed: -- lib: DNS Provider for DigitalOcean could not handle subdomains properly. -- lib: handleHTTPError should only try to JSON decode error messages with the right content type. -- lib: The propagation checker for the DNS challenge would not retry on send errors. - - -## [0.3.0] - 2016-03-19 - -### Added: -- CLI: The `--dns` switch. To include the DNS challenge for consideration. When using this switch, all other solvers are disabled. Supported are the following solvers: cloudflare, digitalocean, dnsimple, dyn, gandi, googlecloud, namecheap, route53, rfc2136 and manual. -- CLI: The `--accept-tos` switch. Indicates your acceptance of the Let's Encrypt terms of service without prompting you. -- CLI: The `--webroot` switch. The HTTP-01 challenge may now be completed by dropping a file into a webroot. When using this switch, all other solvers are disabled. -- CLI: The `--key-type` switch. This replaces the `--rsa-key-size` switch and supports the following key types: EC256, EC384, RSA2048, RSA4096 and RSA8192. -- CLI: The `--dnshelp` switch. This displays a more in-depth help topic for DNS solvers. -- CLI: The `--no-bundle` sub switch for the `run` and `renew` commands. When this switch is set, the CLI will not bundle the issuer certificate with your certificate. -- lib: A new type for challenge identifiers `Challenge` -- lib: A new interface for custom challenge providers `acme.ChallengeProvider` -- lib: A new interface for DNS-01 providers to allow for custom timeouts for the validation function `acme.ChallengeProviderTimeout` -- lib: SetChallengeProvider function. Pass a challenge identifier and a Provider to replace the default behaviour of a challenge. -- lib: The DNS-01 challenge has been implemented with modular solvers using the `ChallengeProvider` interface. Included solvers are: cloudflare, digitalocean, dnsimple, gandi, namecheap, route53, rfc2136 and manual. -- lib: The `acme.KeyType` type was added and is used for the configuration of crypto parameters for RSA and EC keys. Valid KeyTypes are: EC256, EC384, RSA2048, RSA4096 and RSA8192. - -### Changed -- lib: ExcludeChallenges now expects to be passed an array of `Challenge` types. -- lib: HTTP-01 now supports custom solvers using the `ChallengeProvider` interface. -- lib: TLS-SNI-01 now supports custom solvers using the `ChallengeProvider` interface. -- lib: The `GetPrivateKey` function in the `acme.User` interface is now expected to return a `crypto.PrivateKey` instead of an `rsa.PrivateKey` for EC compat. -- lib: The `acme.NewClient` function now expects an `acme.KeyType` instead of the keyBits parameter. - -### Removed -- CLI: The `rsa-key-size` switch was removed in favor of `key-type` to support EC keys. 
- -### Fixed -- lib: Fixed a race condition in HTTP-01 -- lib: Fixed an issue where status codes on ACME challenge responses could lead to no action being taken. -- lib: Fixed a regression when calling the Renew function with a SAN certificate. - -## [0.2.0] - 2016-01-09 - -### Added: -- CLI: The `--exclude` or `-x` switch. To exclude a challenge from being solved. -- CLI: The `--http` switch. To set the listen address and port of HTTP based challenges. Supports `host:port` and `:port` for any interface. -- CLI: The `--tls` switch. To set the listen address and port of TLS based challenges. Supports `host:port` and `:port` for any interface. -- CLI: The `--reuse-key` switch for the `renew` operation. This lets you reuse an existing private key for renewals. -- lib: ExcludeChallenges function. Pass an array of challenge identifiers to exclude them from solving. -- lib: SetHTTPAddress function. Pass a port to set the listen port for HTTP based challenges. -- lib: SetTLSAddress function. Pass a port to set the listen port of TLS based challenges. -- lib: acme.UserAgent variable. Use this to customize the user agent on all requests sent by lego. - -### Changed: -- lib: NewClient does no longer accept the optPort parameter -- lib: ObtainCertificate now returns a SAN certificate if you pass more then one domain. -- lib: GetOCSPForCert now returns the parsed OCSP response instead of just the status. -- lib: ObtainCertificate has a new parameter `privKey crypto.PrivateKey` which lets you reuse an existing private key for new certificates. -- lib: RenewCertificate now expects the PrivateKey property of the CertificateResource to be set only if you want to reuse the key. - -### Removed: -- CLI: The `--port` switch was removed. -- lib: RenewCertificate does no longer offer to also revoke your old certificate. - -### Fixed: -- CLI: Fix logic using the `--days` parameter for renew - -## [0.1.1] - 2015-12-18 - -### Added: -- CLI: Added a way to automate renewal through a cronjob using the --days parameter to renew - -### Changed: -- lib: Improved log output on challenge failures. - -### Fixed: -- CLI: The short parameter for domains would not get accepted -- CLI: The cli did not return proper exit codes on error library errors. -- lib: RenewCertificate did not properly renew SAN certificates. - -### Security -- lib: Fix possible DOS on GetOCSPForCert - -## [0.1.0] - 2015-12-03 -- Initial release - -[0.3.1]: https://github.com/xenolf/lego/compare/v0.3.0...v0.3.1 -[0.3.0]: https://github.com/xenolf/lego/compare/v0.2.0...v0.3.0 -[0.2.0]: https://github.com/xenolf/lego/compare/v0.1.1...v0.2.0 -[0.1.1]: https://github.com/xenolf/lego/compare/v0.1.0...v0.1.1 -[0.1.0]: https://github.com/xenolf/lego/tree/v0.1.0 diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CONTRIBUTING.md b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CONTRIBUTING.md deleted file mode 100644 index 9939a5ab3..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/CONTRIBUTING.md +++ /dev/null @@ -1,32 +0,0 @@ -# How to contribute to lego - -Contributions in the form of patches and proposals are essential to keep lego great and to make it even better. -To ensure a great and easy experience for everyone, please review the few guidelines in this document. - -## Bug reports - -- Use the issue search to see if the issue has already been reported. -- Also look for closed issues to see if your issue has already been fixed. 
-- If both of the above do not apply create a new issue and include as much information as possible. - -Bug reports should include all information a person could need to reproduce your problem without the need to -follow up for more information. If possible, provide detailed steps for us to reproduce it, the expected behaviour and the actual behaviour. - -## Feature proposals and requests - -Feature requests are welcome and should be discussed in an issue. -Please keep proposals focused on one thing at a time and be as detailed as possible. -It is up to you to make a strong point about your proposal and convince us of the merits and the added complexity of this feature. - -## Pull requests - -Patches, new features and improvements are a great way to help the project. -Please keep them focused on one thing and do not include unrelated commits. - -All pull requests which alter the behaviour of the program, add new behaviour or somehow alter code in a non-trivial way should **always** include tests. - -If you want to contribute a significant pull request (with a non-trivial workload for you) please **ask first**. We do not want you to spend -a lot of time on something the project's developers might not want to merge into the project. - -**IMPORTANT**: By submitting a patch, you agree to allow the project -owners to license your work under the terms of the [MIT License](LICENSE). diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/Dockerfile b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/Dockerfile deleted file mode 100644 index c03964076..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine:3.4 - -ENV GOPATH /go - -RUN apk update && apk add ca-certificates go git && \ - rm -rf /var/cache/apk/* && \ - go get -u github.com/xenolf/lego && \ - cd /go/src/github.com/xenolf/lego && \ - go build -o /usr/bin/lego . && \ - apk del go git && \ - rm -rf /var/cache/apk/* && \ - rm -rf /go - -ENTRYPOINT [ "/usr/bin/lego" ] diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE deleted file mode 100644 index 17460b716..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Sebastian Erhart - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/README.md deleted file mode 100644 index 5dc9d550d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/README.md +++ /dev/null @@ -1,267 +0,0 @@ -# lego -Let's Encrypt client and ACME library written in Go - -[![GoDoc](https://godoc.org/github.com/xenolf/lego/acme?status.svg)](https://godoc.org/github.com/xenolf/lego/acme) -[![Build Status](https://travis-ci.org/xenolf/lego.svg?branch=master)](https://travis-ci.org/xenolf/lego) -[![Dev Chat](https://img.shields.io/badge/dev%20chat-gitter-blue.svg?label=dev+chat)](https://gitter.im/xenolf/lego) - -#### General -This is a work in progress. Please do *NOT* run this on a production server and please report any bugs you find! - -#### Installation -lego supports both binary installs and install from source. - -To get the binary just download the latest release for your OS/Arch from [the release page](https://github.com/xenolf/lego/releases) -and put the binary somewhere convenient. lego does not assume anything about the location you run it from. - -To install from source, just run -``` -go get -u github.com/xenolf/lego -``` - -To build lego inside a Docker container, just run -``` -docker build -t lego . -``` -##### From the package manager -- [ArchLinux (AUR)](https://aur.archlinux.org/packages/lego-git): -``` -yaourt -S lego-git -``` -#### Features - -- Register with CA -- Obtain certificates, both from scratch or with an existing CSR -- Renew certificates -- Revoke certificates -- Robust implementation of all ACME challenges - - HTTP (http-01) - - TLS with Server Name Indication (tls-sni-01) - - DNS (dns-01) -- SAN certificate support -- Comes with multiple optional [DNS providers](https://github.com/xenolf/lego/tree/master/providers/dns) -- [Custom challenge solvers](https://github.com/xenolf/lego/wiki/Writing-a-Challenge-Solver) -- Certificate bundling -- OCSP helper function - -Please keep in mind that CLI switches and APIs are still subject to change. - -When using the standard `--path` option, all certificates and account configurations are saved to a folder *.lego* in the current working directory. - -#### Sudo -The CLI does not require root permissions but needs to bind to port 80 and 443 for certain challenges. -To run the CLI without sudo, you have four options: - -- Use setcap 'cap_net_bind_service=+ep' /path/to/program -- Pass the `--http` or/and the `--tls` option and specify a custom port to bind to. In this case you have to forward port 80/443 to these custom ports (see [Port Usage](#port-usage)). -- Pass the `--webroot` option and specify the path to your webroot folder. In this case the challenge will be written in a file in `.well-known/acme-challenge/` inside your webroot. -- Pass the `--dns` option and specify a DNS provider. - -#### Port Usage -By default lego assumes it is able to bind to ports 80 and 443 to solve challenges. -If this is not possible in your environment, you can use the `--http` and `--tls` options to instruct -lego to listen on that interface:port for any incoming challenges. - -If you are using this option, make sure you proxy all of the following traffic to these ports. - -HTTP Port: -- All plaintext HTTP requests to port 80 which begin with a request path of `/.well-known/acme-challenge/` for the HTTP challenge. - -TLS Port: -- All TLS handshakes on port 443 for the TLS-SNI challenge. 
- -This traffic redirection is only needed as long as lego solves challenges. As soon as you have received your certificates you can deactivate the forwarding. - -#### Usage - -``` -NAME: - lego - Let's Encrypt client written in Go - -USAGE: - lego [global options] command [command options] [arguments...] - -VERSION: - 0.3.1 - -COMMANDS: - run Register an account, then create and install a certificate - revoke Revoke a certificate - renew Renew a certificate - dnshelp Shows additional help for the --dns global option - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --domains, -d [--domains option --domains option] Add domains to the process - --csr, -c Certificate signing request filename, if an external CSR is to be used - --server, -s "https://acme-v01.api.letsencrypt.org/directory" CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client. - --email, -m Email used for registration and recovery contact. - --accept-tos, -a By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service. - --key-type, -k "rsa2048" Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384 - --path "${CWD}/.lego" Directory to use for storing the data - --exclude, -x [--exclude option --exclude option] Explicitly disallow solvers by name from being used. Solvers: "http-01", "tls-sni-01". - --webroot Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge - --http Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port - --tls Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port - --dns Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage. - --help, -h show help - --version, -v print the version -``` - -##### CLI Example - -Assumes the `lego` binary has permission to bind to ports 80 and 443. You can get a pre-built binary from the [releases](https://github.com/xenolf/lego/releases) page. -If your environment does not allow you to bind to these ports, please read [Port Usage](#port-usage). - -Obtain a certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" run -``` - -(Find your certificate in the `.lego` folder of current working directory.) - -To renew the certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" renew -``` - -To renew the certificate only if it's older than 30 days - -```bash -$ lego --email="foo@bar.com" --domains="example.com" renew --days 30 -``` - -Obtain a certificate using the DNS challenge and AWS Route 53: - -```bash -$ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=my_id AWS_SECRET_ACCESS_KEY=my_key lego --email="foo@bar.com" --domains="example.com" --dns="route53" run -``` - -Note that `--dns=foo` implies `--exclude=http-01` and `--exclude=tls-sni-01`. lego will not attempt other challenges if you've told it to use DNS instead. - -Obtain a certificate given a certificate signing request (CSR) generated by something else: - -```bash -$ lego --email="foo@bar.com" --csr=/path/to/csr.pem run -``` - -(lego will infer the domains to be validated based on the contents of the CSR, so make sure the CSR's Common Name and optional SubjectAltNames are set correctly.) - -lego defaults to communicating with the production Let's Encrypt ACME server. 
If you'd like to test something without issuing real certificates, consider using the staging endpoint instead: - -```bash -$ lego --server=https://acme-staging.api.letsencrypt.org/directory … -``` - -#### DNS Challenge API Details - -##### AWS Route 53 - -The following AWS IAM policy document describes the permissions required for lego to complete the DNS challenge. -Replace `` with the Route 53 zone ID of the domain you are authorizing. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "route53:GetChange", - "route53:ListHostedZonesByName" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:ChangeResourceRecordSets" - ], - "Resource": [ - "arn:aws:route53:::hostedzone/" - ] - } - ] -} -``` - -#### ACME Library Usage - -A valid, but bare-bones example use of the acme package: - -```go -// You'll need a user or account type that implements acme.User -type MyUser struct { - Email string - Registration *acme.RegistrationResource - key crypto.PrivateKey -} -func (u MyUser) GetEmail() string { - return u.Email -} -func (u MyUser) GetRegistration() *acme.RegistrationResource { - return u.Registration -} -func (u MyUser) GetPrivateKey() crypto.PrivateKey { - return u.key -} - -// Create a user. New accounts need an email and private key to start. -const rsaKeySize = 2048 -privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize) -if err != nil { - log.Fatal(err) -} -myUser := MyUser{ - Email: "you@yours.com", - key: privateKey, -} - -// A client facilitates communication with the CA server. This CA URL is -// configured for a local dev instance of Boulder running in Docker in a VM. -client, err := acme.NewClient("http://192.168.99.100:4000", &myUser, acme.RSA2048) -if err != nil { - log.Fatal(err) -} - -// We specify an http port of 5002 and an tls port of 5001 on all interfaces -// because we aren't running as root and can't bind a listener to port 80 and 443 -// (used later when we attempt to pass challenges). Keep in mind that we still -// need to proxy challenge traffic to port 5002 and 5001. -client.SetHTTPAddress(":5002") -client.SetTLSAddress(":5001") - -// New users will need to register -reg, err := client.Register() -if err != nil { - log.Fatal(err) -} -myUser.Registration = reg - -// SAVE THE USER. - -// The client has a URL to the current Let's Encrypt Subscriber -// Agreement. The user will need to agree to it. -err = client.AgreeToTOS() -if err != nil { - log.Fatal(err) -} - -// The acme library takes care of completing the challenges to obtain the certificate(s). -// The domains must resolve to this machine or you have to use the DNS challenge. -bundle := false -certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil, false) -if len(failures) > 0 { - log.Fatal(failures) -} - -// Each certificate comes back with the cert bytes, the bytes of the client's -// private key, and a certificate URL. SAVE THESE TO DISK. -fmt.Printf("%#v\n", certificates) - -// ... all done. -``` diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go deleted file mode 100644 index 857900507..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go +++ /dev/null @@ -1,16 +0,0 @@ -package acme - -// Challenge is a string that identifies a particular type and version of ACME challenge. 
-type Challenge string - -const ( - // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http - // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge - HTTP01 = Challenge("http-01") - // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni - // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge - TLSSNI01 = Challenge("tls-sni-01") - // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns - // Note: DNS01Record returns a DNS record which will fulfill this challenge - DNS01 = Challenge("dns-01") -) diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go deleted file mode 100644 index bcb844371..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go +++ /dev/null @@ -1,825 +0,0 @@ -// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers. -package acme - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "regexp" - "strconv" - "strings" - "time" -) - -var ( - // Logger is an optional custom logger. - Logger *log.Logger -) - -const ( - // maxBodySize is the maximum size of body that we will read. - maxBodySize = 1024 * 1024 - - // overallRequestLimit is the overall number of request per second limited on the - // “new-reg”, “new-authz” and “new-cert” endpoints. From the documentation the - // limitation is 20 requests per second, but using 20 as value doesn't work but 18 do - overallRequestLimit = 18 -) - -// logf writes a log entry. It uses Logger if not -// nil, otherwise it uses the default log.Logger. -func logf(format string, args ...interface{}) { - if Logger != nil { - Logger.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// User interface is to be implemented by users of this library. -// It is used by the client type to get user specific information. -type User interface { - GetEmail() string - GetRegistration() *RegistrationResource - GetPrivateKey() crypto.PrivateKey -} - -// Interface for all challenge solvers to implement. -type solver interface { - Solve(challenge challenge, domain string) error -} - -type validateFunc func(j *jws, domain, uri string, chlng challenge) error - -// Client is the user-friendy way to ACME -type Client struct { - directory directory - user User - jws *jws - keyType KeyType - solvers map[Challenge]solver -} - -// NewClient creates a new ACME client on behalf of the user. The client will depend on -// the ACME directory located at caDirURL for the rest of its actions. A private -// key of type keyType (see KeyType contants) will be generated when requesting a new -// certificate if one isn't provided. 
-func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) { - privKey := user.GetPrivateKey() - if privKey == nil { - return nil, errors.New("private key was nil") - } - - var dir directory - if _, err := getJSON(caDirURL, &dir); err != nil { - return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err) - } - - if dir.NewRegURL == "" { - return nil, errors.New("directory missing new registration URL") - } - if dir.NewAuthzURL == "" { - return nil, errors.New("directory missing new authz URL") - } - if dir.NewCertURL == "" { - return nil, errors.New("directory missing new certificate URL") - } - if dir.RevokeCertURL == "" { - return nil, errors.New("directory missing revoke certificate URL") - } - - jws := &jws{privKey: privKey, directoryURL: caDirURL} - - // REVIEW: best possibility? - // Add all available solvers with the right index as per ACME - // spec to this map. Otherwise they won`t be found. - solvers := make(map[Challenge]solver) - solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}} - solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}} - - return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil -} - -// SetChallengeProvider specifies a custom provider p that can solve the given challenge type. -func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error { - switch challenge { - case HTTP01: - c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} - case TLSSNI01: - c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} - case DNS01: - c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p} - default: - return fmt.Errorf("Unknown challenge %v", challenge) - } - return nil -} - -// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges. -// If this option is not used, the default port 80 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -// -// NOTE: This REPLACES any custom HTTP provider previously set by calling -// c.SetChallengeProvider with the default HTTP challenge provider. -func (c *Client) SetHTTPAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[HTTP01]; ok { - chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port) - } - - return nil -} - -// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges. -// If this option is not used, the default port 443 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -// -// NOTE: This REPLACES any custom TLS-SNI provider previously set by calling -// c.SetChallengeProvider with the default TLS-SNI challenge provider. -func (c *Client) SetTLSAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[TLSSNI01]; ok { - chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port) - } - return nil -} - -// ExcludeChallenges explicitly removes challenges from the pool for solving. -func (c *Client) ExcludeChallenges(challenges []Challenge) { - // Loop through all challenges and delete the requested one if found. 
- for _, challenge := range challenges { - delete(c.solvers, challenge) - } -} - -// Register the current account to the ACME server. -func (c *Client) Register() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot register a nil client or user") - } - logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "new-reg", - } - if c.user.GetEmail() != "" { - regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} - } else { - regMsg.Contact = []string{} - } - - var serverReg Registration - var regURI string - hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) - if err != nil { - remoteErr, ok := err.(RemoteError) - if ok && remoteErr.StatusCode == 409 { - regURI = hdr.Get("Location") - regMsg = registrationMessage{ - Resource: "reg", - } - if hdr, err = postJSON(c.jws, regURI, regMsg, &serverReg); err != nil { - return nil, err - } - } else { - return nil, err - } - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - - if regURI == "" { - regURI = hdr.Get("Location") - } - reg.URI = regURI - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: The server did not return 'next' link to proceed") - } - - return reg, nil -} - -// DeleteRegistration deletes the client's user registration from the ACME -// server. -func (c *Client) DeleteRegistration() error { - if c == nil || c.user == nil { - return errors.New("acme: cannot unregister a nil client or user") - } - logf("[INFO] acme: Deleting account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "reg", - Delete: true, - } - - _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil) - if err != nil { - return err - } - - return nil -} - -// QueryRegistration runs a POST request on the client's registration and -// returns the result. -// -// This is similar to the Register function, but acting on an existing -// registration link and resource. -func (c *Client) QueryRegistration() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot query the registration of a nil client or user") - } - // Log the URL here instead of the email as the email may not be set - logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI) - - regMsg := registrationMessage{ - Resource: "reg", - } - - var serverReg Registration - hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg) - if err != nil { - return nil, err - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - // Location: header is not returned so this needs to be populated off of - // existing URI - reg.URI = c.user.GetRegistration().URI - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: No new-authz link in response to registration query") - } - - return reg, nil -} - -// AgreeToTOS updates the Client registration and sends the agreement to -// the server. 
-func (c *Client) AgreeToTOS() error { - reg := c.user.GetRegistration() - - reg.Body.Agreement = c.user.GetRegistration().TosURL - reg.Body.Resource = "reg" - _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) - return err -} - -// ObtainCertificateForCSR tries to obtain a certificate matching the CSR passed into it. -// The domains are inferred from the CommonName and SubjectAltNames, if any. The private key -// for this CSR is not required. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// This function will never return a partial certificate. If one domain in the list fails, -// the whole certificate will fail. -func (c *Client) ObtainCertificateForCSR(csr x509.CertificateRequest, bundle bool) (CertificateResource, map[string]error) { - // figure out what domains it concerns - // start with the common name - domains := []string{csr.Subject.CommonName} - - // loop over the SubjectAltName DNS names -DNSNames: - for _, sanName := range csr.DNSNames { - for _, existingName := range domains { - if existingName == sanName { - // duplicate; skip this name - continue DNSNames - } - } - - // name is unique - domains = append(domains, sanName) - } - - if bundle { - logf("[INFO][%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", ")) - } else { - logf("[INFO][%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", ")) - } - - challenges, failures := c.getChallenges(domains) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(failures) > 0 { - for _, auth := range challenges { - c.disableAuthz(auth) - } - - return CertificateResource{}, failures - } - - errs := c.solveChallenges(challenges) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(errs) > 0 { - return CertificateResource{}, errs - } - - logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - cert, err := c.requestCertificateForCsr(challenges, bundle, csr.Raw, nil) - if err != nil { - for _, chln := range challenges { - failures[chln.Domain] = err - } - } - - // Add the CSR to the certificate so that it can be used for renewals. - cert.CSR = pemEncode(&csr) - - return cert, failures -} - -// ObtainCertificate tries to obtain a single certificate using all domains passed into it. -// The first domain in domains is used for the CommonName field of the certificate, all other -// domains are added using the Subject Alternate Names extension. A new private key is generated -// for every invocation of this function. If you do not want that you can supply your own private key -// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// This function will never return a partial certificate. If one domain in the list fails, -// the whole certificate will fail. -func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, map[string]error) { - if bundle { - logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) - } else { - logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) - } - - challenges, failures := c.getChallenges(domains) - // If any challenge fails - return. 
Do not generate partial SAN certificates. - if len(failures) > 0 { - for _, auth := range challenges { - c.disableAuthz(auth) - } - - return CertificateResource{}, failures - } - - errs := c.solveChallenges(challenges) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(errs) > 0 { - return CertificateResource{}, errs - } - - logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - cert, err := c.requestCertificate(challenges, bundle, privKey, mustStaple) - if err != nil { - for _, chln := range challenges { - failures[chln.Domain] = err - } - } - - return cert, failures -} - -// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA. -func (c *Client) RevokeCertificate(certificate []byte) error { - certificates, err := parsePEMBundle(certificate) - if err != nil { - return err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return fmt.Errorf("Certificate bundle starts with a CA certificate") - } - - encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw) - - _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil) - return err -} - -// RenewCertificate takes a CertificateResource and tries to renew the certificate. -// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. -// Please be aware that this function will return a new certificate in ANY case that is not an error. -// If the server does not provide us with a new cert on a GET request to the CertURL -// this function will start a new-cert flow where a new certificate gets generated. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil. -func (c *Client) RenewCertificate(cert CertificateResource, bundle, mustStaple bool) (CertificateResource, error) { - // Input certificate is PEM encoded. Decode it here as we may need the decoded - // cert later on in the renewal process. The input may be a bundle or a single certificate. - certificates, err := parsePEMBundle(cert.Certificate) - if err != nil { - return CertificateResource{}, err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain) - } - - // This is just meant to be informal for the user. - timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) - logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours())) - - // We always need to request a new certificate to renew. - // Start by checking to see if the certificate was based off a CSR, and - // use that if it's defined. 
- if len(cert.CSR) > 0 { - csr, err := pemDecodeTox509CSR(cert.CSR) - if err != nil { - return CertificateResource{}, err - } - newCert, failures := c.ObtainCertificateForCSR(*csr, bundle) - return newCert, failures[cert.Domain] - } - - var privKey crypto.PrivateKey - if cert.PrivateKey != nil { - privKey, err = parsePEMPrivateKey(cert.PrivateKey) - if err != nil { - return CertificateResource{}, err - } - } - - var domains []string - var failures map[string]error - // check for SAN certificate - if len(x509Cert.DNSNames) > 1 { - domains = append(domains, x509Cert.Subject.CommonName) - for _, sanDomain := range x509Cert.DNSNames { - if sanDomain == x509Cert.Subject.CommonName { - continue - } - domains = append(domains, sanDomain) - } - } else { - domains = append(domains, x509Cert.Subject.CommonName) - } - - newCert, failures := c.ObtainCertificate(domains, bundle, privKey, mustStaple) - return newCert, failures[cert.Domain] -} - -// Looks through the challenge combinations to find a solvable match. -// Then solves the challenges in series and returns. -func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { - // loop through the resources, basically through the domains. - failures := make(map[string]error) - for _, authz := range challenges { - if authz.Body.Status == "valid" { - // Boulder might recycle recent validated authz (see issue #267) - logf("[INFO][%s] acme: Authorization already valid; skipping challenge", authz.Domain) - continue - } - // no solvers - no solving - if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { - for i, solver := range solvers { - // TODO: do not immediately fail if one domain fails to validate. - err := solver.Solve(authz.Body.Challenges[i], authz.Domain) - if err != nil { - c.disableAuthz(authz) - failures[authz.Domain] = err - } - } - } else { - c.disableAuthz(authz) - failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) - } - } - - return failures -} - -// Checks all combinations from the server and returns an array of -// solvers which should get executed in series. -func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver { - for _, combination := range auth.Combinations { - solvers := make(map[int]solver) - for _, idx := range combination { - if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok { - solvers[idx] = solver - } else { - logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type) - } - } - - // If we can solve the whole combination, return the solvers - if len(solvers) == len(combination) { - return solvers - } - } - return nil -} - -// Get the challenges needed to proof our identifier to the ACME server. 
-func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) { - resc, errc := make(chan authorizationResource), make(chan domainError) - - delay := time.Second / overallRequestLimit - - for _, domain := range domains { - time.Sleep(delay) - - go func(domain string) { - authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}} - var authz authorization - hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz) - if err != nil { - errc <- domainError{Domain: domain, Error: err} - return - } - - links := parseLinks(hdr["Link"]) - if links["next"] == "" { - logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain) - errc <- domainError{Domain: domain, Error: errors.New("Server did not provide next link to proceed")} - return - } - - resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain} - }(domain) - } - - responses := make(map[string]authorizationResource) - failures := make(map[string]error) - for i := 0; i < len(domains); i++ { - select { - case res := <-resc: - responses[res.Domain] = res - case err := <-errc: - failures[err.Domain] = err.Error - } - } - - challenges := make([]authorizationResource, 0, len(responses)) - for _, domain := range domains { - if challenge, ok := responses[domain]; ok { - challenges = append(challenges, challenge) - } - } - - logAuthz(challenges) - - close(resc) - close(errc) - - return challenges, failures -} - -func logAuthz(authz []authorizationResource) { - for _, auth := range authz { - logf("[INFO][%s] AuthURL: %s", auth.Domain, auth.AuthURL) - } -} - -// cleanAuthz loops through the passed in slice and disables any auths which are not "valid" -func (c *Client) disableAuthz(auth authorizationResource) error { - var disabledAuth authorization - _, err := postJSON(c.jws, auth.AuthURL, deactivateAuthMessage{Resource: "authz", Status: "deactivated"}, &disabledAuth) - return err -} - -func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, error) { - if len(authz) == 0 { - return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") - } - - var err error - if privKey == nil { - privKey, err = generatePrivateKey(c.keyType) - if err != nil { - return CertificateResource{}, err - } - } - - // determine certificate name(s) based on the authorization resources - commonName := authz[0] - var san []string - for _, auth := range authz[1:] { - san = append(san, auth.Domain) - } - - // TODO: should the CSR be customizable? 
- csr, err := generateCsr(privKey, commonName.Domain, san, mustStaple) - if err != nil { - return CertificateResource{}, err - } - - return c.requestCertificateForCsr(authz, bundle, csr, pemEncode(privKey)) -} - -func (c *Client) requestCertificateForCsr(authz []authorizationResource, bundle bool, csr []byte, privateKeyPem []byte) (CertificateResource, error) { - commonName := authz[0] - - var authURLs []string - for _, auth := range authz[1:] { - authURLs = append(authURLs, auth.AuthURL) - } - - csrString := base64.URLEncoding.EncodeToString(csr) - jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) - if err != nil { - return CertificateResource{}, err - } - - resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) - if err != nil { - return CertificateResource{}, err - } - - certRes := CertificateResource{ - Domain: commonName.Domain, - CertURL: resp.Header.Get("Location"), - PrivateKey: privateKeyPem, - } - - maxChecks := 1000 - for i := 0; i < maxChecks; i++ { - done, err := c.checkCertResponse(resp, &certRes, bundle) - resp.Body.Close() - if err != nil { - return CertificateResource{}, err - } - if done { - break - } - if i == maxChecks-1 { - return CertificateResource{}, fmt.Errorf("polled for certificate %d times; giving up", i) - } - resp, err = httpGet(certRes.CertURL) - if err != nil { - return CertificateResource{}, err - } - } - - return certRes, nil -} - -// checkCertResponse checks resp to see if a certificate is contained in the -// response, and if so, loads it into certRes and returns true. If the cert -// is not yet ready, it returns false. This function honors the waiting period -// required by the Retry-After header of the response, if specified. This -// function may read from resp.Body but does NOT close it. The certRes input -// should already have the Domain (common name) field populated. If bundle is -// true, the certificate will be bundled with the issuer's cert. -func (c *Client) checkCertResponse(resp *http.Response, certRes *CertificateResource, bundle bool) (bool, error) { - switch resp.StatusCode { - case 201, 202: - cert, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return false, err - } - - // The server returns a body with a length of zero if the - // certificate was not ready at the time this request completed. - // Otherwise the body is the certificate. - if len(cert) > 0 { - certRes.CertStableURL = resp.Header.Get("Content-Location") - certRes.AccountRef = c.user.GetRegistration().URI - - issuedCert := pemEncode(derCertificateBytes(cert)) - - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", certRes.Domain, err) - } else { - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - - // If bundle is true, we want to return a certificate bundle. - // To do this, we append the issuer cert to the issued cert. - if bundle { - issuedCert = append(issuedCert, issuerCert...) - } - } - - certRes.Certificate = issuedCert - certRes.IssuerCertificate = issuerCert - logf("[INFO][%s] Server responded with a certificate.", certRes.Domain) - return true, nil - } - - // The certificate was granted but is not yet issued. 
- // Check retry-after and loop. - ra := resp.Header.Get("Retry-After") - retryAfter, err := strconv.Atoi(ra) - if err != nil { - return false, err - } - - logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", certRes.Domain, retryAfter) - time.Sleep(time.Duration(retryAfter) * time.Second) - - return false, nil - default: - return false, handleHTTPError(resp) - } -} - -// getIssuerCertificate requests the issuer certificate -func (c *Client) getIssuerCertificate(url string) ([]byte, error) { - logf("[INFO] acme: Requesting issuer cert from %s", url) - resp, err := httpGet(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return nil, err - } - - _, err = x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, err - } - - return issuerBytes, err -} - -func parseLinks(links []string) map[string]string { - aBrkt := regexp.MustCompile("[<>]") - slver := regexp.MustCompile("(.+) *= *\"(.+)\"") - linkMap := make(map[string]string) - - for _, link := range links { - - link = aBrkt.ReplaceAllString(link, "") - parts := strings.Split(link, ";") - - matches := slver.FindStringSubmatch(parts[1]) - if len(matches) > 0 { - linkMap[matches[2]] = parts[0] - } - } - - return linkMap -} - -// validate makes the ACME server start validating a -// challenge response, only returning once it is done. -func validate(j *jws, domain, uri string, chlng challenge) error { - var challengeResponse challenge - - hdr, err := postJSON(j, uri, chlng, &challengeResponse) - if err != nil { - return err - } - - // After the path is sent, the ACME server will access our server. - // Repeatedly check the server for an updated status on our request. - for { - switch challengeResponse.Status { - case "valid": - logf("[INFO][%s] The server validated our request", domain) - return nil - case "pending": - break - case "invalid": - return handleChallengeError(challengeResponse) - default: - return errors.New("The server returned an unexpected state.") - } - - ra, err := strconv.Atoi(hdr.Get("Retry-After")) - if err != nil { - // The ACME server MUST return a Retry-After. - // If it doesn't, we'll just poll hard. 
- ra = 1 - } - time.Sleep(time.Duration(ra) * time.Second) - - hdr, err = getJSON(uri, &challengeResponse) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go deleted file mode 100644 index b18334c8a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package acme - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -func TestNewClient(t *testing.T) { - keyBits := 32 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - if client.jws == nil { - t.Fatalf("Expected client.jws to not be nil") - } - if expected, actual := key, client.jws.privKey; actual != expected { - t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual) - } - - if client.keyType != keyType { - t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType) - } - - if expected, actual := 2, len(client.solvers); actual != expected { - t.Fatalf("Expected %d solver(s), got %d", expected, actual) - } -} - -func TestClientOptPort(t *testing.T) { - keyBits := 32 // small value keeps test fast - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - optPort := "1234" - optHost := "" - client, err := NewClient(ts.URL, user, RSA2048) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - httpSolver, ok := client.solvers[HTTP01].(*httpChallenge) - if !ok { - t.Fatal("Expected http-01 solver to be httpChallenge type") - } - if httpSolver.jws != client.jws { - t.Error("Expected http-01 to have same jws as client") - } - if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort { - t.Errorf("Expected http-01 to have port %s but was %s", optPort, got) - } - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - - httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge) - if !ok { - t.Fatal("Expected tls-sni-01 solver to be httpChallenge type") - } - if httpsSolver.jws != client.jws { - t.Error("Expected tls-sni-01 to have same jws as client") - } - if got := 
httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got) - } - - // test setting different host - optHost = "127.0.0.1" - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } -} - -func TestNotHoldingLockWhileMakingHTTPRequests(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(250 * time.Millisecond) - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - writeJSONResponse(w, &challenge{Type: "http-01", Status: "Valid", URI: "http://example.com/", Token: "token"}) - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - ch := make(chan bool) - resultCh := make(chan bool) - go func() { - j.Nonce() - ch <- true - }() - go func() { - j.Nonce() - ch <- true - }() - go func() { - <-ch - <-ch - resultCh <- true - }() - select { - case <-resultCh: - case <-time.After(400 * time.Millisecond): - t.Fatal("JWS is probably holding a lock while making HTTP request") - } -} - -func TestValidate(t *testing.T) { - var statuses []string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Minimal stub ACME server for validation. 
- w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - switch r.Method { - case "HEAD": - case "POST": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - case "GET": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - default: - http.Error(w, r.Method, http.StatusMethodNotAllowed) - } - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - - tsts := []struct { - name string - statuses []string - want string - }{ - {"POST-unexpected", []string{"weird"}, "unexpected"}, - {"POST-valid", []string{"valid"}, ""}, - {"POST-invalid", []string{"invalid"}, "Error Detail"}, - {"GET-unexpected", []string{"pending", "weird"}, "unexpected"}, - {"GET-valid", []string{"pending", "valid"}, ""}, - {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"}, - } - - for _, tst := range tsts { - statuses = tst.statuses - if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } else if err != nil && !strings.Contains(err.Error(), tst.want) { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } - } -} - -func TestGetChallenges(t *testing.T) { - var ts *httptest.Server - ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET", "HEAD": - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - writeJSONResponse(w, directory{NewAuthzURL: ts.URL, NewCertURL: ts.URL, NewRegURL: ts.URL, RevokeCertURL: ts.URL}) - case "POST": - writeJSONResponse(w, authorization{}) - } - })) - defer ts.Close() - - keyBits := 512 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: &RegistrationResource{NewAuthzURL: ts.URL}, - privatekey: key, - } - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - _, failures := client.getChallenges([]string{"example.com"}) - if failures["example.com"] == nil { - t.Fatal("Expecting \"Server did not provide next link to proceed\" error, got nil") - } -} - -// writeJSONResponse marshals the body as JSON and writes it to the response. -func writeJSONResponse(w http.ResponseWriter, body interface{}) { - bs, err := json.Marshal(body) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(bs); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -// stubValidate is like validate, except it does nothing. 
-func stubValidate(j *jws, domain, uri string, chlng challenge) error { - return nil -} - -type mockUser struct { - email string - regres *RegistrationResource - privatekey *rsa.PrivateKey -} - -func (u mockUser) GetEmail() string { return u.email } -func (u mockUser) GetRegistration() *RegistrationResource { return u.regres } -func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey } diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go deleted file mode 100644 index fa868a90d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go +++ /dev/null @@ -1,347 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "time" - - "encoding/asn1" - - "golang.org/x/crypto/ocsp" -) - -// KeyType represents the key algo as well as the key size or curve to use. -type KeyType string -type derCertificateBytes []byte - -// Constants for all key types we support. -const ( - EC256 = KeyType("P256") - EC384 = KeyType("P384") - RSA2048 = KeyType("2048") - RSA4096 = KeyType("4096") - RSA8192 = KeyType("8192") -) - -const ( - // OCSPGood means that the certificate is valid. - OCSPGood = ocsp.Good - // OCSPRevoked means that the certificate has been deliberately revoked. - OCSPRevoked = ocsp.Revoked - // OCSPUnknown means that the OCSP responder doesn't know about the certificate. - OCSPUnknown = ocsp.Unknown - // OCSPServerFailed means that the OCSP responder failed to process the request. - OCSPServerFailed = ocsp.ServerFailed -) - -// Constants for OCSP must staple -var ( - tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} - ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05} -) - -// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. The returned []byte can be passed directly -// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the -// issued certificate, this function will try to get the issuer certificate from the -// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return -// values are nil, the OCSP status may be assumed OCSPUnknown. -func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { - certificates, err := parsePEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. - // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. If there's no - // OCSP server listed in the leaf cert, there's nothing to do. And if - // we have only one certificate so far, we need to get the issuer cert. - issuedCert := certificates[0] - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, errors.New("no OCSP server specified in cert") - } - if len(certificates) == 1 { - // TODO: build fallback. If this fails, check the remaining array entries. 
- if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, errors.New("no issuing certificate URL") - } - - resp, err := httpGet(issuedCert.IssuingCertificateURL[0]) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, nil, err - } - - issuerCert, err := x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, nil, err - } - - // Insert it into the slice on position 0 - // We want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - issuerCert := certificates[1] - - // Finally kick off the OCSP request. - ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, err - } - - reader := bytes.NewReader(ocspReq) - req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader) - if err != nil { - return nil, nil, err - } - defer req.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024)) - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, err - } - - return ocspResBytes, ocspRes, nil -} - -func getKeyAuthorization(token string, key interface{}) (string, error) { - var publicKey crypto.PublicKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - publicKey = k.Public() - case *rsa.PrivateKey: - publicKey = k.Public() - } - - // Generate the Key Authorization for the challenge - jwk := keyAsJWK(publicKey) - if jwk == nil { - return "", errors.New("Could not generate JWK from key.") - } - thumbBytes, err := jwk.Thumbprint(crypto.SHA256) - if err != nil { - return "", err - } - - // unpad the base64URL - keyThumb := base64.URLEncoding.EncodeToString(thumbBytes) - index := strings.Index(keyThumb, "=") - if index != -1 { - keyThumb = keyThumb[:index] - } - - return token + "." + keyThumb, nil -} - -// parsePEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. 
-func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - - if len(certificates) == 0 { - return nil, errors.New("No certificates were found while parsing the bundle.") - } - - return certificates, nil -} - -func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(key) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - default: - return nil, errors.New("Unknown PEM header value") - } -} - -func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { - - switch keyType { - case EC256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case EC384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - - return nil, fmt.Errorf("Invalid KeyType: %s", keyType) -} - -func generateCsr(privateKey crypto.PrivateKey, domain string, san []string, mustStaple bool) ([]byte, error) { - template := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: domain, - }, - } - - if len(san) > 0 { - template.DNSNames = san - } - - if mustStaple { - template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ - Id: tlsFeatureExtensionOID, - Value: ocspMustStapleFeature, - }) - } - - return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) -} - -func pemEncode(data interface{}) []byte { - var pemBlock *pem.Block - switch key := data.(type) { - case *ecdsa.PrivateKey: - keyBytes, _ := x509.MarshalECPrivateKey(key) - pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - case *rsa.PrivateKey: - pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} - break - case *x509.CertificateRequest: - pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw} - break - case derCertificateBytes: - pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} - } - - return pem.EncodeToMemory(pemBlock) -} - -func pemDecode(data []byte) (*pem.Block, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") - } - - return pemBlock, nil -} - -func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - return x509.ParseCertificate(pemBlock.Bytes) -} - -func pemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - if pemBlock.Type != "CERTIFICATE REQUEST" { - return nil, fmt.Errorf("PEM block is not a certificate request") - } - - return x509.ParseCertificateRequest(pemBlock.Bytes) -} - -// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. -// The certificate has to be PEM encoded. Any other encodings like DER will fail. 
-func GetPEMCertExpiration(cert []byte) (time.Time, error) { - pemBlock, err := pemDecode(cert) - if pemBlock == nil { - return time.Time{}, err - } - - return getCertExpiration(pemBlock.Bytes) -} - -// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. -func getCertExpiration(cert []byte) (time.Time, error) { - pCert, err := x509.ParseCertificate(cert) - if err != nil { - return time.Time{}, err - } - - return pCert.NotAfter, nil -} - -func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) { - derBytes, err := generateDerCert(privKey, time.Time{}, domain) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil -} - -func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - if expiration.IsZero() { - expiration = time.Now().Add(365) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: "ACME Challenge TEMP", - }, - NotBefore: time.Now(), - NotAfter: expiration, - - KeyUsage: x509.KeyUsageKeyEncipherment, - BasicConstraintsValid: true, - DNSNames: []string{domain}, - } - - return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) -} - -func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser { - return http.MaxBytesReader(nil, rd, numBytes) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go deleted file mode 100644 index 6f43835fb..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package acme - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "testing" - "time" -) - -func TestGeneratePrivateKey(t *testing.T) { - key, err := generatePrivateKey(RSA2048) - if err != nil { - t.Error("Error generating private key:", err) - } - if key == nil { - t.Error("Expected key to not be nil, but it was") - } -} - -func TestGenerateCSR(t *testing.T) { - key, err := rsa.GenerateKey(rand.Reader, 512) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - csr, err := generateCsr(key, "fizz.buzz", nil, true) - if err != nil { - t.Error("Error generating CSR:", err) - } - if csr == nil || len(csr) == 0 { - t.Error("Expected CSR with data, but it was nil or length 0") - } -} - -func TestPEMEncode(t *testing.T) { - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - reader := MockRandReader{b: buf} - key, err := rsa.GenerateKey(reader, 32) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - data := pemEncode(key) - - if data == nil { - t.Fatal("Expected result to not be nil, but it was") - } - if len(data) != 127 { - t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data)) - } -} - -func TestPEMCertExpiration(t *testing.T) { - privKey, err := generatePrivateKey(RSA2048) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - expiration := time.Now().Add(365) - expiration = expiration.Round(time.Second) - certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com") - if err != nil { - t.Fatal("Error generating cert:", err) - } - - buf := 
bytes.NewBufferString("TestingRSAIsSoMuchFun") - - // Some random string should return an error. - if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil { - t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime) - } - - // A DER encoded certificate should return an error. - if _, err := GetPEMCertExpiration(certBytes); err == nil { - t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.") - } - - // A PEM encoded certificate should work ok. - pemCert := pemEncode(derCertificateBytes(certBytes)) - if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) { - t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err) - } -} - -type MockRandReader struct { - b *bytes.Buffer -} - -func (r MockRandReader) Read(p []byte) (int, error) { - return r.b.Read(p) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge.go deleted file mode 100644 index 30f2170ff..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge.go +++ /dev/null @@ -1,305 +0,0 @@ -package acme - -import ( - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "log" - "net" - "strings" - "time" - - "github.com/miekg/dns" - "golang.org/x/net/publicsuffix" -) - -type preCheckDNSFunc func(fqdn, value string) (bool, error) - -var ( - // PreCheckDNS checks DNS propagation before notifying ACME that - // the DNS challenge is ready. - PreCheckDNS preCheckDNSFunc = checkDNSPropagation - fqdnToZone = map[string]string{} -) - -const defaultResolvConf = "/etc/resolv.conf" - -var defaultNameservers = []string{ - "google-public-dns-a.google.com:53", - "google-public-dns-b.google.com:53", -} - -var RecursiveNameservers = getNameservers(defaultResolvConf, defaultNameservers) - -// DNSTimeout is used to override the default DNS timeout of 10 seconds. 
-var DNSTimeout = 10 * time.Second - -// getNameservers attempts to get systems nameservers before falling back to the defaults -func getNameservers(path string, defaults []string) []string { - config, err := dns.ClientConfigFromFile(path) - if err != nil || len(config.Servers) == 0 { - return defaults - } - - systemNameservers := []string{} - for _, server := range config.Servers { - // ensure all servers have a port number - if _, _, err := net.SplitHostPort(server); err != nil { - systemNameservers = append(systemNameservers, net.JoinHostPort(server, "53")) - } else { - systemNameservers = append(systemNameservers, server) - } - } - return systemNameservers -} - -// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge -func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { - keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) - // base64URL encoding without padding - keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) - value = strings.TrimRight(keyAuthSha, "=") - ttl = 120 - fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) - return -} - -// dnsChallenge implements the dns-01 challenge according to ACME 7.5 -type dnsChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (s *dnsChallenge) Solve(chlng challenge, domain string) error { - logf("[INFO][%s] acme: Trying to solve DNS-01", domain) - - if s.provider == nil { - return errors.New("No DNS Provider configured") - } - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("Error presenting token: %s", err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("Error cleaning up %s: %v ", domain, err) - } - }() - - fqdn, value, _ := DNS01Record(domain, keyAuth) - - logf("[INFO][%s] Checking DNS record propagation using %+v", domain, RecursiveNameservers) - - var timeout, interval time.Duration - switch provider := s.provider.(type) { - case ChallengeProviderTimeout: - timeout, interval = provider.Timeout() - default: - timeout, interval = 60*time.Second, 2*time.Second - } - - err = WaitFor(timeout, interval, func() (bool, error) { - return PreCheckDNS(fqdn, value) - }) - if err != nil { - return err - } - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. -func checkDNSPropagation(fqdn, value string) (bool, error) { - // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true) - if err != nil { - return false, err - } - if r.Rcode == dns.RcodeSuccess { - // If we see a CNAME here then use the alias - for _, rr := range r.Answer { - if cn, ok := rr.(*dns.CNAME); ok { - if cn.Hdr.Name == fqdn { - fqdn = cn.Target - break - } - } - } - } - - authoritativeNss, err := lookupNameservers(fqdn) - if err != nil { - return false, err - } - - return checkAuthoritativeNss(fqdn, value, authoritativeNss) -} - -// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
-func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { - for _, ns := range nameservers { - r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) - if err != nil { - return false, err - } - - if r.Rcode != dns.RcodeSuccess { - return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) - } - - var found bool - for _, rr := range r.Answer { - if txt, ok := rr.(*dns.TXT); ok { - if strings.Join(txt.Txt, "") == value { - found = true - break - } - } - } - - if !found { - return false, fmt.Errorf("NS %s did not return the expected TXT record", ns) - } - } - - return true, nil -} - -// dnsQuery will query a nameserver, iterating through the supplied servers as it retries -// The nameserver should include a port, to facilitate testing where we talk to a mock dns server. -func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) { - m := new(dns.Msg) - m.SetQuestion(fqdn, rtype) - m.SetEdns0(4096, false) - - if !recursive { - m.RecursionDesired = false - } - - // Will retry the request based on the number of servers (n+1) - for i := 1; i <= len(nameservers)+1; i++ { - ns := nameservers[i%len(nameservers)] - udp := &dns.Client{Net: "udp", Timeout: DNSTimeout} - in, _, err = udp.Exchange(m, ns) - - if err == dns.ErrTruncated { - tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout} - // If the TCP request suceeds, the err will reset to nil - in, _, err = tcp.Exchange(m, ns) - } - - if err == nil { - break - } - } - return -} - -// lookupNameservers returns the authoritative nameservers for the given fqdn. -func lookupNameservers(fqdn string) ([]string, error) { - var authoritativeNss []string - - zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return nil, fmt.Errorf("Could not determine the zone: %v", err) - } - - r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true) - if err != nil { - return nil, err - } - - for _, rr := range r.Answer { - if ns, ok := rr.(*dns.NS); ok { - authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) - } - } - - if len(authoritativeNss) > 0 { - return authoritativeNss, nil - } - return nil, fmt.Errorf("Could not determine authoritative nameservers") -} - -// FindZoneByFqdn determines the zone apex for the given fqdn by recursing up the -// domain labels until the nameserver returns a SOA record in the answer section. -func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) { - // Do we have it cached? 
- if zone, ok := fqdnToZone[fqdn]; ok { - return zone, nil - } - - labelIndexes := dns.Split(fqdn) - for _, index := range labelIndexes { - domain := fqdn[index:] - // Give up if we have reached the TLD - if isTLD(domain) { - break - } - - in, err := dnsQuery(domain, dns.TypeSOA, nameservers, true) - if err != nil { - return "", err - } - - // Any response code other than NOERROR and NXDOMAIN is treated as error - if in.Rcode != dns.RcodeNameError && in.Rcode != dns.RcodeSuccess { - return "", fmt.Errorf("Unexpected response code '%s' for %s", - dns.RcodeToString[in.Rcode], domain) - } - - // Check if we got a SOA RR in the answer section - if in.Rcode == dns.RcodeSuccess { - for _, ans := range in.Answer { - if soa, ok := ans.(*dns.SOA); ok { - zone := soa.Hdr.Name - fqdnToZone[fqdn] = zone - return zone, nil - } - } - } - } - - return "", fmt.Errorf("Could not find the start of authority") -} - -func isTLD(domain string) bool { - publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(domain)) - if publicsuffix == UnFqdn(domain) { - return true - } - return false -} - -// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. -func ClearFqdnCache() { - fqdnToZone = map[string]string{} -} - -// ToFqdn converts the name into a fqdn appending a trailing dot. -func ToFqdn(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } - return name + "." -} - -// UnFqdn converts the fqdn into a name removing the trailing dot. -func UnFqdn(name string) string { - n := len(name) - if n != 0 && name[n-1] == '.' { - return name[:n-1] - } - return name -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go deleted file mode 100644 index 240384e60..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go +++ /dev/null @@ -1,53 +0,0 @@ -package acme - -import ( - "bufio" - "fmt" - "os" -) - -const ( - dnsTemplate = "%s %d IN TXT \"%s\"" -) - -// DNSProviderManual is an implementation of the ChallengeProvider interface -type DNSProviderManual struct{} - -// NewDNSProviderManual returns a DNSProviderManual instance. 
-func NewDNSProviderManual() (*DNSProviderManual, error) { - return &DNSProviderManual{}, nil -} - -// Present prints instructions for manually creating the TXT record -func (*DNSProviderManual) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - logf("[INFO] acme: Press 'Enter' when you are done") - - reader := bufio.NewReader(os.Stdin) - _, _ = reader.ReadString('\n') - return nil -} - -// CleanUp prints instructions for manually removing the TXT record -func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { - fqdn, _, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - return nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go deleted file mode 100644 index 597aaac17..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package acme - -import ( - "bufio" - "crypto/rand" - "crypto/rsa" - "net/http" - "net/http/httptest" - "os" - "reflect" - "sort" - "strings" - "testing" - "time" -) - -var lookupNameserversTestsOK = []struct { - fqdn string - nss []string -}{ - {"books.google.com.ng.", - []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", "ns4.google.com."}, - }, - {"www.google.com.", - []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", "ns4.google.com."}, - }, - {"physics.georgetown.edu.", - []string{"ns1.georgetown.edu.", "ns2.georgetown.edu.", "ns3.georgetown.edu."}, - }, -} - -var lookupNameserversTestsErr = []struct { - fqdn string - error string -}{ - // invalid tld - {"_null.n0n0.", - "Could not determine the zone", - }, - // invalid domain - {"_null.com.", - "Could not determine the zone", - }, - // invalid domain - {"in-valid.co.uk.", - "Could not determine the zone", - }, -} - -var findZoneByFqdnTests = []struct { - fqdn string - zone string -}{ - {"mail.google.com.", "google.com."}, // domain is a CNAME - {"foo.google.com.", "google.com."}, // domain is a non-existent subdomain -} - -var checkAuthoritativeNssTests = []struct { - fqdn, value string - ns []string - ok bool -}{ - // TXT RR w/ expected value - {"8.8.8.8.asn.routeviews.org.", "151698.8.8.024", []string{"asnums.routeviews.org."}, - true, - }, - // No TXT RR - {"ns1.google.com.", "", []string{"ns2.google.com."}, - false, - }, -} - -var checkAuthoritativeNssTestsErr = []struct { - fqdn, value string - ns []string - error string -}{ - // TXT RR /w unexpected value - {"8.8.8.8.asn.routeviews.org.", "fe01=", []string{"asnums.routeviews.org."}, - "did not return the expected TXT record", - }, - // No TXT RR - {"ns1.google.com.", "fe01=", []string{"ns2.google.com."}, - "did not return the expected TXT record", - }, -} - -var checkResolvConfServersTests = []struct { - fixture string - expected []string - defaults []string -}{ - 
{"testdata/resolv.conf.1", []string{"10.200.3.249:53", "10.200.3.250:5353", "[2001:4860:4860::8844]:53", "[10.0.0.1]:5353"}, []string{"127.0.0.1:53"}}, - {"testdata/resolv.conf.nonexistant", []string{"127.0.0.1:53"}, []string{"127.0.0.1:53"}}, -} - -func TestDNSValidServerResponse(t *testing.T) { - PreCheckDNS = func(fqdn, value string) (bool, error) { - return true, nil - } - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Replay-Nonce", "12345") - w.Write([]byte("{\"type\":\"dns01\",\"status\":\"valid\",\"uri\":\"http://some.url\",\"token\":\"http8\"}")) - })) - - manualProvider, _ := NewDNSProviderManual() - jws := &jws{privKey: privKey, directoryURL: ts.URL} - solver := &dnsChallenge{jws: jws, validate: validate, provider: manualProvider} - clientChallenge := challenge{Type: "dns01", Status: "pending", URI: ts.URL, Token: "http8"} - - go func() { - time.Sleep(time.Second * 2) - f := bufio.NewWriter(os.Stdout) - defer f.Flush() - f.WriteString("\n") - }() - - if err := solver.Solve(clientChallenge, "example.com"); err != nil { - t.Errorf("VALID: Expected Solve to return no error but the error was -> %v", err) - } -} - -func TestPreCheckDNS(t *testing.T) { - ok, err := PreCheckDNS("acme-staging.api.letsencrypt.org", "fe01=") - if err != nil || !ok { - t.Errorf("preCheckDNS failed for acme-staging.api.letsencrypt.org") - } -} - -func TestLookupNameserversOK(t *testing.T) { - for _, tt := range lookupNameserversTestsOK { - nss, err := lookupNameservers(tt.fqdn) - if err != nil { - t.Fatalf("#%s: got %q; want nil", tt.fqdn, err) - } - - sort.Strings(nss) - sort.Strings(tt.nss) - - if !reflect.DeepEqual(nss, tt.nss) { - t.Errorf("#%s: got %v; want %v", tt.fqdn, nss, tt.nss) - } - } -} - -func TestLookupNameserversErr(t *testing.T) { - for _, tt := range lookupNameserversTestsErr { - _, err := lookupNameservers(tt.fqdn) - if err == nil { - t.Fatalf("#%s: expected %q (error); got ", tt.fqdn, tt.error) - } - - if !strings.Contains(err.Error(), tt.error) { - t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err) - continue - } - } -} - -func TestFindZoneByFqdn(t *testing.T) { - for _, tt := range findZoneByFqdnTests { - res, err := FindZoneByFqdn(tt.fqdn, RecursiveNameservers) - if err != nil { - t.Errorf("FindZoneByFqdn failed for %s: %v", tt.fqdn, err) - } - if res != tt.zone { - t.Errorf("%s: got %s; want %s", tt.fqdn, res, tt.zone) - } - } -} - -func TestCheckAuthoritativeNss(t *testing.T) { - for _, tt := range checkAuthoritativeNssTests { - ok, _ := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns) - if ok != tt.ok { - t.Errorf("%s: got %t; want %t", tt.fqdn, ok, tt.ok) - } - } -} - -func TestCheckAuthoritativeNssErr(t *testing.T) { - for _, tt := range checkAuthoritativeNssTestsErr { - _, err := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns) - if err == nil { - t.Fatalf("#%s: expected %q (error); got ", tt.fqdn, tt.error) - } - if !strings.Contains(err.Error(), tt.error) { - t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err) - continue - } - } -} - -func TestResolveConfServers(t *testing.T) { - for _, tt := range checkResolvConfServersTests { - result := getNameservers(tt.fixture, tt.defaults) - - sort.Strings(result) - sort.Strings(tt.expected) - if !reflect.DeepEqual(result, tt.expected) { - t.Errorf("#%s: expected %q; got %q", tt.fixture, tt.expected, result) - } - } -} diff --git 
a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go deleted file mode 100644 index e4bc934c2..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go +++ /dev/null @@ -1,94 +0,0 @@ -package acme - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -const ( - tosAgreementError = "Must agree to subscriber agreement before any further actions" - invalidNonceError = "JWS has invalid anti-replay nonce" -) - -// RemoteError is the base type for all errors specific to the ACME protocol. -type RemoteError struct { - StatusCode int `json:"status,omitempty"` - Type string `json:"type"` - Detail string `json:"detail"` -} - -func (e RemoteError) Error() string { - return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) -} - -// TOSError represents the error which is returned if the user needs to -// accept the TOS. -// TODO: include the new TOS url if we can somehow obtain it. -type TOSError struct { - RemoteError -} - -// NonceError represents the error which is returned if the -// nonce sent by the client was not accepted by the server. -type NonceError struct { - RemoteError -} - -type domainError struct { - Domain string - Error error -} - -type challengeError struct { - RemoteError - records []validationRecord -} - -func (c challengeError) Error() string { - - var errStr string - for _, validation := range c.records { - errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n", - validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress) - } - - return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr) -} - -func handleHTTPError(resp *http.Response) error { - var errorDetail RemoteError - - contentType := resp.Header.Get("Content-Type") - if contentType == "application/json" || contentType == "application/problem+json" { - err := json.NewDecoder(resp.Body).Decode(&errorDetail) - if err != nil { - return err - } - } else { - detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return err - } - errorDetail.Detail = string(detailBytes) - } - - errorDetail.StatusCode = resp.StatusCode - - // Check for errors we handle specifically - if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError { - return TOSError{errorDetail} - } - - if errorDetail.StatusCode == http.StatusBadRequest && strings.HasPrefix(errorDetail.Detail, invalidNonceError) { - return NonceError{errorDetail} - } - - return errorDetail -} - -func handleChallengeError(chlng challenge) error { - return challengeError{chlng.Error, chlng.ValidationRecords} -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go deleted file mode 100644 index a858b5a75..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go +++ /dev/null @@ -1,148 +0,0 @@ -package acme - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "runtime" - "strings" - "time" -) - -// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. -var UserAgent string - -// HTTPClient is an HTTP client with a reasonable timeout value. 
-var HTTPClient = http.Client{Timeout: 10 * time.Second} - -const ( - // defaultGoUserAgent is the Go HTTP package user agent string. Too - // bad it isn't exported. If it changes, we should update it here, too. - defaultGoUserAgent = "Go-http-client/1.1" - - // ourUserAgent is the User-Agent of this underlying library package. - ourUserAgent = "xenolf-acme" -) - -// httpHead performs a HEAD request with a proper User-Agent string. -// The response body (resp.Body) is already closed when this function returns. -func httpHead(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, fmt.Errorf("failed to head %q: %v", url, err) - } - - req.Header.Set("User-Agent", userAgent()) - - resp, err = HTTPClient.Do(req) - if err != nil { - return resp, fmt.Errorf("failed to do head %q: %v", url, err) - } - resp.Body.Close() - return resp, err -} - -// httpPost performs a POST request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. -func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, fmt.Errorf("failed to post %q: %v", url, err) - } - req.Header.Set("Content-Type", bodyType) - req.Header.Set("User-Agent", userAgent()) - - return HTTPClient.Do(req) -} - -// httpGet performs a GET request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. -func httpGet(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, fmt.Errorf("failed to get %q: %v", url, err) - } - req.Header.Set("User-Agent", userAgent()) - - return HTTPClient.Do(req) -} - -// getJSON performs an HTTP GET request and parses the response body -// as JSON, into the provided respBody object. -func getJSON(uri string, respBody interface{}) (http.Header, error) { - resp, err := httpGet(uri) - if err != nil { - return nil, fmt.Errorf("failed to get json %q: %v", uri, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// postJSON performs an HTTP POST request and parses the response body -// as JSON, into the provided respBody object. -func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { - jsonBytes, err := json.Marshal(reqBody) - if err != nil { - return nil, errors.New("Failed to marshal network message...") - } - - resp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) - } - - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - - err := handleHTTPError(resp) - - switch err.(type) { - - case NonceError: - - // Retry once if the nonce was invalidated - - retryResp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. 
-> %v", err) - } - - defer retryResp.Body.Close() - - if retryResp.StatusCode >= http.StatusBadRequest { - return retryResp.Header, handleHTTPError(retryResp) - } - - if respBody == nil { - return retryResp.Header, nil - } - - return retryResp.Header, json.NewDecoder(retryResp.Body).Decode(respBody) - - default: - return resp.Header, err - - } - - } - - if respBody == nil { - return resp.Header, nil - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// userAgent builds and returns the User-Agent string to use in requests. -func userAgent() string { - ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) - return strings.TrimSpace(ua) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go deleted file mode 100644 index 95cb1fd81..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go +++ /dev/null @@ -1,41 +0,0 @@ -package acme - -import ( - "fmt" - "log" -) - -type httpChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -// HTTP01ChallengePath returns the URL path for the `http-01` challenge -func HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -func (s *httpChallenge) Solve(chlng challenge, domain string) error { - - logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go deleted file mode 100644 index 64c6a8280..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go +++ /dev/null @@ -1,79 +0,0 @@ -package acme - -import ( - "fmt" - "net" - "net/http" - "strings" -) - -// HTTPProviderServer implements ChallengeProvider for `http-01` challenge -// It may be instantiated without using the NewHTTPProviderServer function if -// you want only to use the default values. -type HTTPProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 80 respectively. -func NewHTTPProviderServer(iface, port string) *HTTPProviderServer { - return &HTTPProviderServer{iface: iface, port: port} -} - -// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests. 
-func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "80" - } - - var err error - s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port)) - if err != nil { - return fmt.Errorf("Could not start HTTP server for challenge -> %v", err) - } - - s.done = make(chan bool) - go s.serve(domain, token, keyAuth) - return nil -} - -// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)` -func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} - -func (s *HTTPProviderServer) serve(domain, token, keyAuth string) { - path := HTTP01ChallengePath(token) - - // The handler validates the HOST header and request type. - // For validation it then writes the token the server returned with the challenge - mux := http.NewServeMux() - mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.Host, domain) && r.Method == "GET" { - w.Header().Add("Content-Type", "text/plain") - w.Write([]byte(keyAuth)) - logf("[INFO][%s] Served key authentication", domain) - } else { - logf("[WARN] Received request for domain %s with method %s but the domain did not match any challenge. Please ensure your are passing the HOST header properly.", r.Host, r.Method) - w.Write([]byte("TEST")) - } - }) - - httpServer := &http.Server{ - Handler: mux, - } - // Once httpServer is shut down we don't want any lingering - // connections, so disable KeepAlives. - httpServer.SetKeepAlivesEnabled(false) - httpServer.Serve(s.listener) - s.done <- true -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go deleted file mode 100644 index 7400f56d4..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "io/ioutil" - "strings" - "testing" -) - -func TestHTTPChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token - resp, err := httpGet(uri) - if err != nil { - return err - } - defer resp.Body.Close() - - if want := "text/plain"; resp.Header.Get("Content-Type") != want { - t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - bodyStr := string(body) - - if bodyStr != chlng.KeyAuthorization { - t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization) - } - - return nil - } - solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestHTTPChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http2"} - solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}} - - if err := 
solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want, want18 := "invalid port 123456", "123456: invalid port"; !strings.HasSuffix(err.Error(), want) && !strings.HasSuffix(err.Error(), want18) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go deleted file mode 100644 index 33a48a331..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package acme - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestHTTPHeadUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - _, err := httpHead(ts.URL) - if err != nil { - t.Fatal(err) - } - - if method != "HEAD" { - t.Errorf("Expected method to be HEAD, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPGetUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpGet(ts.URL) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "GET" { - t.Errorf("Expected method to be GET, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPPostUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala")) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "POST" { - t.Errorf("Expected method to be POST, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestUserAgent(t *testing.T) { - ua := userAgent() - - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if strings.HasSuffix(ua, " ") { - t.Errorf("UA should not have trailing spaces; got '%s'", ua) - } - - // customize the UA by appending a value - UserAgent = "MyApp/1.2.3" - ua = userAgent() - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if !strings.Contains(ua, UserAgent) { - t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go deleted file mode 100644 index a39434342..000000000 --- 
a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go +++ /dev/null @@ -1,131 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "fmt" - "net/http" - "sync" - - "gopkg.in/square/go-jose.v1" -) - -type jws struct { - directoryURL string - privKey crypto.PrivateKey - nonces nonceManager -} - -func keyAsJWK(key interface{}) *jose.JsonWebKey { - switch k := key.(type) { - case *ecdsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "EC"} - case *rsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "RSA"} - - default: - return nil - } -} - -// Posts a JWS signed message to the specified URL. -// It does NOT close the response body, so the caller must -// do that if no error was returned. -func (j *jws) post(url string, content []byte) (*http.Response, error) { - signedContent, err := j.signContent(content) - if err != nil { - return nil, fmt.Errorf("Failed to sign content -> %s", err.Error()) - } - - resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize()))) - if err != nil { - return nil, fmt.Errorf("Failed to HTTP POST to %s -> %s", url, err.Error()) - } - - nonce, nonceErr := getNonceFromResponse(resp) - if nonceErr == nil { - j.nonces.Push(nonce) - } - - return resp, nil -} - -func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) { - - var alg jose.SignatureAlgorithm - switch k := j.privKey.(type) { - case *rsa.PrivateKey: - alg = jose.RS256 - case *ecdsa.PrivateKey: - if k.Curve == elliptic.P256() { - alg = jose.ES256 - } else if k.Curve == elliptic.P384() { - alg = jose.ES384 - } - } - - signer, err := jose.NewSigner(alg, j.privKey) - if err != nil { - return nil, fmt.Errorf("Failed to create jose signer -> %s", err.Error()) - } - signer.SetNonceSource(j) - - signed, err := signer.Sign(content) - if err != nil { - return nil, fmt.Errorf("Failed to sign content -> %s", err.Error()) - } - return signed, nil -} - -func (j *jws) Nonce() (string, error) { - if nonce, ok := j.nonces.Pop(); ok { - return nonce, nil - } - - return getNonce(j.directoryURL) -} - -type nonceManager struct { - nonces []string - sync.Mutex -} - -func (n *nonceManager) Pop() (string, bool) { - n.Lock() - defer n.Unlock() - - if len(n.nonces) == 0 { - return "", false - } - - nonce := n.nonces[len(n.nonces)-1] - n.nonces = n.nonces[:len(n.nonces)-1] - return nonce, true -} - -func (n *nonceManager) Push(nonce string) { - n.Lock() - defer n.Unlock() - n.nonces = append(n.nonces, nonce) -} - -func getNonce(url string) (string, error) { - resp, err := httpHead(url) - if err != nil { - return "", fmt.Errorf("Failed to get nonce from HTTP HEAD -> %s", err.Error()) - } - - return getNonceFromResponse(resp) -} - -func getNonceFromResponse(resp *http.Response) (string, error) { - nonce := resp.Header.Get("Replay-Nonce") - if nonce == "" { - return "", fmt.Errorf("Server did not respond with a proper nonce header.") - } - - return nonce, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go deleted file mode 100644 index 79ccf154e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go +++ /dev/null @@ -1,115 +0,0 @@ -package acme - -import ( - "time" - - "gopkg.in/square/go-jose.v1" -) - -type directory struct { - NewAuthzURL string `json:"new-authz"` - NewCertURL string `json:"new-cert"` - 
NewRegURL string `json:"new-reg"` - RevokeCertURL string `json:"revoke-cert"` -} - -type registrationMessage struct { - Resource string `json:"resource"` - Contact []string `json:"contact"` - Delete bool `json:"delete,omitempty"` -} - -// Registration is returned by the ACME server after the registration -// The client implementation should save this registration somewhere. -type Registration struct { - Resource string `json:"resource,omitempty"` - ID int `json:"id"` - Key jose.JsonWebKey `json:"key"` - Contact []string `json:"contact"` - Agreement string `json:"agreement,omitempty"` - Authorizations string `json:"authorizations,omitempty"` - Certificates string `json:"certificates,omitempty"` -} - -// RegistrationResource represents all important informations about a registration -// of which the client needs to keep track itself. -type RegistrationResource struct { - Body Registration `json:"body,omitempty"` - URI string `json:"uri,omitempty"` - NewAuthzURL string `json:"new_authzr_uri,omitempty"` - TosURL string `json:"terms_of_service,omitempty"` -} - -type authorizationResource struct { - Body authorization - Domain string - NewCertURL string - AuthURL string -} - -type authorization struct { - Resource string `json:"resource,omitempty"` - Identifier identifier `json:"identifier"` - Status string `json:"status,omitempty"` - Expires time.Time `json:"expires,omitempty"` - Challenges []challenge `json:"challenges,omitempty"` - Combinations [][]int `json:"combinations,omitempty"` -} - -type identifier struct { - Type string `json:"type"` - Value string `json:"value"` -} - -type validationRecord struct { - URI string `json:"url,omitempty"` - Hostname string `json:"hostname,omitempty"` - Port string `json:"port,omitempty"` - ResolvedAddresses []string `json:"addressesResolved,omitempty"` - UsedAddress string `json:"addressUsed,omitempty"` -} - -type challenge struct { - Resource string `json:"resource,omitempty"` - Type Challenge `json:"type,omitempty"` - Status string `json:"status,omitempty"` - URI string `json:"uri,omitempty"` - Token string `json:"token,omitempty"` - KeyAuthorization string `json:"keyAuthorization,omitempty"` - TLS bool `json:"tls,omitempty"` - Iterations int `json:"n,omitempty"` - Error RemoteError `json:"error,omitempty"` - ValidationRecords []validationRecord `json:"validationRecord,omitempty"` -} - -type csrMessage struct { - Resource string `json:"resource,omitempty"` - Csr string `json:"csr"` - Authorizations []string `json:"authorizations"` -} - -type revokeCertMessage struct { - Resource string `json:"resource"` - Certificate string `json:"certificate"` -} - -type deactivateAuthMessage struct { - Resource string `json:"resource,omitempty"` - Status string `jsom:"status"` -} - -// CertificateResource represents a CA issued certificate. -// PrivateKey, Certificate and IssuerCertificate are all -// already PEM encoded and can be directly written to disk. -// Certificate may be a certificate bundle, depending on the -// options supplied to create it. 
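// As the comment above notes, the PEM fields of a CertificateResource can be
// written straight to disk. A rough sketch of doing so, assuming the file also
// imports "io/ioutil" and "path/filepath"; the file names and permission bits
// are arbitrary choices for illustration, not part of this package.
func saveCertificateResource(dir string, res CertificateResource) error {
	// Keep the private key readable only by the owner.
	if err := ioutil.WriteFile(filepath.Join(dir, res.Domain+".key"), res.PrivateKey, 0600); err != nil {
		return err
	}
	if err := ioutil.WriteFile(filepath.Join(dir, res.Domain+".crt"), res.Certificate, 0644); err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dir, res.Domain+".issuer.crt"), res.IssuerCertificate, 0644)
}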
-type CertificateResource struct { - Domain string `json:"domain"` - CertURL string `json:"certUrl"` - CertStableURL string `json:"certStableUrl"` - AccountRef string `json:"accountRef,omitempty"` - PrivateKey []byte `json:"-"` - Certificate []byte `json:"-"` - IssuerCertificate []byte `json:"-"` - CSR []byte `json:"-"` -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/pop_challenge.go deleted file mode 100644 index 8d2a213b0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/pop_challenge.go +++ /dev/null @@ -1 +0,0 @@ -package acme diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go deleted file mode 100644 index d177ff07a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go +++ /dev/null @@ -1,28 +0,0 @@ -package acme - -import "time" - -// ChallengeProvider enables implementing a custom challenge -// provider. Present presents the solution to a challenge available to -// be solved. CleanUp will be called by the challenge if Present ends -// in a non-error state. -type ChallengeProvider interface { - Present(domain, token, keyAuth string) error - CleanUp(domain, token, keyAuth string) error -} - -// ChallengeProviderTimeout allows for implementing a -// ChallengeProvider where an unusually long timeout is required when -// waiting for an ACME challenge to be satisfied, such as when -// checking for DNS record progagation. If an implementor of a -// ChallengeProvider provides a Timeout method, then the return values -// of the Timeout method will be used when appropriate by the acme -// package. The interval value is the time between checks. -// -// The default values used for timeout and interval are 60 seconds and -// 2 seconds respectively. These are used when no Timeout method is -// defined for the ChallengeProvider. -type ChallengeProviderTimeout interface { - ChallengeProvider - Timeout() (timeout, interval time.Duration) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 deleted file mode 100644 index 3098f99b5..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 +++ /dev/null @@ -1,5 +0,0 @@ -domain company.com -nameserver 10.200.3.249 -nameserver 10.200.3.250:5353 -nameserver 2001:4860:4860::8844 -nameserver [10.0.0.1]:5353 diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go deleted file mode 100644 index 34383cbfa..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go +++ /dev/null @@ -1,67 +0,0 @@ -package acme - -import ( - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "log" -) - -type tlsSNIChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { - // FIXME: https://github.com/ietf-wg-acme/acme/pull/22 - // Currently we implement this challenge to track boulder, not the current spec! 
- - logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey) - if err != nil { - return err - } - - err = t.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := t.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge -func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) { - // generate a new RSA key for the certificates - tempPrivKey, err := generatePrivateKey(RSA2048) - if err != nil { - return tls.Certificate{}, "", err - } - rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) - rsaPrivPEM := pemEncode(rsaPrivKey) - - zBytes := sha256.Sum256([]byte(keyAuth)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - tempCertPEM, err := generatePemCert(rsaPrivKey, domain) - if err != nil { - return tls.Certificate{}, "", err - } - - certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) - if err != nil { - return tls.Certificate{}, "", err - } - - return certificate, domain, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go deleted file mode 100644 index df00fbb5a..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go +++ /dev/null @@ -1,62 +0,0 @@ -package acme - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" -) - -// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge -// It may be instantiated without using the NewTLSProviderServer function if -// you want only to use the default values. -type TLSProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 443 respectively. -func NewTLSProviderServer(iface, port string) *TLSProviderServer { - return &TLSProviderServer{iface: iface, port: port} -} - -// Present makes the keyAuth available as a cert -func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "443" - } - - cert, _, err := TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - tlsConf := new(tls.Config) - tlsConf.Certificates = []tls.Certificate{cert} - - s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) - if err != nil { - return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) - } - - s.done = make(chan bool) - go func() { - http.Serve(s.listener, nil) - s.done <- true - }() - return nil -} - -// CleanUp closes the HTTP server. 
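// A self-contained sketch of the name derivation that TLSSNI01ChallengeCert
// performs above: hash the key authorization with SHA-256, hex-encode the
// digest, and split it into two 32-character labels under ".acme.invalid".
// The key authorization value below is made up purely for illustration.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	keyAuth := "token.accountKeyThumbprint" // hypothetical key authorization
	sum := sha256.Sum256([]byte(keyAuth))
	z := hex.EncodeToString(sum[:])
	// Prints the two 32-character halves of the digest joined under .acme.invalid.
	fmt.Printf("%s.%s.acme.invalid\n", z[:32], z[32:])
}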
-func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go deleted file mode 100644 index 83b2833a9..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "strings" - "testing" -) - -func TestTLSSNIChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{ - InsecureSkipVerify: true, - }) - if err != nil { - t.Errorf("Expected to connect to challenge server without an error. %s", err.Error()) - } - - // Expect the server to only return one certificate - connState := conn.ConnectionState() - if count := len(connState.PeerCertificates); count != 1 { - t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count) - } - - remoteCert := connState.PeerCertificates[0] - if count := len(remoteCert.DNSNames); count != 1 { - t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count) - } - - zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - - if remoteCert.DNSNames[0] != domain { - t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0]) - } - - return nil - } - solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestTLSSNIChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"} - solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want, want18 := "invalid port 123456", "123456: invalid port"; !strings.HasSuffix(err.Error(), want) && !strings.HasSuffix(err.Error(), want18) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go deleted file mode 100644 index 2fa0db304..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package acme - -import ( - "fmt" - "time" -) - -// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. 
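// A brief usage sketch for WaitFor (declared just below): poll once per
// second for up to ten seconds. The deadline and the closure body are
// made-up illustrations, not code that existed in this package.
func waitForExample() error {
	deadline := time.Now().Add(5 * time.Second)
	return WaitFor(10*time.Second, 1*time.Second, func() (bool, error) {
		// Returning (true, nil) stops polling; a non-nil error is only
		// surfaced if the overall timeout is reached first.
		return time.Now().After(deadline), nil
	})
}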
-func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { - var lastErr string - timeup := time.After(timeout) - for { - select { - case <-timeup: - return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(interval) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go deleted file mode 100644 index 158af4116..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package acme - -import ( - "testing" - "time" -) - -func TestWaitForTimeout(t *testing.T) { - c := make(chan error) - go func() { - err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) { - return false, nil - }) - c <- err - }() - - timeout := time.After(4 * time.Second) - select { - case <-timeout: - t.Fatal("timeout exceeded") - case err := <-c: - if err == nil { - t.Errorf("expected timeout error; got %v", err) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitattributes b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitattributes deleted file mode 100644 index d2f212e5d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. -# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitignore b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitignore deleted file mode 100644 index 8339fd61d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/AUTHORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTING.md deleted file mode 100644 index 88dff59bc..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? 
- -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/PATENTS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/README b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/README deleted file mode 100644 index f1e0cbf94..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go cryptography libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/codereview.cfg b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp.go deleted file mode 100644 index 6bd347e28..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ /dev/null @@ -1,778 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses -// are signed messages attesting to the validity of a certificate for a small -// period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "math/big" - "strconv" - "time" -) - -var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) - -// ResponseStatus contains the result of an OCSP request. See -// https://tools.ietf.org/html/rfc6960#section-2.3 -type ResponseStatus int - -const ( - Success ResponseStatus = 0 - Malformed ResponseStatus = 1 - InternalError ResponseStatus = 2 - TryLater ResponseStatus = 3 - // Status code four is unused in OCSP. 
See - // https://tools.ietf.org/html/rfc6960#section-4.2.1 - SignatureRequired ResponseStatus = 5 - Unauthorized ResponseStatus = 6 -) - -func (r ResponseStatus) String() string { - switch r { - case Success: - return "success" - case Malformed: - return "malformed" - case InternalError: - return "internal error" - case TryLater: - return "try later" - case SignatureRequired: - return "signature required" - case Unauthorized: - return "unauthorized" - default: - return "unknown OCSP status: " + strconv.Itoa(int(r)) - } -} - -// ResponseError is an error that may be returned by ParseResponse to indicate -// that the response itself is an error, not just that its indicating that a -// certificate is revoked, unknown, etc. -type ResponseError struct { - Status ResponseStatus -} - -func (r ResponseError) Error() string { - return "ocsp: error from server: " + r.Status.String() -} - -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. - -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// https://tools.ietf.org/html/rfc2560#section-4.1.1 -type ocspRequest struct { - TBSRequest tbsRequest -} - -type tbsRequest struct { - Version int `asn1:"explicit,tag:0,default:0,optional"` - RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` - RequestList []request -} - -type request struct { - Cert certID -} - -type responseASN1 struct { - Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0,optional"` -} - -type responseBytes struct { - ResponseType asn1.ObjectIdentifier - Response []byte -} - -type basicResponse struct { - TBSResponseData responseData - SignatureAlgorithm pkix.AlgorithmIdentifier - Signature asn1.BitString - Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` -} - -type responseData struct { - Raw asn1.RawContent - Version int `asn1:"optional,default:0,explicit,tag:0"` - RawResponderID asn1.RawValue - ProducedAt time.Time `asn1:"generalized"` - Responses []singleResponse -} - -type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` - SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` -} - -type revokedInfo struct { - RevocationTime time.Time `asn1:"generalized"` - Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` -} - -var ( - oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} - oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} - oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} - oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 
4} -) - -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -var signatureAlgorithmDetails = []struct { - algo x509.SignatureAlgorithm - oid asn1.ObjectIdentifier - pubKeyAlgo x509.PublicKeyAlgorithm - hash crypto.Hash -}{ - {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, - {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, - {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, - {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, - {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, - {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, - {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, - {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, - {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, - {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, - {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, - {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { - var pubType x509.PublicKeyAlgorithm - - switch pub := pub.(type) { - case *rsa.PublicKey: - pubType = x509.RSA - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureSHA256WithRSA - sigAlgo.Parameters = asn1.RawValue{ - Tag: 5, - } - - case *ecdsa.PublicKey: - pubType = x509.ECDSA - - switch pub.Curve { - case elliptic.P224(), elliptic.P256(): - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 - case elliptic.P384(): - hashFunc = crypto.SHA384 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 - case elliptic.P521(): - hashFunc = crypto.SHA512 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 - default: - err = errors.New("x509: unknown elliptic curve") - } - - default: - err = errors.New("x509: only RSA and ECDSA keys supported") - } - - if err != nil { - return - } - - if requestedSigAlgo == 0 { - return - } - - found := false - for _, details := range signatureAlgorithmDetails { - if details.algo == requestedSigAlgo { - if details.pubKeyAlgo != pubType { - err = errors.New("x509: requested SignatureAlgorithm does not match private key type") - return - } - sigAlgo.Algorithm, hashFunc = details.oid, details.hash - if hashFunc == 0 { - err = errors.New("x509: cannot sign with hash function requested") - return - } - found = true - break - } - } - - if !found { - err = errors.New("x509: unknown SignatureAlgorithm") - } - - return -} - -// TODO(agl): this is taken from crypto/x509 and so should probably be exported -// from crypto/x509 or crypto/x509/pkix. 
-func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { - for _, details := range signatureAlgorithmDetails { - if oid.Equal(details.oid) { - return details.algo - } - } - return x509.UnknownSignatureAlgorithm -} - -// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. -func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target) { - return hash - } - } - return crypto.Hash(0) -} - -func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { - for hash, oid := range hashOIDs { - if hash == target { - return oid - } - } - return nil -} - -// This is the exposed reflection of the internal OCSP structures. - -// The status values that can be expressed in OCSP. See RFC 6960. -const ( - // Good means that the certificate is valid. - Good = iota - // Revoked means that the certificate has been deliberately revoked. - Revoked - // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown - // ServerFailed is unused and was never used (see - // https://go-review.googlesource.com/#/c/18944). ParseResponse will - // return a ResponseError when an error response is parsed. - ServerFailed -) - -// The enumerated reasons for revoking a certificate. See RFC 5280. -const ( - Unspecified = iota - KeyCompromise = iota - CACompromise = iota - AffiliationChanged = iota - Superseded = iota - CessationOfOperation = iota - CertificateHold = iota - _ = iota - RemoveFromCRL = iota - PrivilegeWithdrawn = iota - AACompromise = iota -) - -// Request represents an OCSP request. See RFC 6960. -type Request struct { - HashAlgorithm crypto.Hash - IssuerNameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// Marshal marshals the OCSP request to ASN.1 DER encoded form. -func (req *Request) Marshal() ([]byte, error) { - hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) - if hashAlg == nil { - return nil, errors.New("Unknown hash algorithm") - } - return asn1.Marshal(ocspRequest{ - tbsRequest{ - Version: 0, - RequestList: []request{ - { - Cert: certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashAlg, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - req.IssuerNameHash, - req.IssuerKeyHash, - req.SerialNumber, - }, - }, - }, - }, - }) -} - -// Response represents an OCSP response containing a single SingleResponse. See -// RFC 6960. -type Response struct { - // Status is one of {Good, Revoked, Unknown} - Status int - SerialNumber *big.Int - ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time - RevocationReason int - Certificate *x509.Certificate - // TBSResponseData contains the raw bytes of the signed response. If - // Certificate is nil then this can be used to verify Signature. - TBSResponseData []byte - Signature []byte - SignatureAlgorithm x509.SignatureAlgorithm - - // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. - // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. - // If zero, the default is crypto.SHA1. - IssuerHash crypto.Hash - - // RawResponderName optionally contains the DER-encoded subject of the - // responder certificate. Exactly one of RawResponderName and - // ResponderKeyHash is set. - RawResponderName []byte - // ResponderKeyHash optionally contains the SHA-1 hash of the - // responder's public key. Exactly one of RawResponderName and - // ResponderKeyHash is set. 
- ResponderKeyHash []byte - - // Extensions contains raw X.509 extensions from the singleExtensions field - // of the OCSP response. When parsing certificates, this can be used to - // extract non-critical extensions that are not parsed by this package. When - // marshaling OCSP responses, the Extensions field is ignored, see - // ExtraExtensions. - Extensions []pkix.Extension - - // ExtraExtensions contains extensions to be copied, raw, into any marshaled - // OCSP response (in the singleExtensions field). Values override any - // extensions that would otherwise be produced based on the other fields. The - // ExtraExtensions field is not populated when parsing certificates, see - // Extensions. - ExtraExtensions []pkix.Extension -} - -// These are pre-serialized error responses for the various non-success codes -// defined by OCSP. The Unauthorized code in particular can be used by an OCSP -// responder that supports only pre-signed responses as a response to requests -// for certificates with unknown status. See RFC 5019. -var ( - MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} -) - -// CheckSignatureFrom checks that the signature in resp is a valid signature -// from issuer. This should only be used if resp.Certificate is nil. Otherwise, -// the OCSP response contained an intermediate certificate that created the -// signature. That signature is checked by ParseResponse and only -// resp.Certificate remains to be validated. -func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { - return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) -} - -// ParseError results from an invalid OCSP response. -type ParseError string - -func (p ParseError) Error() string { - return string(p) -} - -// ParseRequest parses an OCSP request in DER form. It only supports -// requests for a single certificate. Signed requests are not supported. -// If a request includes a signature, it will result in a ParseError. -func ParseRequest(bytes []byte) (*Request, error) { - var req ocspRequest - rest, err := asn1.Unmarshal(bytes, &req) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP request") - } - - if len(req.TBSRequest.RequestList) == 0 { - return nil, ParseError("OCSP request contains no request body") - } - innerRequest := req.TBSRequest.RequestList[0] - - hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) - if hashFunc == crypto.Hash(0) { - return nil, ParseError("OCSP request uses unknown hash function") - } - - return &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: innerRequest.Cert.NameHash, - IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, - SerialNumber: innerRequest.Cert.SerialNumber, - }, nil -} - -// ParseResponse parses an OCSP response in DER form. It only supports -// responses for a single certificate. If the response contains a certificate -// then the signature over the response is checked. If issuer is not nil then -// it will be used to validate the signature or embedded certificate. -// -// Invalid responses and parse failures will result in a ParseError. -// Error responses will result in a ResponseError. 
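// A sketch of how a client typically combines the exported CreateRequest and
// ParseResponse functions described above: build a request, POST it to the
// responder listed in the certificate's OCSPServer field, and parse the DER
// reply. The package name, minimal error handling, and lack of retries are
// simplifications for illustration; this is not code from this package.
package ocspclient

import (
	"bytes"
	"crypto/x509"
	"errors"
	"io/ioutil"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

func fetchOCSP(cert, issuer *x509.Certificate) (*ocsp.Response, error) {
	if len(cert.OCSPServer) == 0 {
		return nil, errors.New("certificate lists no OCSP responder")
	}
	reqDER, err := ocsp.CreateRequest(cert, issuer, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(cert.OCSPServer[0], "application/ocsp-request", bytes.NewReader(reqDER))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	der, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return ocsp.ParseResponse(der, issuer)
}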
-func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { - return ParseResponseForCert(bytes, nil, issuer) -} - -// ParseResponseForCert parses an OCSP response in DER form and searches for a -// Response relating to cert. If such a Response is found and the OCSP response -// contains a certificate then the signature over the response is checked. If -// issuer is not nil then it will be used to validate the signature or embedded -// certificate. -// -// Invalid responses and parse failures will result in a ParseError. -// Error responses will result in a ResponseError. -func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { - var resp responseASN1 - rest, err := asn1.Unmarshal(bytes, &resp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - if status := ResponseStatus(resp.Status); status != Success { - return nil, ResponseError{status} - } - - if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { - return nil, ParseError("bad OCSP response type") - } - - var basicResp basicResponse - rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) - if err != nil { - return nil, err - } - - if len(basicResp.Certificates) > 1 { - return nil, ParseError("OCSP response contains bad number of certificates") - } - - if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { - return nil, ParseError("OCSP response contains bad number of responses") - } - - var singleResp singleResponse - if cert == nil { - singleResp = basicResp.TBSResponseData.Responses[0] - } else { - match := false - for _, resp := range basicResp.TBSResponseData.Responses { - if cert == nil || cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { - singleResp = resp - match = true - break - } - } - if !match { - return nil, ParseError("no response matching the supplied certificate") - } - } - - ret := &Response{ - TBSResponseData: basicResp.TBSResponseData.Raw, - Signature: basicResp.Signature.RightAlign(), - SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), - Extensions: singleResp.SingleExtensions, - SerialNumber: singleResp.CertID.SerialNumber, - ProducedAt: basicResp.TBSResponseData.ProducedAt, - ThisUpdate: singleResp.ThisUpdate, - NextUpdate: singleResp.NextUpdate, - } - - // Handle the ResponderID CHOICE tag. ResponderID can be flattened into - // TBSResponseData once https://go-review.googlesource.com/34503 has been - // released. 
- rawResponderID := basicResp.TBSResponseData.RawResponderID - switch rawResponderID.Tag { - case 1: // Name - var rdn pkix.RDNSequence - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder name") - } - ret.RawResponderName = rawResponderID.Bytes - case 2: // KeyHash - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder key hash") - } - default: - return nil, ParseError("invalid responder id tag") - } - - if len(basicResp.Certificates) > 0 { - ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) - if err != nil { - return nil, err - } - - if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { - return nil, ParseError("bad signature on embedded certificate: " + err.Error()) - } - - if issuer != nil { - if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - } else if issuer != nil { - if err := ret.CheckSignatureFrom(issuer); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - - for _, ext := range singleResp.SingleExtensions { - if ext.Critical { - return nil, ParseError("unsupported critical extension") - } - } - - for h, oid := range hashOIDs { - if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) { - ret.IssuerHash = h - break - } - } - if ret.IssuerHash == 0 { - return nil, ParseError("unsupported issuer hash algorithm") - } - - switch { - case bool(singleResp.Good): - ret.Status = Good - case bool(singleResp.Unknown): - ret.Status = Unknown - default: - ret.Status = Revoked - ret.RevokedAt = singleResp.Revoked.RevocationTime - ret.RevocationReason = int(singleResp.Revoked.Reason) - } - - return ret, nil -} - -// RequestOptions contains options for constructing OCSP requests. -type RequestOptions struct { - // Hash contains the hash function that should be used when - // constructing the OCSP request. If zero, SHA-1 will be used. - Hash crypto.Hash -} - -func (opts *RequestOptions) hash() crypto.Hash { - if opts == nil || opts.Hash == 0 { - // SHA-1 is nearly universally used in OCSP. - return crypto.SHA1 - } - return opts.Hash -} - -// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If -// opts is nil then sensible defaults are used. -func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { - hashFunc := opts.hash() - - // OCSP seems to be the only place where these raw hash identifiers are - // used. 
I took the following from - // http://msdn.microsoft.com/en-us/library/ff635603.aspx - _, ok := hashOIDs[hashFunc] - if !ok { - return nil, x509.ErrUnsupportedAlgorithm - } - - if !hashFunc.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - h := opts.hash().New() - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - req := &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: cert.SerialNumber, - } - return req.Marshal() -} - -// CreateResponse returns a DER-encoded OCSP response with the specified contents. -// The fields in the response are populated as follows: -// -// The responder cert is used to populate the responder's name field, and the -// certificate itself is provided alongside the OCSP response signature. -// -// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. -// -// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, -// RevocationReason, ThisUpdate, and NextUpdate fields. -// -// If template.IssuerHash is not set, SHA1 will be used. -// -// The ProducedAt date is automatically set to the current date, to the nearest minute. -func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - if template.IssuerHash == 0 { - template.IssuerHash = crypto.SHA1 - } - hashOID := getOIDFromHashAlgorithm(template.IssuerHash) - if hashOID == nil { - return nil, errors.New("unsupported issuer hash algorithm") - } - - if !template.IssuerHash.Available() { - return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) - } - h := template.IssuerHash.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - innerResponse := singleResponse{ - CertID: certID{ - HashAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - NameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: template.SerialNumber, - }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), - SingleExtensions: template.ExtraExtensions, - } - - switch template.Status { - case Good: - innerResponse.Good = true - case Unknown: - innerResponse.Unknown = true - case Revoked: - innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt.UTC(), - Reason: asn1.Enumerated(template.RevocationReason), - } - } - - rawResponderID := asn1.RawValue{ - Class: 2, // context-specific - Tag: 1, // Name (explicit tag) - IsCompound: true, - Bytes: responderCert.RawSubject, - } - tbsResponseData := responseData{ - Version: 0, - RawResponderID: rawResponderID, - ProducedAt: time.Now().Truncate(time.Minute).UTC(), - Responses: []singleResponse{innerResponse}, - } - - tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) - if err != nil { - return nil, err - } - - hashFunc, 
signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) - if err != nil { - return nil, err - } - - responseHash := hashFunc.New() - responseHash.Write(tbsResponseDataDER) - signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) - if err != nil { - return nil, err - } - - response := basicResponse{ - TBSResponseData: tbsResponseData, - SignatureAlgorithm: signatureAlgorithm, - Signature: asn1.BitString{ - Bytes: signature, - BitLength: 8 * len(signature), - }, - } - if template.Certificate != nil { - response.Certificates = []asn1.RawValue{ - asn1.RawValue{FullBytes: template.Certificate.Raw}, - } - } - responseDER, err := asn1.Marshal(response) - if err != nil { - return nil, err - } - - return asn1.Marshal(responseASN1{ - Status: asn1.Enumerated(Success), - Response: responseBytes{ - ResponseType: idPKIXOCSPBasic, - Response: responseDER, - }, - }) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp_test.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp_test.go deleted file mode 100644 index df674b374..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/crypto/ocsp/ocsp_test.go +++ /dev/null @@ -1,875 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package ocsp - -import ( - "bytes" - "crypto" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "math/big" - "reflect" - "testing" - "time" -) - -func TestOCSPDecode(t *testing.T) { - responseBytes, _ := hex.DecodeString(ocspResponseHex) - resp, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Fatal(err) - } - - responderCert, _ := hex.DecodeString(startComResponderCertHex) - responder, err := x509.ParseCertificate(responderCert) - if err != nil { - t.Fatal(err) - } - - expected := Response{ - Status: Good, - SerialNumber: big.NewInt(0x1d0fa), - RevocationReason: Unspecified, - ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC), - NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC), - RawResponderName: responder.RawSubject, - } - - if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) { - t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate) - } - - if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate) - } - - if resp.Status != expected.Status { - t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status) - } - - if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 { - t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber) - } - - if resp.RevocationReason != expected.RevocationReason { - t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason) - } - - if !bytes.Equal(resp.RawResponderName, expected.RawResponderName) { - t.Errorf("resp.RawResponderName: got %x, want %x", resp.RawResponderName, expected.RawResponderName) - } - - if !bytes.Equal(resp.ResponderKeyHash, expected.ResponderKeyHash) { - t.Errorf("resp.ResponderKeyHash: got %x, want %x", resp.ResponderKeyHash, expected.ResponderKeyHash) - } -} - -func TestOCSPDecodeWithoutCert(t *testing.T) { - responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex) - _, err := ParseResponse(responseBytes, nil) - if err != nil { - 
t.Error(err) - } -} - -func TestOCSPDecodeWithExtensions(t *testing.T) { - responseBytes, _ := hex.DecodeString(ocspResponseWithCriticalExtensionHex) - _, err := ParseResponse(responseBytes, nil) - if err == nil { - t.Error(err) - } - - responseBytes, _ = hex.DecodeString(ocspResponseWithExtensionHex) - response, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Fatal(err) - } - - if len(response.Extensions) != 1 { - t.Errorf("len(response.Extensions): got %v, want %v", len(response.Extensions), 1) - } - - extensionBytes := response.Extensions[0].Value - expectedBytes, _ := hex.DecodeString(ocspExtensionValueHex) - if !bytes.Equal(extensionBytes, expectedBytes) { - t.Errorf("response.Extensions[0]: got %x, want %x", extensionBytes, expectedBytes) - } -} - -func TestOCSPSignature(t *testing.T) { - issuerCert, _ := hex.DecodeString(startComHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - response, _ := hex.DecodeString(ocspResponseHex) - if _, err := ParseResponse(response, issuer); err != nil { - t.Error(err) - } -} - -func TestOCSPRequest(t *testing.T) { - leafCert, _ := hex.DecodeString(leafCertHex) - cert, err := x509.ParseCertificate(leafCert) - if err != nil { - t.Fatal(err) - } - - issuerCert, _ := hex.DecodeString(issuerCertHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - request, err := CreateRequest(cert, issuer, nil) - if err != nil { - t.Fatal(err) - } - - expectedBytes, _ := hex.DecodeString(ocspRequestHex) - if !bytes.Equal(request, expectedBytes) { - t.Errorf("request: got %x, wanted %x", request, expectedBytes) - } - - decodedRequest, err := ParseRequest(expectedBytes) - if err != nil { - t.Fatal(err) - } - - if decodedRequest.HashAlgorithm != crypto.SHA1 { - t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1) - } - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - _, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo) - if err != nil { - t.Fatal(err) - } - - h := sha1.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) { - t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash) - } - - if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) { - t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash) - } - - if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 { - t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber) - } - - marshaledRequest, err := decodedRequest.Marshal() - if err != nil { - t.Fatal(err) - } - - if bytes.Compare(expectedBytes, marshaledRequest) != 0 { - t.Errorf( - "Marshaled request doesn't match expected: wanted %x, got %x", - expectedBytes, - marshaledRequest, - ) - } -} - -func TestOCSPResponse(t *testing.T) { - leafCert, _ := hex.DecodeString(leafCertHex) - leaf, err := x509.ParseCertificate(leafCert) - if err != nil { - t.Fatal(err) - } - - issuerCert, _ := hex.DecodeString(issuerCertHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - responderCert, _ := hex.DecodeString(responderCertHex) - responder, err := x509.ParseCertificate(responderCert) - if err != nil { - t.Fatal(err) - } - - responderPrivateKeyDER, _ := 
hex.DecodeString(responderPrivateKeyHex) - responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER) - if err != nil { - t.Fatal(err) - } - - extensionBytes, _ := hex.DecodeString(ocspExtensionValueHex) - extensions := []pkix.Extension{ - pkix.Extension{ - Id: ocspExtensionOID, - Critical: false, - Value: extensionBytes, - }, - } - - thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC) - nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC) - template := Response{ - Status: Revoked, - SerialNumber: leaf.SerialNumber, - ThisUpdate: thisUpdate, - NextUpdate: nextUpdate, - RevokedAt: thisUpdate, - RevocationReason: KeyCompromise, - Certificate: responder, - ExtraExtensions: extensions, - } - - template.IssuerHash = crypto.MD5 - _, err = CreateResponse(issuer, responder, template, responderPrivateKey) - if err == nil { - t.Fatal("CreateResponse didn't fail with non-valid template.IssuerHash value crypto.MD5") - } - - testCases := []struct { - name string - issuerHash crypto.Hash - }{ - {"Zero value", 0}, - {"crypto.SHA1", crypto.SHA1}, - {"crypto.SHA256", crypto.SHA256}, - {"crypto.SHA384", crypto.SHA384}, - {"crypto.SHA512", crypto.SHA512}, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - template.IssuerHash = tc.issuerHash - responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey) - if err != nil { - t.Fatalf("CreateResponse failed: %s", err) - } - - resp, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Fatalf("ParseResponse failed: %s", err) - } - - if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) { - t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, template.ThisUpdate) - } - - if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate) - } - - if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) { - t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt) - } - - if !reflect.DeepEqual(resp.Extensions, template.ExtraExtensions) { - t.Errorf("resp.Extensions: got %v, want %v", resp.Extensions, template.ExtraExtensions) - } - - delay := time.Since(resp.ProducedAt) - if delay < -time.Hour || delay > time.Hour { - t.Errorf("resp.ProducedAt: got %s, want close to current time (%s)", resp.ProducedAt, time.Now()) - } - - if resp.Status != template.Status { - t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status) - } - - if resp.SerialNumber.Cmp(template.SerialNumber) != 0 { - t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber) - } - - if resp.RevocationReason != template.RevocationReason { - t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason) - } - - expectedHash := tc.issuerHash - if tc.issuerHash == 0 { - expectedHash = crypto.SHA1 - } - - if resp.IssuerHash != expectedHash { - t.Errorf("resp.IssuerHash: got %d, want %d", resp.IssuerHash, expectedHash) - } - }) - } -} - -func TestErrorResponse(t *testing.T) { - responseBytes, _ := hex.DecodeString(errorResponseHex) - _, err := ParseResponse(responseBytes, nil) - - respErr, ok := err.(ResponseError) - if !ok { - t.Fatalf("expected ResponseError from ParseResponse but got %#v", err) - } - if respErr.Status != Malformed { - t.Fatalf("expected Malformed status from ParseResponse but got %d", respErr.Status) - } -} - -func TestOCSPDecodeMultiResponse(t *testing.T) { - inclCert, _ := 
hex.DecodeString(ocspMultiResponseCertHex) - cert, err := x509.ParseCertificate(inclCert) - if err != nil { - t.Fatal(err) - } - - responseBytes, _ := hex.DecodeString(ocspMultiResponseHex) - resp, err := ParseResponseForCert(responseBytes, cert, nil) - if err != nil { - t.Fatal(err) - } - - if resp.SerialNumber.Cmp(cert.SerialNumber) != 0 { - t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, cert.SerialNumber) - } -} - -func TestOCSPDecodeMultiResponseWithoutMatchingCert(t *testing.T) { - wrongCert, _ := hex.DecodeString(startComHex) - cert, err := x509.ParseCertificate(wrongCert) - if err != nil { - t.Fatal(err) - } - - responseBytes, _ := hex.DecodeString(ocspMultiResponseHex) - _, err = ParseResponseForCert(responseBytes, cert, nil) - want := ParseError("no response matching the supplied certificate") - if err != want { - t.Errorf("err: got %q, want %q", err, want) - } -} - -// This OCSP response was taken from Thawte's public OCSP responder. -// To recreate: -// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443 -// Copy and paste the first certificate into /tmp/cert.crt and the second into -// /tmp/intermediate.crt -// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der -// Then hex encode the result: -// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")' - -const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" + - "c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" + - "6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" + - "5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" + - "2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" + - "b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" + - "30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" + - "000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" + - "fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" + - "467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" + - "4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" + - "672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" + - "d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" + - "17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" + - "e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" + - "06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" + - "040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" + - "69676974616c204365727469666963617465205369676e696e6731383036060355040313" + - "2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" + - "746520536572766572204341301e170d3037313032353030323330365a170d3132313032" + - "333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" + - "617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" + - "2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" + - "0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" + - "7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" + - "a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" + - 
"fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" + - "4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" + - "ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" + - "3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" + - "29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" + - "01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" + - "0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" + - "bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" + - "6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" + - "55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" + - "4469676974616c204365727469666963617465205369676e696e67312930270603550403" + - "13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" + - "0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" + - "6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" + - "6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" + - "8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" + - "2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" + - "1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" + - "c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" + - "f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" + - "a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" + - "6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42" - -const startComResponderCertHex = "308204b23082039aa003020102020101300d06092a864886f70d010105050030818c310b" + - "300906035504061302494c31163014060355040a130d5374617274436f6d204c74642e31" + - "2b3029060355040b1322536563757265204469676974616c204365727469666963617465" + - "205369676e696e67313830360603550403132f5374617274436f6d20436c617373203120" + - "5072696d61727920496e7465726d65646961746520536572766572204341301e170d3037" + - "313032353030323330365a170d3132313032333030323330365a304c310b300906035504" + - "061302494c31163014060355040a130d5374617274436f6d204c74642e31253023060355" + - "0403131c5374617274436f6d20436c6173732031204f435350205369676e657230820122" + - "300d06092a864886f70d01010105000382010f003082010a0282010100b9561b4c453187" + - "17178084e96e178df2255e18ed8d8ecc7c2b7b51a6c1c2e6bf0aa3603066f132fe10ae97" + - "b50e99fa24b83fc53dd2777496387d14e1c3a9b6a4933e2ac12413d085570a95b8147414" + - "a0bc007c7bcf222446ef7f1a156d7ea1c577fc5f0facdfd42eb0f5974990cb2f5cefebce" + - "ef4d1bdc7ae5c1075c5a99a93171f2b0845b4ff0864e973fcfe32f9d7511ff87a3e94341" + - "0c90a4493a306b6944359340a9ca96f02b66ce67f028df2980a6aaee8d5d5d452b8b0eb9" + - "3f923cc1e23fcccbdbe7ffcb114d08fa7a6a3c404f825d1a0e715935cf623a8c7b596700" + - "14ed0622f6089a9447a7a19010f7fe58f84129a2765ea367824d1c3bb2fda30853020301" + - "0001a382015c30820158300c0603551d130101ff04023000300b0603551d0f0404030203" + - "a8301e0603551d250417301506082b0601050507030906092b0601050507300105301d06" + - "03551d0e0416041445e0a36695414c5dd449bc00e33cdcdbd2343e173081a80603551d23" + - "0481a030819d8014eb4234d098b0ab9ff41b6b08f7cc642eef0e2c45a18181a47f307d31" + - "0b300906035504061302494c31163014060355040a130d5374617274436f6d204c74642e" + - "312b3029060355040b1322536563757265204469676974616c2043657274696669636174" + - 
"65205369676e696e6731293027060355040313205374617274436f6d2043657274696669" + - "636174696f6e20417574686f7269747982010a30230603551d12041c301a861868747470" + - "3a2f2f7777772e737461727473736c2e636f6d2f302c06096086480186f842010d041f16" + - "1d5374617274436f6d205265766f636174696f6e20417574686f72697479300d06092a86" + - "4886f70d01010505000382010100182d22158f0fc0291324fa8574c49bb8ff2835085adc" + - "bf7b7fc4191c397ab6951328253fffe1e5ec2a7da0d50fca1a404e6968481366939e666c" + - "0a6209073eca57973e2fefa9ed1718e8176f1d85527ff522c08db702e3b2b180f1cbff05" + - "d98128252cf0f450f7dd2772f4188047f19dc85317366f94bc52d60f453a550af58e308a" + - "aab00ced33040b62bf37f5b1ab2a4f7f0f80f763bf4d707bc8841d7ad9385ee2a4244469" + - "260b6f2bf085977af9074796048ecc2f9d48a1d24ce16e41a9941568fec5b42771e118f1" + - "6c106a54ccc339a4b02166445a167902e75e6d8620b0825dcd18a069b90fd851d10fa8ef" + - "fd409deec02860d26d8d833f304b10669b42" - -const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" + - "0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" + - "3029060355040b1322536563757265204469676974616c20436572746966696361746520" + - "5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" + - "74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" + - "3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" + - "0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" + - "74616c204365727469666963617465205369676e696e67313830360603550403132f5374" + - "617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" + - "53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" + - "4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" + - "a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" + - "3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" + - "2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" + - "48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" + - "c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" + - "c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" + - "030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" + - "b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" + - "17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" + - "05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" + - "082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" + - "667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" + - "772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" + - "3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" + - "20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" + - "687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" + - "06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" + - "696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" + - "2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" + - "e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" + - "c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" + - 
"46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" + - "7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" + - "ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" + - "4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" + - "7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" + - "c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" + - "434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" + - "4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" + - "214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" + - "c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" + - "4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" + - "d2ae83a7a8ce5ba7" - -const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" + - "01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" + - "3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" + - "456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" + - "1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" + - "5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" + - "82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" + - "5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" + - "8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" + - "6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" + - "0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" + - "9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" + - "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" + - "5a35fca2e054dfa8" - -// PKIX nonce extension -var ocspExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2} -var ocspExtensionValueHex = "0403000000" - -const ocspResponseWithCriticalExtensionHex = "308204fe0a0100a08204f7308204f306092b0601050507300101048204e4308204e03081" + - "dba003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + - "0f32303136303130343137303130305a3081a53081a23049300906052b0e03021a050004" + - "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + - "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + - "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + - "3130303730373138333531375aa1193017301506092b06010505073001020101ff040504" + - "03000000300d06092a864886f70d01010b0500038201010031c730ca60a7a0d92d8e4010" + - "911b469de95b4d27e89de6537552436237967694f76f701cf6b45c932bd308bca4a8d092" + - "5c604ba94796903091d9e6c000178e72c1f0a24a277dd262835af5d17d3f9d7869606c9f" + - "e7c8e708a41645699895beee38bfa63bb46296683761c5d1d65439b8ab868dc3017c9eeb" + - "b70b82dbf3a31c55b457d48bb9e82b335ed49f445042eaf606b06a3e0639824924c89c63" + - "eccddfe85e6694314138b2536f5e15e07085d0f6e26d4b2f8244bab0d70de07283ac6384" + - "a0501fc3dea7cf0adfd4c7f34871080900e252ddc403e3f0265f2a704af905d3727504ed" + - "28f3214a219d898a022463c78439799ca81c8cbafdbcec34ea937cd6a08202ea308202e6" + - "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + - "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + - 
"33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + - "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + - "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + - "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + - "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + - "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + - "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + - "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + - "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + - "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + - "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + - "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + - "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + - "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + - "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + - "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + - "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + - "3a25439a94299a65a709756c7a3e568be049d5c38839" - -const ocspResponseWithExtensionHex = "308204fb0a0100a08204f4308204f006092b0601050507300101048204e1308204dd3081" + - "d8a003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + - "0f32303136303130343136353930305a3081a230819f3049300906052b0e03021a050004" + - "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + - "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + - "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + - "3130303730373138333531375aa1163014301206092b0601050507300102040504030000" + - "00300d06092a864886f70d01010b05000382010100c09a33e0b2324c852421bb83f85ac9" + - "9113f5426012bd2d2279a8166e9241d18a33c870894250622ffc7ed0c4601b16d624f90b" + - "779265442cdb6868cf40ab304ab4b66e7315ed02cf663b1601d1d4751772b31bc299db23" + - "9aebac78ed6797c06ed815a7a8d18d63cfbb609cafb47ec2e89e37db255216eb09307848" + - "d01be0a3e943653c78212b96ff524b74c9ec456b17cdfb950cc97645c577b2e09ff41dde" + - "b03afb3adaa381cc0f7c1d95663ef22a0f72f2c45613ae8e2b2d1efc96e8463c7d1d8a1d" + - "7e3b35df8fe73a301fc3f804b942b2b3afa337ff105fc1462b7b1c1d75eb4566c8665e59" + - "f80393b0adbf8004ff6c3327ed34f007cb4a3348a7d55e06e3a08202ea308202e6308202" + - "e2308201caa003020102020101300d06092a864886f70d01010b05003019311730150603" + - "550403130e4f43535020526573706f6e646572301e170d3135303133303135353033335a" + - "170d3136303133303135353033335a3019311730150603550403130e4f43535020526573" + - "706f6e64657230820122300d06092a864886f70d01010105000382010f003082010a0282" + - "010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616ec5265b" + - "56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbcbec75a" + - "70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b723350f0" + - "a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b8989ad0f6" + - "3aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d285b6a" + - "04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e655b104" + - 
"9a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31a77dcf" + - "920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030130603" + - "551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d06092a" + - "864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab861231c15f" + - "d5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d22889064f4" + - "aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f326709dce5" + - "2c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156d67156" + - "e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff59e2005" + - "d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf966705d" + - "e17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d93a2543" + - "9a94299a65a709756c7a3e568be049d5c38839" - -const ocspMultiResponseHex = "30820ee60a0100a0820edf30820edb06092b060105050730010104820ecc30820ec83082" + - "0839a216041445ac2ecd75f53f1cf6e4c51d3de0047ad0aa7465180f3230313530363032" + - "3130303033305a3082080c3065303d300906052b0e03021a05000414f7452a0080601527" + - "72e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204" + - "5456656a8000180f32303135303630323039303230375aa011180f323031353036303331" + - "30303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e7" + - "6e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456656b80" + - "00180f32303135303630323039303230375aa011180f3230313530363033313030303330" + - "5a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0" + - "f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456656c8000180f3230" + - "3135303630323039303230375aa011180f32303135303630333130303033305a3065303d" + - "300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414ed" + - "d8f2ee977252853a330b297a18f5c993853b3f02045456656d8000180f32303135303630" + - "323039303230375aa011180f32303135303630333130303033305a3065303d300906052b" + - "0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee9772" + - "52853a330b297a18f5c993853b3f02045456656e8000180f323031353036303230393032" + - "30375aa011180f32303135303630333130303033305a3065303d300906052b0e03021a05" + - "000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b" + - "297a18f5c993853b3f02045456656f8000180f32303135303630323039303230375aa011" + - "180f32303135303630333130303033305a3065303d300906052b0e03021a05000414f745" + - "2a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c9" + - "93853b3f0204545665708000180f32303135303630323039303230375aa011180f323031" + - "35303630333130303033305a3065303d300906052b0e03021a05000414f7452a00806015" + - "2772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02" + - "04545665718000180f32303135303630323039303230375aa011180f3230313530363033" + - "3130303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135" + - "e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f020454566572" + - "8000180f32303135303630323039303230375aa011180f32303135303630333130303033" + - "305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fd" + - "e0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204545665738000180f32" + - "303135303630323039303230375aa011180f32303135303630333130303033305a306530" + - "3d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414" + - "edd8f2ee977252853a330b297a18f5c993853b3f0204545665748000180f323031353036" + - "30323039303230375aa011180f32303135303630333130303033305a3065303d30090605" + - 
"2b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee97" + - "7252853a330b297a18f5c993853b3f0204545665758000180f3230313530363032303930" + - "3230375aa011180f32303135303630333130303033305a3065303d300906052b0e03021a" + - "05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a33" + - "0b297a18f5c993853b3f0204545665768000180f32303135303630323039303230375aa0" + - "11180f32303135303630333130303033305a3065303d300906052b0e03021a05000414f7" + - "452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5" + - "c993853b3f0204545665778000180f32303135303630323039303230375aa011180f3230" + - "3135303630333130303033305a3065303d300906052b0e03021a05000414f7452a008060" + - "152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f" + - "0204545665788000180f32303135303630323039303230375aa011180f32303135303630" + - "333130303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a1" + - "35e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204545665" + - "798000180f32303135303630323039303230375aa011180f323031353036303331303030" + - "33305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52" + - "fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456657a8000180f" + - "32303135303630323039303230375aa011180f32303135303630333130303033305a3065" + - "303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f15804" + - "14edd8f2ee977252853a330b297a18f5c993853b3f02045456657b8000180f3230313530" + - "3630323039303230375aa011180f32303135303630333130303033305a3065303d300906" + - "052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee" + - "977252853a330b297a18f5c993853b3f02045456657c8000180f32303135303630323039" + - "303230375aa011180f32303135303630333130303033305a3065303d300906052b0e0302" + - "1a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a" + - "330b297a18f5c993853b3f02045456657d8000180f32303135303630323039303230375a" + - "a011180f32303135303630333130303033305a300d06092a864886f70d01010505000382" + - "01010016b73b92859979f27d15eb018cf069eed39c3d280213565f3026de11ba15bdb94d" + - "764cf2d0fdd204ef926c588d7b183483c8a2b1995079c7ed04dcefcc650c1965be4b6832" + - "a8839e832f7f60f638425eccdf9bc3a81fbe700fda426ddf4f06c29bee431bbbe81effda" + - "a60b7da5b378f199af2f3c8380be7ba6c21c8e27124f8a4d8989926aea19055700848d33" + - "799e833512945fd75364edbd2dd18b783c1e96e332266b17979a0b88c35b43f47c87c493" + - "19155056ad8dbbae5ff2afad3c0e1c69ed111206ffda49875e8e4efc0926264823bc4423" + - "c8a002f34288c4bc22516f98f54fc609943721f590ddd8d24f989457526b599b0eb75cb5" + - "a80da1ad93a621a08205733082056f3082056b30820453a0030201020204545638c4300d" + - "06092a864886f70d01010b0500308182310b300906035504061302555331183016060355" + - "040a130f552e532e20476f7665726e6d656e7431233021060355040b131a446570617274" + - "6d656e74206f662074686520547265617375727931223020060355040b13194365727469" + - "6669636174696f6e20417574686f7269746965733110300e060355040b13074f43494f20" + - "4341301e170d3135303332303131353531335a170d3135303633303034303030305a3081" + - "98310b300906035504061302555331183016060355040a130f552e532e20476f7665726e" + - "6d656e7431233021060355040b131a4465706172746d656e74206f662074686520547265" + - "617375727931223020060355040b131943657274696669636174696f6e20417574686f72" + - "69746965733110300e060355040b13074f43494f204341311430120603550403130b4f43" + - "5350205369676e657230820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100c1b6fe1ba1ad50bb98c855811acbd67fe68057f48b8e08d3800e7f2c51b7" + - 
"9e20551934971fd92b9c9e6c49453097927cba83a94c0b2fea7124ba5ac442b38e37dba6" + - "7303d4962dd7d92b22a04b0e0e182e9ea67620b1c6ce09ee607c19e0e6e3adae81151db1" + - "2bb7f706149349a292e21c1eb28565b6839df055e1a838a772ff34b5a1452618e2c26042" + - "705d53f0af4b57aae6163f58216af12f3887813fe44b0321827b3a0c52b0e47d0aab94a2" + - "f768ab0ba3901d22f8bb263823090b0e37a7f8856db4b0d165c42f3aa7e94f5f6ce1855e" + - "98dc57adea0ae98ad39f67ecdec00b88685566e9e8d69f6cefb6ddced53015d0d3b862bc" + - "be21f3d72251eefcec730203010001a38201cf308201cb300e0603551d0f0101ff040403" + - "020780306b0603551d2004643062300c060a60864801650302010502300c060a60864801" + - "650302010503300c060a60864801650302010504300c060a60864801650302010507300c" + - "060a60864801650302010508300c060a6086480165030201030d300c060a608648016503" + - "020103113081e506082b060105050701010481d83081d5303006082b0601050507300286" + - "24687474703a2f2f706b692e74726561732e676f762f746f63615f65655f6169612e7037" + - "633081a006082b060105050730028681936c6461703a2f2f6c6461702e74726561732e67" + - "6f762f6f753d4f43494f25323043412c6f753d43657274696669636174696f6e25323041" + - "7574686f7269746965732c6f753d4465706172746d656e742532306f6625323074686525" + - "323054726561737572792c6f3d552e532e253230476f7665726e6d656e742c633d55533f" + - "634143657274696669636174653b62696e61727930130603551d25040c300a06082b0601" + - "0505070309300f06092b060105050730010504020500301f0603551d23041830168014a2" + - "13a8e5c607546c243d4eb72b27a2a7711ab5af301d0603551d0e0416041451f98046818a" + - "e46d953ac90c210ccfaa1a06980c300d06092a864886f70d01010b050003820101003a37" + - "0b301d14ffdeb370883639bec5ae6f572dcbddadd672af16ee2a8303316b14e1fbdca8c2" + - "8f4bad9c7b1410250e149c14e9830ca6f17370a8d13151205d956e28c141cc0500379596" + - "c5b9239fcfa3d2de8f1d4f1a2b1bf2d1851bed1c86012ee8135bdc395cd4496ce69fadd0" + - "3b682b90350ca7b4f458190b7a0ab5c33a04cf1347a77d541877a380a4c94988c5658908" + - "44fdc22637a72b9fa410333e2caf969477f9fe07f50e3681c204fb3bf073b9da01cd8d91" + - "8044c40b1159955af12a3263ab1d34119d7f59bfa6cae88ed058addc4e08250263f8f836" + - "2f5bdffd45636fea7474c60a55c535954477b2f286e1b2535f0dd12c162f1b353c370e08" + - "be67" - -const ocspMultiResponseCertHex = "308207943082067ca003020102020454566573300d06092a864886f70d01010b05003081" + - "82310b300906035504061302555331183016060355040a130f552e532e20476f7665726e" + - "6d656e7431233021060355040b131a4465706172746d656e74206f662074686520547265" + - "617375727931223020060355040b131943657274696669636174696f6e20417574686f72" + - "69746965733110300e060355040b13074f43494f204341301e170d313530343130313535" + - "3733385a170d3138303431303136323733385a30819d310b300906035504061302555331" + - "183016060355040a130f552e532e20476f7665726e6d656e7431233021060355040b131a" + - "4465706172746d656e74206f662074686520547265617375727931253023060355040b13" + - "1c427572656175206f66207468652046697363616c20536572766963653110300e060355" + - "040b130744657669636573311630140603550403130d706b692e74726561732e676f7630" + - "820122300d06092a864886f70d01010105000382010f003082010a0282010100c7273623" + - "8c49c48bf501515a2490ef6e5ae0c06e0ad2aa9a6bb77f3d0370d846b2571581ebf38fd3" + - "1948daad3dec7a4da095f1dcbe9654e65bcf7acdfd4ee802421dad9b90536c721d2bca58" + - "8413e6bfd739a72470560bb7d64f9a09284f90ff8af1d5a3c5c84d0f95a00f9c6d988dd0" + - "d87f1d0d3344580901c955139f54d09de0acdbd3322b758cb0c58881bf04913243401f44" + - "013fd9f6d8348044cc8bb0a71978ad93366b2a4687a5274b2ee07d0fb40225453eb244ed" + - "b20152251ac77c59455260ff07eeceb3cb3c60fb8121cf92afd3daa2a4650e1942ccb555" + - 
"de10b3d481feb299838ef05d0fd1810b146753472ae80da65dd34da25ca1f89971f10039" + - "0203010001a38203f3308203ef300e0603551d0f0101ff0404030205a030170603551d20" + - "0410300e300c060a60864801650302010503301106096086480186f84201010404030206" + - "4030130603551d25040c300a06082b060105050703013082010806082b06010505070101" + - "0481fb3081f8303006082b060105050730028624687474703a2f2f706b692e7472656173" + - "2e676f762f746f63615f65655f6169612e7037633081a006082b06010505073002868193" + - "6c6461703a2f2f6c6461702e74726561732e676f762f6f753d4f43494f25323043412c6f" + - "753d43657274696669636174696f6e253230417574686f7269746965732c6f753d446570" + - "6172746d656e742532306f6625323074686525323054726561737572792c6f3d552e532e" + - "253230476f7665726e6d656e742c633d55533f634143657274696669636174653b62696e" + - "617279302106082b060105050730018615687474703a2f2f6f6373702e74726561732e67" + - "6f76307b0603551d1104743072811c6373612d7465616d4066697363616c2e7472656173" + - "7572792e676f768210706b692e74726561737572792e676f768210706b692e64696d632e" + - "6468732e676f76820d706b692e74726561732e676f76811f6563622d686f7374696e6740" + - "66697363616c2e74726561737572792e676f76308201890603551d1f048201803082017c" + - "3027a025a0238621687474703a2f2f706b692e74726561732e676f762f4f43494f5f4341" + - "332e63726c3082014fa082014ba0820147a48197308194310b3009060355040613025553" + - "31183016060355040a130f552e532e20476f7665726e6d656e7431233021060355040b13" + - "1a4465706172746d656e74206f662074686520547265617375727931223020060355040b" + - "131943657274696669636174696f6e20417574686f7269746965733110300e060355040b" + - "13074f43494f2043413110300e0603550403130743524c313430398681aa6c6461703a2f" + - "2f6c6461702e74726561732e676f762f636e3d43524c313430392c6f753d4f43494f2532" + - "3043412c6f753d43657274696669636174696f6e253230417574686f7269746965732c6f" + - "753d4465706172746d656e742532306f6625323074686525323054726561737572792c6f" + - "3d552e532e253230476f7665726e6d656e742c633d55533f636572746966696361746552" + - "65766f636174696f6e4c6973743b62696e617279302b0603551d1004243022800f323031" + - "35303431303135353733385a810f32303138303431303136323733385a301f0603551d23" + - "041830168014a213a8e5c607546c243d4eb72b27a2a7711ab5af301d0603551d0e041604" + - "14b0869c12c293914cd460e33ed43e6c5a26e0d68f301906092a864886f67d074100040c" + - "300a1b0456382e31030203a8300d06092a864886f70d01010b050003820101004968d182" + - "8f9efdc147e747bb5dda15536a42a079b32d3d7f87e619b483aeee70b7e26bda393c6028" + - "7c733ecb468fe8b8b11bf809ff76add6b90eb25ad8d3a1052e43ee281e48a3a1ebe7efb5" + - "9e2c4a48765dedeb23f5346242145786cc988c762d230d28dd33bf4c2405d80cbb2cb1d6" + - "4c8f10ba130d50cb174f6ffb9cfc12808297a2cefba385f4fad170f39b51ebd87c12abf9" + - "3c51fc000af90d8aaba78f48923908804a5eb35f617ccf71d201e3708a559e6d16f9f13e" + - "074361eb9007e28d86bb4e0bfa13aad0e9ddd9124e84519de60e2fc6040b18d9fd602b02" + - "684b4c071c3019fc842197d00c120c41654bcbfbc4a096a1c637b79112b81ce1fa3899f9" - -const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" + - "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" + - "bcbb235d44ccc7dba62e72" - -const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" + - "864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" + - "7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" + - "333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" + - "766572204341202d20436c617373203331493047060355040b13407777772e7665726973" + - 
"69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" + - "204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" + - "170d3133313233313233353935395a3068310b3009060355040613025553311330110603" + - "550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" + - "173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" + - "2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" + - "818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" + - "23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" + - "5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" + - "c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" + - "82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" + - "01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" + - "7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" + - "2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" + - "726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" + - "03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" + - "30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" + - "20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" + - "092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" + - "81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" + - "67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" + - "9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817" - -const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" + - "864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" + - "566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" + - "6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" + - "1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" + - "301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" + - "0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" + - "69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" + - "3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" + - "6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" + - "5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" + - "e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" + - "bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" + - "3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" + - "f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" + - "0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" + - "2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" + - "636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" + - "030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" + - "0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" + - "a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" + - "300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" + - 
"adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" + - "f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" + - "e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" + - "d68071" - -// Key and certificate for the OCSP responder were not taken from the Thawte -// responder, since CreateResponse requires that we have the private key. -// Instead, they were generated randomly. -const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" + - "1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" + - "1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" + - "fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" + - "c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" + - "7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" + - "1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" + - "6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" + - "72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" + - "56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" + - "575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" + - "679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" + - "05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" + - "5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" + - "1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" + - "ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" + - "daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" + - "e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" + - "07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" + - "87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" + - "327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" + - "497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" + - "6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" + - "4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" + - "7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" + - "0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" + - "d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" + - "c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" + - "c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" + - "0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" + - "120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" + - "45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" + - "48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" + - "6bdaa059" - -const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + - "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + - "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + - "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + - 
"c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + - "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + - "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + - "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + - "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + - "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + - "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + - "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + - "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + - "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + - "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + - "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + - "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + - "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + - "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + - "3a25439a94299a65a709756c7a3e568be049d5c38839" - -const errorResponseHex = "30030a0101" diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitattributes b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitattributes deleted file mode 100644 index d2f212e5d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. -# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitignore b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitignore deleted file mode 100644 index 8339fd61d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/AUTHORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTING.md deleted file mode 100644 index 88dff59bc..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. 
What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/PATENTS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/README b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/README deleted file mode 100644 index 6b13d8e50..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go networking libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/codereview.cfg b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index f143ed6a1..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. 
-// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. 
- // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context_test.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context_test.go deleted file mode 100644 index 62844131b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. 
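The package documentation deleted above spells out the conventions for golang.org/x/net/context (since folded into the standard library's "context" package): pass a Context as the first parameter, derive child contexts with WithCancel/WithTimeout/WithValue, select on Done() for cancelation, and key request-scoped values with unexported types. The following is a minimal sketch of those conventions, assuming only the API shown in the deleted file; the slowQuery helper and requestIDKey name are illustrative and not part of the vendored code:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// key is an unexported type so other packages cannot collide with our values.
type key int

const requestIDKey key = 0

// slowQuery simulates work that honors cancelation by selecting on ctx.Done().
func slowQuery(ctx context.Context) error {
	select {
	case <-time.After(2 * time.Second):
		return nil // finished before the deadline
	case <-ctx.Done():
		return ctx.Err() // context.Canceled or context.DeadlineExceeded
	}
}

func main() {
	// Derive a context carrying a request-scoped value and a short deadline.
	ctx := context.WithValue(context.Background(), requestIDKey, "req-42")
	ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel() // release the timer even if slowQuery returns early

	if id, ok := ctx.Value(requestIDKey).(string); ok {
		fmt.Println("request id:", id)
	}
	if err := slowQuery(ctx); err != nil {
		fmt.Println("query aborted:", err) // "context deadline exceeded"
	}
}
```

Keying values with an unexported type keeps Value lookups collision-free across packages, which is also why the deleted tests below define key1 and key2 as distinct int-based types.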
-type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) - testDeadline(c, 2*timeUnit, t) -} - -func TestTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 1*timeUnit) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o = otherContext{c} - c, _ = WithTimeout(o, 3*timeUnit) - testDeadline(c, 2*timeUnit, t) -} - -func TestCanceledTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 2*timeUnit) - o := otherContext{c} - c, cancel := WithTimeout(o, 4*timeUnit) - cancel() - time.Sleep(1 * timeUnit) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 16, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TODO(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + 100*time.Millisecond): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} - -func TestCancelRemoves(t *testing.T) { - checkChildren := func(when string, ctx Context, want int) { - if got := len(ctx.(*cancelCtx).children); got != want { - t.Errorf("%s: context has %d children, want %d", when, got, want) - } - } - - ctx, _ := WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel := WithCancel(ctx) - checkChildren("with WithCancel child ", ctx, 1) - cancel() - checkChildren("after cancelling WithCancel child", ctx, 0) - - ctx, _ = WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel = WithTimeout(ctx, 60*time.Minute) - checkChildren("with WithTimeout child ", ctx, 1) - cancel() - checkChildren("after cancelling WithTimeout child", ctx, 0) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/go17.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b7d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
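On Go 1.7 and newer this shim simply forwards to the standard library, so the cancellation contract reads the same either way: Done closes once cancel is called or the parent finishes, and callers should always release the context by calling cancel. A minimal caller-side sketch; the worker goroutine and the 50ms delay are invented purely for illustration.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// Derive a cancelable child of Background and make sure cancel always
	// runs so the context's resources are released.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel() // some external condition tells the work to stop
	}()

	<-ctx.Done()           // unblocks once cancel has been called
	fmt.Println(ctx.Err()) // prints "context canceled"
}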
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/pre_go17.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592df..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. 
The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. 
If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
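The clamping rule above (a requested deadline later than the parent's is ignored, so the derived context behaves like the parent) is easy to observe from a caller. A rough sketch, with the 50ms and one-hour durations chosen arbitrarily.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// The parent expires after 50ms.
	parent, cancelParent := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancelParent()

	// Asking for a deadline an hour away does not extend anything: the child
	// is clamped to the parent's earlier deadline.
	child, cancelChild := context.WithDeadline(parent, time.Now().Add(time.Hour))
	defer cancelChild()

	if d, ok := child.Deadline(); ok {
		fmt.Println("clamped:", d.Sub(time.Now()) < time.Hour) // true
	}

	<-child.Done()
	fmt.Println(child.Err()) // context deadline exceeded
}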
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc36..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index a2d499529..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,713 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go based on the authoritative -// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat -// -// The version is derived from -// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat -// and a human-readable form is at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" -// and -version "an explicit version string". - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" 
- case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -const ( - defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" - gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" -) - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - - // validSuffixRE is used to check that the entries in the public suffix - // list are in canonical form (after Punycode encoding). Specifically, - // capital letters are not allowed. - validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) - dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) - - comments = flag.Bool("comments", false, "generate table.go comments, for debugging") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - if *url != defaultURL { - return fmt.Errorf("-version was not specified, and the -url is not the default one") - } - sha, date, err := gitCommit() - if err != nil { - return err - } - *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann = false - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffixRE.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" 
|| strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". - default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - if err := generate(printReal, &root, "table.go"); err != nil { - return err - } - if err := generate(printTest, &root, "table_test.go"); err != nil { - return err - } - return nil -} - -func generate(p func(io.Writer, *node) error, root *node, filename string) error { - buf := new(bytes.Buffer) - if err := p(buf, root); err != nil { - return err - } - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - return ioutil.WriteFile(filename, b, 0644) -} - -func gitCommit() (sha, date string, retErr error) { - res, err := http.Get(gitCommitURL) - if err != nil { - return "", "", err - } - if res.StatusCode != http.StatusOK { - return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if m := shaRE.FindSubmatch(b); m != nil { - sha = string(m[1]) - } - if m := dateRE.FindSubmatch(b); m != nil { - date = string(m[1]) - } - if sha == "" || date == "" { - retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) - } - return sha, date, retErr -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. 
-const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - if *comments { - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } else { - fmt.Fprintf(w, "0x%x,\n", c) - } - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. 
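The generated comments above describe how each nodes entry and each children entry packs its fields into a single uint32. A decoding sketch using the same bit widths; the two sample values passed in main are made up solely to exercise the decoders.

package main

import "fmt"

// Bit widths as defined in this generator and in the generated table.go.
const (
	nodesBitsChildren   = 9
	nodesBitsICANN      = 1
	nodesBitsTextOffset = 15
	nodesBitsTextLength = 6

	childrenBitsWildcard = 1
	childrenBitsNodeType = 2
	childrenBitsHi       = 14
	childrenBitsLo       = 14
)

// decodeNode unpacks a nodes entry, working upward from the low bits:
// text length, text offset, ICANN bit, then the index into children.
func decodeNode(n uint32) (children, icann, textOffset, textLength uint32) {
	textLength = n & (1<<nodesBitsTextLength - 1)
	n >>= nodesBitsTextLength
	textOffset = n & (1<<nodesBitsTextOffset - 1)
	n >>= nodesBitsTextOffset
	icann = n & (1<<nodesBitsICANN - 1)
	n >>= nodesBitsICANN
	children = n & (1<<nodesBitsChildren - 1)
	return
}

// decodeChildren unpacks a children entry into the low (inclusive) and high
// (exclusive) node indexes of the children, the node type, and the wildcard bit.
func decodeChildren(c uint32) (lo, hi, nodeType uint32, wildcard bool) {
	lo = c & (1<<childrenBitsLo - 1)
	c >>= childrenBitsLo
	hi = c & (1<<childrenBitsHi - 1)
	c >>= childrenBitsHi
	nodeType = c & (1<<childrenBitsNodeType - 1)
	c >>= childrenBitsNodeType
	wildcard = c&(1<<childrenBitsWildcard-1) != 0
	return
}

func main() {
	fmt.Println(decodeNode(0x012345ab))     // made-up packed node
	fmt.Println(decodeChildren(0x80014002)) // made-up packed children entry
}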
-func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. - // Can there be matches for longer prefix lengths, introduced by the merge? - // I believe that any such matches would by necessity have been eliminated - // during substring removal or merged at a higher prefix length. For - // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" - // would yield "abcde", which could be merged with "bcdef." However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index 8bbf3bcd7..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package publicsuffix provides a public suffix list based on data from -// http://publicsuffix.org/. A public suffix is one under which Internet users -// can directly register names. -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. 
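As the package comment above says, the point of list.go is to expose the compiled-in public suffix data, both as direct lookups and as a cookiejar.PublicSuffixList. A short usage sketch; the example domains are illustrative and mirror the ICANN/private distinction described in the PublicSuffix documentation below.

package main

import (
	"fmt"
	"net/http"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// Direct lookups against the compiled-in list.
	ps, icann := publicsuffix.PublicSuffix("foo.dyndns.org")
	fmt.Println(ps, icann) // "dyndns.org" false (privately managed suffix)

	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.example.co.uk")
	fmt.Println(etld1, err) // "example.co.uk" <nil>

	// A common use: keep an http.Client's cookie jar from sharing cookies
	// across unrelated registered domains.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	client := &http.Client{Jar: jar}
	_ = client
}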
- -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is privately -// managed. For example, foo.org and foo.co.uk are ICANN domains, -// foo.dyndns.org and foo.blogspot.co.uk are private domains. -// -// Use cases for distinguishing ICANN domains like foo.com from private -// domains like foo.appspot.com can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, wildcard := domain, len(domain), false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) - icann = u&(1<>= nodesBitsICANN - u = children[u&(1<>= childrenBitsLo - hi = u & (1<>= childrenBitsHi - switch u & (1<>= childrenBitsNodeType - wildcard = u&(1<>= nodesBitsTextLength - offset := x & (1< len(b[j]) -} - -// eTLDPlusOneTestCases come from -// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt -var eTLDPlusOneTestCases = []struct { - domain, want string -}{ - // Empty input. - {"", ""}, - // Unlisted TLD. - {"example", ""}, - {"example.example", "example.example"}, - {"b.example.example", "example.example"}, - {"a.b.example.example", "example.example"}, - // TLD with only 1 rule. - {"biz", ""}, - {"domain.biz", "domain.biz"}, - {"b.domain.biz", "domain.biz"}, - {"a.b.domain.biz", "domain.biz"}, - // TLD with some 2-level rules. - {"com", ""}, - {"example.com", "example.com"}, - {"b.example.com", "example.com"}, - {"a.b.example.com", "example.com"}, - {"uk.com", ""}, - {"example.uk.com", "example.uk.com"}, - {"b.example.uk.com", "example.uk.com"}, - {"a.b.example.uk.com", "example.uk.com"}, - {"test.ac", "test.ac"}, - // TLD with only 1 (wildcard) rule. - {"mm", ""}, - {"c.mm", ""}, - {"b.c.mm", "b.c.mm"}, - {"a.b.c.mm", "b.c.mm"}, - // More complex TLD. - {"jp", ""}, - {"test.jp", "test.jp"}, - {"www.test.jp", "test.jp"}, - {"ac.jp", ""}, - {"test.ac.jp", "test.ac.jp"}, - {"www.test.ac.jp", "test.ac.jp"}, - {"kyoto.jp", ""}, - {"test.kyoto.jp", "test.kyoto.jp"}, - {"ide.kyoto.jp", ""}, - {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, - {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, - {"c.kobe.jp", ""}, - {"b.c.kobe.jp", "b.c.kobe.jp"}, - {"a.b.c.kobe.jp", "b.c.kobe.jp"}, - {"city.kobe.jp", "city.kobe.jp"}, - {"www.city.kobe.jp", "city.kobe.jp"}, - // TLD with a wildcard rule and exceptions. - {"ck", ""}, - {"test.ck", ""}, - {"b.test.ck", "b.test.ck"}, - {"a.b.test.ck", "b.test.ck"}, - {"www.ck", "www.ck"}, - {"www.www.ck", "www.ck"}, - // US K12. 
- {"us", ""}, - {"test.us", "test.us"}, - {"www.test.us", "test.us"}, - {"ak.us", ""}, - {"test.ak.us", "test.ak.us"}, - {"www.test.ak.us", "test.ak.us"}, - {"k12.ak.us", ""}, - {"test.k12.ak.us", "test.k12.ak.us"}, - {"www.test.k12.ak.us", "test.k12.ak.us"}, - // Punycoded IDN labels - {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, - {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, - {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, - {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, - {"xn--55qx5d.cn", ""}, - {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, - {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, - {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, - {"xn--fiqs8s", ""}, -} - -func TestEffectiveTLDPlusOne(t *testing.T) { - for _, tc := range eTLDPlusOneTestCases { - got, _ := EffectiveTLDPlusOne(tc.domain) - if got != tc.want { - t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table.go deleted file mode 100644 index 50f070a92..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table.go +++ /dev/null @@ -1,9253 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = "publicsuffix.org's public_suffix_list.dat, git revision f47d806df99585862c8426c3e064a50eb5a278f5 (2017-06-14T11:49:01Z)" - -const ( - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 -) - -// numTLD is the number of top level domains. -const numTLD = 1549 - -// Text is the combined text of all labels. 
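table.go is entirely machine generated: every label is crushed into the single text constant that follows, and each node refers back into it by the packed (offset, length) pair described above, so recovering a label is just a slice, much as nodeLabel in list.go does. A sketch with a toy buffer taken from the start of the real constant and made-up offsets.

package main

import "fmt"

// A toy stand-in for the generated text constant; the real one is thousands
// of characters long, and labels overlap to save space.
const text = "bifukagawalterbihorology"

// label recovers one label from its packed text offset and length.
func label(offset, length uint32) string {
	return text[offset : offset+length]
}

func main() {
	// Overlapping slices of the same buffer: "bifuka" and "kagawa" share "ka".
	fmt.Println(label(0, 6)) // "bifuka"
	fmt.Println(label(4, 6)) // "kagawa"
}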
-const text = "bifukagawalterbihorologybikedagestangeorgeorgiaxasnesoddenmarkha" + - "ngelskjakdnepropetrovskiervaapsteiermarkaragandabruzzoologicalvi" + - "nklein-addrammenuernberggfarmerseine12bilbaogakidsmynasushiobara" + - "gusartsalangeninohekinannestadray-dnsiskinkyotobetsumidatlantica" + - "tholicheltenham-radio-opencraftranagatorodoybillustrationinomiya" + - "konojosoyrorosalondonetskarpaczeladzjavald-aostarnbergladegreevj" + - "e-og-hornnesaltdalimitedraydnsupdaternopilawabioceanographiquebi" + - "rdartcenterprisesakikuchikuseikarugamvikaruizawabirkenesoddtange" + - "novaraumalopolskanlandrivelandrobaknoluoktachikawakembuchikumaga" + - "yagawakkanaibetsubamericanfamilydscloudcontrolledekafjordrudunsa" + - "lvadordalibabalatinord-aurdalvdalaskanittedallasalleasinglesuran" + - "certmgretagajobojinzais-a-candidatebirthplacebjarkoybjerkreimbal" + - "sfjordgcahcesuolocus-1bjugnirasakis-a-catererblockbustermezlglas" + - "sassinationalheritagematsubarakawagoebloombergbauernishiazais-a-" + - "celticsfanishigoddabloxcmsalzburgliwicebluedancebmoattachmentsam" + - "egawabmsamnangerbmwegroweibolzanordkappgafanquannefrankfurtjmaxx" + - "xboxenapponazure-mobilebnpparibaselburglobalashovhachinohedmarka" + - "rumaifarmsteadupontariomutashinais-a-chefarsundurbanamexnethnolo" + - "gybnrweirbonnishiharabookinglobodoes-itvedestrandurhamburglogowf" + - "ashionishiizunazukis-a-conservativefsnillfjordvrcambridgestonexu" + - "s-2bootsamsclubindalimoliserniaboschaefflerdalindashorokanaiebos" + - "tikasaokaminokawanishiaizubangebostonakijinsekikogentingloppenza" + - "ogashimadachicagoboatsamsungmbhartiffanybotanicalgardenishikatak" + - "ayamatta-varjjatjometlifeinsurancebotanicgardenishikatsuragithub" + - "usercontentjxfinitybotanybouncemerckmsdnipropetrovskjervoyagebou" + - "nty-fullensakerrypropertiesandvikcoromantovalle-d-aostatic-acces" + - "sanfranciscofreakunemurorangeiseiyoichippubetsubetsugaruhrboutiq" + - "uebecngminakamichiharabozentsujiiebplacedogawarabikomaezakirunor" + - "dlandvrdnsangoppdalindesnesanjournalismailillesandefjordyndns-at" + - "-workinggroupaleobrandywinevalleybrasiliabresciabrindisibenikebr" + - "istoloslocalhistorybritishcolumbialowiezachpomorskienishikawazuk" + - "amitondabayashiogamagoriziabroadcastlegallocalhostrodawaravennag" + - "asukebroadwaybroke-itkmaxxjaworznowtvalled-aostavangerbrokerbron" + - "noysundyndns-blogdnsannanishimerabrothermesaverdeatnurembergmode" + - "nakasatsunais-a-cpadualstackspace-to-rentalstomakomaibarabrowser" + - "safetymarketsannohelplfinancialivornobrumunddalombardiamondsanok" + - "ashibatakashimaseratis-a-cubicle-slavellinotteroybrunelasticbean" + - "stalkashiharabrusselsantabarbarabruxellesantacruzsantafedjeffers" + - "onishinomiyashironobryanskleppalermomahachijorpelandyndns-freebo" + - "x-ostrowwlkpmgmxn--0trq7p7nnishinoomotegobrynewhollandyndns-home" + - "dnsanukis-a-democratmpalmspringsakerbuskerudinewmexicodyn-vpnplu" + - "sterbuzenishinoshimattelefonicarbonia-iglesias-carboniaiglesiasc" + - "arboniabuzzpamperedchefastlylbaltimore-og-romsdalwaysdatabasebal" + - "langenoamishirasatochigiessensiositelemarkarateu-1bwhalingrimsta" + - "dyndns-ipirangaulardalombardynamisches-dnsaotomemergencyachtsapo" + - "dlasiellaktyubinskiptveterinairealtorlandyndns-mailomzaporizhzhe" + - "guris-a-designerimarumorimachidabzhitomirumalselvendrellorenskog" + - "ripescaravantaacondoshichinohealth-carereformitakeharaconference" + - "constructionconsuladoesntexistanbullensvanguardyndns1consultanth" + - 
"ropologyconsultingvolluroycontactoyotsukaidownloadynnsaskatchewa" + - "ncontemporaryarteducationalchikugodoharuovatoyouracontractorsken" + - "conventureshinodesashibetsuikinderoycookingchannelblagdenesnaase" + - "ralingenkainanaejrietisalatinabenonichernivtsiciliacoolkuszczytn" + - "ore-og-uvdalutskasuyameldaluxembourgrpanamacooperaunitenrightath" + - "omeftpanasonichernovtsykkylvenetogakushimotoganewspapercopenhage" + - "ncyclopedichirurgiens-dentistes-en-francecorsicagliaridagawarsza" + - "washingtondclkaszubycorvettevadsoccertificationcosenzagancosidns" + - "dojoetsuwanouchikujogaszkoladbrokesassaris-a-huntercostumedio-ca" + - "mpidano-mediocampidanomediocouchpotatofriesatxn--11b4c3dynv6coun" + - "ciluxurycouponsaudacoursesauheradynvpnchiryukyuragifuchungbukhar" + - "acq-acranbrookuwanalyticsavannahgacreditcardyroyrvikingruecredit" + - "unioncremonashgabadaddjambyluzerncrewiiheyakagecricketrzyncrimea" + - "st-kazakhstanangercrotonextdirectoystre-slidrettozawacrownprovid" + - "ercrsvparaglidinguitarsaves-the-whalessandria-trani-barletta-and" + - "riatranibarlettaandriacruisesavonaplesaxocryptonomichigangwoncui" + - "sinellahppiacenzakopanerairguardiannakadomarinebraskaunjargalsac" + - "eoculturalcentertainmentozsdeltaitogliattiresbschokoladencuneocu" + - "pcakecxn--12c1fe0bradescorporationcyberlevagangaviikanonjis-a-kn" + - "ightpointtokaizukamikitayamatsuris-a-landscapercymrussiacyonabar" + - "ulvikatowicecyouthdfcbankatsushikabeeldengeluidfidonnakamurataji" + - "mibuildingulenfieldfiguerestaurantraniandriabarlettatraniandriaf" + - "ilateliafilegearthachiojiyahoofilminamidaitomangotsukisosakitaga" + - "wafinalfinancefineartschwarzgwangjuifminamiechizenfinlandfinnoyf" + - "irebaseapparisor-fronfirenzefirestonefirmdaleirvikaufenfishingol" + - "ffanschweizwildlifedorainfracloudfrontdoorfitjarmeniafitnessettl" + - "ementranoyfjalerflesbergunmarburguovdageaidnuslivinghistoryflick" + - "ragerotikakamigaharaflightsciencecentersciencehistoryflirflogint" + - "ogurafloraflorencefloridavvesiidazaifudaigojomedizinhistorisches" + - "cientistoragefloripaderbornfloristanohatakahamangyshlakasamatsud" + - "ontexisteingeekautokeinoflorogerscjohnsonflowerscotlandflynnhuba" + - "mblefrakkestadiscountysnes3-sa-east-1fndfoodnetworkshoppingushik" + - "amifuranortonsbergxn--12co0c3b4evalleaostatoilfor-ourfor-someetn" + - "edalfor-theaterforexrothachirogatakahatakaishimogosenforgotdnscr" + - "apper-siteforli-cesena-forlicesenaforlikescandynamic-dnscrapping" + - "forsaleitungsenforsandasuolodingenfortmissoulair-traffic-control" + - "leyfortworthadanosegawaforuminamifuranofosneserveftparliamentran" + - "sportransurlfotaruis-a-lawyerfoxfordedyn-ip24freeboxoservegame-s" + - "erversailleservehalflifestylefreemasonryfreetlservehttparmafreib" + - "urgfreightcminamiiselectrapaniimimatakatoris-a-liberalfresenius-" + - "3fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-ve" + - "nezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriu" + - "live-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiulia" + - "friulivgiuliafrlfroganservehumourfrognfrolandfrom-akrehamnfrom-a" + - "lfrom-arqhadselfiparocherkasyno-dserveirchitachinakagawassamukaw" + - "ataricohdatsunanjoburgriwataraidyndns-office-on-the-webcampobass" + - "ociatesapporofrom-azfrom-capebretonamiastapleserveminecraftravel" + - "channelfrom-collectionfrom-ctravelersinsurancefrom-dchitosetogit" + - "suldalotenkawafrom-defenseljordfrom-flanderservemp3from-gausdalf" + - "rom-higashiagatsumagoizumizakirkeneservep2parservepicservequakef" + - 
"rom-iafrom-idfrom-ilfrom-incheonfrom-kservesarcasmatartanddesign" + - "from-kyowariasahikawafrom-lajollamericanexpressexyfrom-maniwakur" + - "atextileksvikazofrom-mdfrom-megurokunohealthcareerservicesettsur" + - "geonshalloffamemorialfrom-microsoftbankazunofrom-mnfrom-modellin" + - "gfrom-msevastopolefrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktrdfr" + - "om-njcbnlfrom-nminamiizukamisatokamachintaifun-dnsaliasdaburfrom" + - "-nvalledaostavernfrom-nyfrom-ohkurafrom-oketohmannorth-kazakhsta" + - "nfrom-orfrom-padovaksdalfrom-pratohnoshoooshikamaishimodatefrom-" + - "rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-txn--1ck2e1bananarepubli" + - "caseihichisobetsuitainairforcechirealminamiawajikibmdiscoveryomb" + - "ondishakotanavigationavoiitatebayashiibahcavuotnagaraholtaleniwa" + - "izumiotsukumiyamazonawsadodgemologicallyngenvironmentalconservat" + - "ionavuotnaklodzkodairassnasabaerobaticketselinogradultashkentata" + - "motors3-ap-northeast-2from-utazuerichardlillehammerfeste-ipartis" + - "-a-libertarianfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wa" + - "from-wielunnerfrom-wvallee-aosteroyfrom-wyfrosinonefrostalowa-wo" + - "lawafroyahikobeardubaiduckdnsevenassisicilyfstcgroupartnersewill" + - "iamhillfujiiderafujikawaguchikonefujiminohtawaramotoineppubologn" + - "akanotoddenfujinomiyadafujiokayamansionsfranziskanerdpolicefujis" + - "atoshonairtelecityeatsharis-a-linux-useranishiaritabashijonawate" + - "fujisawafujishiroishidakabiratoridefinimakanegasakindlegokasells" + - "-for-lessharpartshawaiijimarugame-hostrolekameokameyamatotakadaf" + - "ujitsurugashimaritimekeepingfujixeroxn--1ctwolominamatakkokamino" + - "yamaxunusualpersonfujiyoshidafukayabeatshellaspeziafukuchiyamada" + - "fukudominichocolatemasekashiwazakiyosatokashikiyosemitefukuis-a-" + - "llamarylandfukumitsubishigakirovogradoyfukuokazakiryuohaebarumin" + - "amimakis-a-musicianfukuroishikarikaturindalfukusakisarazurewebsi" + - "teshikagamiishibukawafukuyamagatakaharustkanoyakumoldeloittexasc" + - "olipicenoipifonynysaarlandfunabashiriuchinadafunagatakahashimama" + - "kishiwadafunahashikamiamakusatsumasendaisennangonohejis-a-nascar" + - "fanfundaciofuoiskujukuriyamanxn--1lqs03nfuosskoczowinbarcelonaga" + - "sakijobserverisignieznord-frontiereviewskrakowedeployomitanobihi" + - "rosakikamijimastronomy-gatewaybomloans3-ap-south-1furnituredston" + - "efurubiraquarelleborkangerfurudonostiaarpartyfurukawairtrafficho" + - "funatoriginsurecifedexhibitionishiokoppegardyndns-picsardegnamss" + - "koganeis-a-doctorayfusodegaurafussaikisofukushimaoris-a-nurserve" + - "bbshimojis-a-painteractivegarsheis-a-patsfanfutabayamaguchinomig" + - "awafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiafut" + - "urehostingfuturemailingfvgfyis-a-personaltrainerfylkesbiblackfri" + - "dayfyresdalhangoutsystemscloudfunctionshimokawahannanmokuizumode" + - "rnhannotaireshimokitayamahanyuzenhapmirhareidsbergenharstadharve" + - "stcelebrationhasamarcheapassagenshimonitayanagitlaborhasaminami-" + - "alpssells-itrentino-aadigehashbanghasudahasura-appassenger-assoc" + - "iationhasvikddielddanuorrikuzentakataiwanairlinedre-eikerhatogay" + - "aitakamoriokalmykiahatoyamazakitahiroshimarnardalhatsukaichikais" + - "eis-a-republicancerresearchaeologicaliforniahattfjelldalhayashim" + - "amotobungotakadapliernewjerseyhazuminobusellsyourhomegoodshimono" + - "sekikawahboehringerikehelsinkitakamiizumisanofidelitysvardollshi" + - "mosuwalkis-a-rockstarachowicehembygdsforbundhemneshimotsukehemse" + - "dalhepforgeherokussldheroyhgtvalleeaosteigenhigashichichibunkyon" + - 
"anaoshimageandsoundandvisionhigashihiroshimanehigashiizumozakita" + - "katakanabeautydalhigashikagawahigashikagurasoedahigashikawakitaa" + - "ikitakyushuaiahigashikurumeiwamarriottrentino-alto-adigehigashim" + - "atsushimarshallstatebankfhappouhigashimatsuyamakitaakitadaitoiga" + - "wahigashimurayamamotorcycleshimotsumahigashinarusembokukitamidor" + - "is-a-socialistmein-vigorgehigashinehigashiomihachimanchesterhiga" + - "shiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiy" + - "oshikawaminamiaikitamotosumitakagildeskaliszhigashitsunotogawahi" + - "gashiurausukitanakagusukumoduminamiminowahigashiyamatokoriyamana" + - "shifteditchyouripaviancarrierhigashiyodogawahigashiyoshinogaris-" + - "a-soxfanhiraizumisatohobby-sitehirakatashinagawahiranais-a-stude" + - "ntalhirarahiratsukagawahirayaizuwakamatsubushikusakadogawahistor" + - "ichouseshinichinanhitachiomiyaginankokubunjis-a-teacherkassymant" + - "echnologyhitachiotagooglecodespotrentino-altoadigehitraeumtgerad" + - "elmenhorstalbanshinjournalistjohnhjartdalhjelmelandholeckobierzy" + - "ceholidayhomeipfizerhomelinkhakassiahomelinuxn--1lqs71dhomeoffic" + - "ehomesecuritymaceratakaokaluganskolevangerhomesecuritypccwindmil" + - "lhomesenseminehomeunixn--1qqw23ahondahoneywellbeingzonehongopocz" + - "northwesternmutualhonjyoitakarazukamakurazakitashiobarahornindal" + - "horseoulminamiogunicomcastresistancehortendofinternet-dnshinjuku" + - "manohospitalhoteleshinkamigotoyohashimotoshimahotmailhoyangerhoy" + - "landetroitskydivinghumanitieshinshinotsurgeryhurdalhurumajis-a-t" + - "echietis-a-therapistoiahyllestadhyogoris-an-accountantshinshiroh" + - "yugawarahyundaiwafunehzchoseiroumuenchenishitosashimizunaminamia" + - "shigarajfkhmelnitskiyamashikejgorajlchoyodobashichikashukujitawa" + - "rajlljmpharmacienshiojirishirifujiedajnjcpgfoggiajoyokaichibahcc" + - "avuotnagareyamalborkdalpha-myqnapcloudapplebesbyglandjpmorganjpn" + - "jprshioyanaizujuniperjurkoshimizumakis-an-engineeringkoshunantok" + - "igawakosugekotohiradomainshirakofuefukihaboromskoguchikuzenkotou" + - "rakouhokutamakis-an-entertainerkounosupplieshiranukamogawakouyam" + - "ashikokuchuokouzushimasoykozagawakozakis-bykpnkppspdnshiraois-ce" + - "rtifieducatorahimeshimamateramochizukirakrasnodarkredirectmelhus" + - "cultureggio-calabriakristiansandcatshiraokanagawakristiansundkro" + - "dsheradkrokstadelvaldaostarostwodzislawindowshiratakahagivestbyk" + - "ryminamisanrikubetsupportrentino-sued-tirolkumatorinokumejimasud" + - "akumenanyokkaichiropractichristmasakikugawatchandclockasukabedzi" + - "n-the-bandaikawachinaganoharamcoachampionshiphoptobishimaizurugb" + - "ydgoszczecinemakeupowiathletajimabariakeisenbahnishiwakis-a-fina" + - "ncialadvisor-aurdalottokonamegatakasugais-a-geekgalaxykunisakis-" + - "foundationkunitachiarailwaykunitomigusukumamotoyamassa-carrara-m" + - "assacarraramassabusinessebytomaritimobarakunneppulawykunstsammlu" + - "ngkunstunddesignkuokgrouphdkureggio-emilia-romagnakatsugawakurga" + - "nkurobelaudiblebtimnetzkurogimilanokuroisoftwarendalenugkuromats" + - "unais-gonekurotakikawasakis-into-animelbournekushirogawakustanai" + - "s-into-carshintomikasaharakusupplykutchanelkutnokuzumakis-into-c" + - "artoonshinyoshitomiokamitsuekvafjordkvalsundkvamfamberkeleykvana" + - "ngenkvinesdalkvinnheradkviteseidskogkvitsoykwpspiegelkzmissilewi" + - "smillermisugitokorozawamitourismolancastermitoyoakemiuramiyazumi" + - "yotamanomjondalenmlbfanmonmouthagebostadmonstermonticellolmontre" + - "alestatefarmequipmentrentino-suedtirolmonza-brianzaporizhzhiamon" + - 
"za-e-della-brianzapposhishikuis-not-certifiedunetbankharkovanylv" + - "enicemonzabrianzaptokuyamatsusakahoginowaniihamatamakawajimaphil" + - "adelphiaareadmyblogsitemonzaebrianzaramonzaedellabrianzamoonscal" + - "exusdecorativeartshisognemoparachutingmordoviajessheiminamitanem" + - "oriyamatsushigemoriyoshimilitarymormoneymoroyamatsuuramortgagemo" + - "scowinnershisuifuelveruminamiuonumatsumotofukemoseushistorymosjo" + - "enmoskeneshitaramamosshizukuishimofusaitamatsukuris-savedmosvikn" + - "x-serveronakatombetsunndalmoteginozawaonsenmoviemovistargardmtpc" + - "hromedicaltanissettairamtranbymuenstermugithubcloudusercontentre" + - "ntinoa-adigemuikamishihoronobeauxartsandcraftshizuokananporovigo" + - "tpantheonsitemukochikushinonsenergymulhouservebeermunakatanemunc" + - "ieszynmuosattemuphilatelymurmanskolobrzegersundmurotorcraftrenti" + - "noaadigemusashimurayamatsuzakis-slickhersonmusashinoharamuseetre" + - "ntinoalto-adigemuseumverenigingmusicargodaddynaliascoli-picenogi" + - "ftshoujis-uberleetrentino-stirolmutsuzawamy-vigorlicemy-wanggouv" + - "icenzamyactivedirectorymyasustor-elvdalmycdn77-securechtrainingm" + - "ydissentrentinoaltoadigemydrobofagemydshowamyeffectrentinos-tiro" + - "lmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccesshowti" + - "meteorapphilipsynology-diskstationmyfusionmyhome-serverrankoshig" + - "ayanagawamykolaivaporcloudmymailermymediapchryslermyokohamamatsu" + - "damypepsongdalenviknakanojohanamakinoharamypetshriramlidlugoleka" + - "gaminoduminamiyamashirokawanabelembroideryggeelvincklabudhabikin" + - "okawabarthagakhanamigawamyphotoshibajddarchaeologyeongnamegawalb" + - "rzycharternidmypsxn--30rr7ymysecuritycamerakermyshopblocksienara" + - "shinomytis-a-bookkeeperugiamyvnchungnamdalseidfjordyndns-remotew" + - "dyndns-serverdalouvreggioemiliaromagnakayamatsumaebashikshacknet" + - "oyookanmakiwakunigamidsundyndns-weberlincolnissandnessjoenissayo" + - "koshibahikariwanumatakazakis-a-greenissedalowiczest-le-patrondhe" + - "immobilienisshingugepicturesilkomaganepiemontepilotsimple-urlpim" + - "ientaketomisatolgapinkomakiyosumy-routerpioneerpippuphonefossigd" + - "alpiszpittsburghofauskedsmokorsetagayasells-for-unzenpiwatepizza" + - "pkomatsushimashikizunokunimihoboleslawiechristiansburgroks-thisa" + - "yamanobeokakudamatsueplanetariuminanoplantationplantsirdalplatfo" + - "rmshangrilanciaplaystationplazaplchurchaseljeepostfoldnavyplumbi" + - "ngopmnpodzonepohlpoivronpokerpokrovskomforbarclays3-us-gov-west-" + - "1politiendapolkowicepoltavalle-aostathellezajskommunalforbundpom" + - "orzeszowioslingpordenonepornporsangerporsanguidellogliastradingp" + - "orsgrunnanpoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrpr" + - "imeloyalistockholmestrandprincipeprivatizehealthinsuranceprochow" + - "iceproductionslupskommuneprofbsbxn--12cfi8ixb8lvivano-frankivska" + - "tsuyamasfjordenprogressivegasiapromombetsurfbx-oscholarshipschoo" + - "lpropertyprotectionprotonetrentinosud-tirolprudentialpruszkowitd" + - "komonoprzeworskogptplusgardenpvtrentinosudtirolpwcirclegnicafede" + - "rationiyodogawapzqldqponqslgbtrentinosued-tirolquicksytesnoasait" + - "omobellevuelosangelesjaguarchitecturealtychyattorneyagawalesundq" + - "uipelementsokanazawaqvcircustomerstuff-4-salestufftoread-booksne" + - "solognestuttgartritonsusakis-very-evillagesusonosuzakaneyamazoes" + - "uzukaniepcesuzukis-very-goodhandsonsvalbardunloppacificitadelive" + - "rysveiosvelvikongsbergsvizzeraswedenswidnicartierswiebodzindiana" + - "polis-a-bloggerswiftcoversicherungswinoujscienceandhistoryswissh" + - 
"ikis-very-nicesynology-dsolundbeckomorotsukamiokamikoaniikappugl" + - "iatushuissier-justicetuvalle-daostaticsomatuxfamilytwmailvennesl" + - "askerrylogisticsomnaritakurashikis-very-badajozoravestfoldvestne" + - "soovestre-slidreamhostersopotrentinosuedtirolvestre-totennishiaw" + - "akuravestvagoyvevelstadvibo-valentiavibovalentiavideovillaskimit" + - "subatamicable-modembetsukuis-very-sweetpeppervinnicartoonartdeco" + - "ffeedbackplaneappspotagervinnytsiavipsinaappiagetmyiphoenixn--32" + - "vp30haibarakitahatakamatsukawavirginiavirtualvirtueeldomeindianm" + - "arketingvirtuelvisakegawavistaprinternationalfirearmsor-odalvite" + - "rboltrogstadvivoldavixn--3bst00minnesotaketakatsukis-into-gamess" + - "inatsukigatakasagotembaixadavlaanderenvladikavkazimierz-dolnyvla" + - "dimirvlogoipictetrentinostirolvolkswagentsor-varangervologdansko" + - "ninjamisonvolvolkenkundenvolyngdalvossevangenvotevotingvotoyonak" + - "agyokutoursorfoldwloclawekonskowolayangroupharmacyshirahamatonbe" + - "tsurnadalwmflabsorreisahayakawakamiichikawamisatotalworldworse-t" + - "handawowithgoogleapisa-hockeynutsiracusakatakinouewritesthisblog" + - "sytewroclawithyoutubeneventoeidsvollwtcitichernigovernmentoyonow" + - "tfbxoschulewuozuwwwiwatsukiyonowruzhgorodeowzmiuwajimaxn--45brj9" + - "civilaviationxn--45q11civilisationxn--4gbriminingxn--4it168dxn--" + - "4it797konyveloftrentino-sudtirolxn--4pvxs4allxn--54b7fta0ccivili" + - "zationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilwarmanageme" + - "ntoyosatoyakokonoexn--5rtq34kooris-an-anarchistoricalsocietyxn--" + - "5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986" + - "b3xlxn--7t0a264claimsarlucaniaxn--80adxhksortlandxn--80ao21axn--" + - "80aqecdr1axn--80asehdbarreauctionflfanfshostrowiecasertaipeiheij" + - "iiyamanouchikuhokuryugasakitaurayasudaukraanghkeymachineustarhub" + - "alsanagochihayaakasakawaharanzanpachigasakicks-assedicasadelamon" + - "edatingjemnes3-ap-southeast-2xn--80aswgxn--80audnedalnxn--8ltr62" + - "kopervikhmelnytskyivaolbia-tempio-olbiatempioolbialystokkepnogat" + - "aijis-an-actresshintokushimaxn--8pvr4uxn--8y0a063axn--90a3academ" + - "y-firewall-gatewayxn--90aishobaraomoriguchiharahkkeravjuedisches" + - "apeakebayernrtromsakakinokiaxn--90azhytomyrxn--9dbhblg6dietcimdb" + - "arrel-of-knowledgeologyonagoyaurskog-holandroverhalla-speziaerop" + - "ortalaheadjudaicaaarborteaches-yogasawaracingroks-theatree164xn-" + - "-9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexn--3d" + - "s443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-knowledgeometre-" + - "experts-comptables3-us-west-1xn--avery-yuasakuhokkaidoomdnshome-" + - "webservercellikes-piedmontblancomeeresorumincommbankmpspbarclayc" + - "ards3-us-east-2xn--b-5gaxn--b4w605ferdxn--bck1b9a5dre4cldmailucc" + - "apitalonewportlligatoyotaris-a-gurulsandoyxn--bdddj-mrabdxn--bea" + - "ralvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7ax" + - "n--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bj" + - "ddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--b" + - "od-2naroyxn--brnny-wuaccident-investigation-aptibleaseating-orga" + - "nicbcn-north-1xn--brnnysund-m8accident-prevention-webhopenairbus" + - "antiquest-a-la-maisondre-landebudapest-a-la-masionionjukudoyamag" + - "entositelekommunikationthewifiat-band-campaniaxn--brum-voagatrom" + - "sojampagefrontapphotographysioxn--btsfjord-9zaxn--c1avgxn--c2br7" + - "gxn--c3s14mintelligencexn--cck2b3barsyonlinewhampshirebungoonord" + - "-odalazioceanographics3-us-west-2xn--cg4bkis-with-thebandovre-ei" + - 
"kerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisakis-leetrentino" + - "-s-tirollagrigentomologyeongbukharkivgucciprianiigataishinomakim" + - "obetsuliguriaxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-g" + - "hc29axn--czr694bashkiriaustevollarvikarasjohkamiminers3-ca-centr" + - "al-1xn--czrs0trusteexn--czru2dxn--czrw28basilicataniaustinnatura" + - "lsciencesnaturelles3-eu-central-1xn--d1acj3basketballfinanzgorau" + - "straliaisondriodejaneirochesterepbodynathomebuiltatarantottoribe" + - "staddnskingjerdrumckinseyokosukanzakiwienaturbruksgymnaturhistor" + - "isches3-eu-west-1xn--d1alfaromeoxn--d1atrvarggatroandinosaureise" + - "nxn--d5qv7z876clickasumigaurawa-mazowszextraspacekitagatajirissa" + - "gamiharaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawane" + - "honbetsurutaharaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry" + - "-iraxn--e1a4clinichernihivanovodkagoshimalvikashiwaraxn--eckvdtc" + - "9dxn--efvn9southcarolinazawaxn--efvy88hair-surveillancexn--ehqz5" + - "6nxn--elqq16hakatanoshiroomuraxn--estv75gxn--eveni-0qa01gaxn--f6" + - "qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hso" + - "uthwestfalenxn--fiq64batodayonaguniversityoriikariyaltakasakiyok" + - "awaraustrheimatunduhrennesoyokoteastcoastaldefencebinagisochildr" + - "ensgardenatuurwetenschappenaumburgjerstadotsuruokakegawaetnagaha" + - "maroygardenebakkeshibechambagriculturennebudejjudygarlandd-dnsfo" + - "r-better-thanawawdev-myqnapcloudcontrolapplinzi234xn--fiqs8sowax" + - "n--fiqz9spjelkavikomvuxn--2m4a15exn--fjord-lraxn--fjq720axn--fl-" + - "ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidspread" + - "bettingxn--frna-woaraisaijotrysiljanxn--frya-hraxn--fzc2c9e2clin" + - "iquenoharaxn--fzys8d69uvgmailxn--g2xx48clintonoshoesarpsborgrond" + - "arxn--gckr3f0fedorapeopleirfjordxn--gecrj9clothingrongaxn--ggavi" + - "ika-8ya47hakodatexn--gildeskl-g0axn--givuotna-8yasakaiminatoyone" + - "zawaxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050isleofmand" + - "alxn--gmqw5axn--h-2failxn--h1aeghakonexn--h2brj9cnsarufutsunomiy" + - "awakasaikaitakoelnxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y" + - "a35batsfjordivtasvuodnakaiwamizawauthordalandroiddnss3-eu-west-2" + - "xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctulangevagrarbo" + - "retumbriaxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqa" + - "xn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr" + - "513nxn--indery-fyasugissmarterthanyouxn--io0a7iwchoshibuyachiyod" + - "avvenjargapartmentsardiniaxn--j1aefedoraprojectrani-andria-barle" + - "tta-trani-andriaxn--j1amhakubaghdadxn--j6w193gxn--jlq61u9w7bauha" + - "usposts-and-telecommunicationsncfdivttasvuotnakamagayahababyklec" + - "lercasinordre-landiyoshiokaracoldwarmiamihamadautomotivecoalipay" + - "okozebinorfolkebibleikangereportateshinanomachimkentateyamagroce" + - "rybnikahokutobamaintenancebetsukubank12xn--jlster-byasuokanraxn-" + - "-jrpeland-54axn--jvr189misasaguris-lostre-toteneis-an-actorxn--k" + - "7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--kl" + - "bu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--ko" + - "luokta-7ya57hakuis-a-photographerokuappasadenamsosnowiechonanbui" + - "lderschmidtre-gauldalottexn--kprw13dxn--kpry57dxn--kpu716fermoda" + - "lenxn--kput3ixn--krager-gyatomitamamuraxn--kranghke-b0axn--krdsh" + - "erad-m8axn--krehamn-dxaxn--krjohka-hwab49jeonnamerikawauexn--ksn" + - "es-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazuryxn--kvnangen-k0a" + - "xn--l-1fairwindspydebergxn--l1accentureklamborghiniizaxn--lahead" + - 
"ju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leaga" + - "viika-52bbcateringebugattipschlesisches3-website-ap-northeast-1x" + - "n--lesund-huaxn--lgbbat1ad8jetztrentino-sud-tirolxn--lgrd-poacnt" + - "oyotomiyazakis-a-hard-workerxn--lhppi-xqaxn--linds-pramericanart" + - "unesolutionsokndalxn--lns-qlansrlxn--loabt-0qaxn--lrdal-sraxn--l" + - "renskog-54axn--lt-liacolonialwilliamsburgrossetouchijiwadell-ogl" + - "iastraderxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--m" + - "erker-kuaxn--mgb2ddesrtrentoyokawaxn--mgb9awbferraraxn--mgba3a3e" + - "jtunkongsvingerxn--mgba3a4f16axn--mgba3a4franamizuholdingsmilelx" + - "n--mgba7c0bbn0axn--mgbaakc7dvferrarittogoldpoint2thisamitsukexn-" + - "-mgbaam7a8hakusandiegoodyearxn--mgbab2bdxn--mgbai9a5eva00bbtatto" + - "olsztynsettlers3-website-ap-southeast-1xn--mgbai9azgqp6jevnakers" + - "huscountryestateofdelawarezzoologyxn--mgbayh7gpagespeedmobilizer" + - "oxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzd" + - "oxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--mgbpl" + - "2fhskodjejuegoshikiminokamoenairportland-4-salernoboribetsucksrv" + - "areserveblogspotrevisohughesolarssonxn--mgbqly7c0a67fbcoloradopl" + - "ateaudioxn--mgbqly7cvafredrikstadtvstordalxn--mgbt3dhdxn--mgbtf8" + - "flatangerxn--mgbtx2bbvacationswatch-and-clockerhcloudns3-website" + - "-ap-southeast-2xn--mgbx4cd0abbotturystykannamifunexn--mix082ferr" + - "eroticanonoichinomiyakexn--mix891fetsundxn--mjndalen-64axn--mk0a" + - "xindustriesteambulancexn--mk1bu44columbusheyxn--mkru45ixn--mlatv" + - "uopmi-s4axn--mli-tlanxesstorehabmerxn--mlselv-iuaxn--moreke-juax" + - "n--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlapyatigorskypexn-" + - "-mre-og-romsdal-qqbentleyukinfinitintuitaxihuanhlfanhs3-website-" + - "eu-west-1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycommu" + - "nitysfjordyndns-wikinkobayashikaoirminamibosogndalucernexn--muos" + - "t-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--3oq18vl8" + - "pn36axn--nit225kosakaerodromegallupinbarefootballooningjovikarat" + - "suginamikatagamiharuconnectatsunobiraugustowadaegubs3-ap-southea" + - "st-1xn--nmesjevuemie-tcbalestrandabergamoarekexn--nnx388axn--nod" + - "exn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery" + - "-byaeservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattano" + - "oganordreisa-geekoseis-an-artisteinkjerusalemrxn--o3cw4halsaintl" + - "ouis-a-anarchistoiredumbrellanbibaidarxn--o3cyx2axn--od0algxn--o" + - "d0aq3beppublishproxyzgorzeleccolognewyorkshirecipesaro-urbino-pe" + - "sarourbinopesaromasvuotnaharimamurogawatches3-website-sa-east-1x" + - "n--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osy" + - "ro-wuaxn--p1acfgujolsterxn--p1aixn--pbt977comobilyxn--pgbs0dhlxn" + - "--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4comparemarke" + - "rryhotelsasayamaxn--qcka1pmcdonaldstorfjordxn--qqqt11misconfused" + - "xn--qxamuneuestorjelenia-goraxn--rady-iraxn--rdal-poaxn--rde-ula" + - "quilancashireggiocalabriaxn--rdy-0nabarixn--rennesy-v1axn--rhkke" + - "rvju-01aflakstadaokagakibichuoxn--rholt-mragowoodsidexn--rhqv96g" + - "xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r" + - "land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricap" + - "etownnews-stagingxn--rovu88bernuorockartuzyukuhashimoichinosekig" + - "aharautoscanadaejeonbukarasjokarasuyamarylhurstjordalshalsenaust" + - "dalavagiskebizenakaniikawatanaguramusementarnobrzegyptianaturalh" + - "istorymuseumcenterepaircraftarumizusawabogadocscbgdyniabkhaziama" + - 
"llamagazineat-url-o-g-i-nativeamericanantiques3-ap-northeast-1ka" + - "ppchizippodhaleangaviikadenadexetereit3l3p0rtargets-itargiving12" + - "000emmafanconagawakayamadridvagsoyericssonyoursidealerimo-i-rana" + - "amesjevuemielno-ip6xn--rros-granvindafjordxn--rskog-uuaxn--rst-0" + - "narutokyotangovtuscanyxn--rsta-francaiseharaxn--ryken-vuaxn--ryr" + - "vik-byaxn--s-1faithruherecreationxn--s9brj9compute-1xn--sandness" + - "jen-ogbizxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangen" + - "xn--skierv-utazaskoyabearalvahkihokumakogengerdalcestpetersburgx" + - "n--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5" + - "narviikamisunagawaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-" + - "a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--snes-poaxn" + - "--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-" + - "varanger-ggbeskidyn-o-saurlandes3-website-us-east-1xn--srfold-by" + - "axn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--st" + - "jrdalshalsen-sqbestbuyshouses3-website-us-west-1xn--stre-toten-z" + - "cbstreamsterdamnserverbaniaxn--t60b56axn--tckweatherchannelxn--t" + - "iq49xqyjewelryxn--tjme-hraxn--tn0agrinet-freakstudioxn--tnsberg-" + - "q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-" + - "zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4axn--uist22hamurakamigor" + - "is-a-playerxn--uisz3gxn--unjrga-rtaobaokinawashirosatochiokinosh" + - "imalatvuopmiasakuchinotsuchiurakawakuyabukievenestudyndns-at-hom" + - "edepotenzamamicrolightingxn--unup4yxn--uuwu58axn--vads-jraxn--va" + - "rd-jraxn--vegrshei-c0axn--vermgensberater-ctbetainaboxfusejnyuri" + - "honjoyentgoryusuharaveroykenglandds3-external-1xn--vermgensberat" + - "ung-pwbieigersundnpalaceu-3utilitiesquare7xn--vestvgy-ixa6oxn--v" + - "g-yiabcgxn--vgan-qoaxn--vgsy-qoa0jewishartgalleryxn--vgu402compu" + - "terhistoryofscience-fictionxn--vhquvbargainstitutelevisionayorov" + - "nobninskarelianceu-2xn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadx" + - "n--vry-yla5gxn--vuq861bielawalmartjeldsundrangedalillyusuisserve" + - "exchangevents3-website-us-west-2xn--w4r85el8fhu5dnraxn--w4rs40lx" + - "n--wcvs22dxn--wgbh1comsecuritytacticsaseboknowsitallukowhoswhokk" + - "sundyndns-workisboringroundhandlingroznyxn--wgbl6axn--xhq521biel" + - "laakesvuemielecceverbankarlsoyuufcfanikinuyamashinashikitchenikk" + - "oebenhavnikolaevennodessagaeroclubmedecincinnationwidealstahauge" + - "sunderseaportsinfolldalabamagasakishimabarackmazerbaijan-mayendo" + - "ftheinternetflixilovecollegefantasyleaguernseyuzawavocatanzarowe" + - "ddingjesdalavangenaval-d-aosta-valleyolasitehimejibigawaskvolloa" + - "bathsbc66xn--xkc2al3hye2axn--xkc2dl3a5ee0hangglidingxn--y9a3aqua" + - "riumishimatsunoxn--yer-znarvikosherbrookegawaxn--yfro4i67oxn--yg" + - "arden-p1axn--ygbi2ammxn--3pxu8konsulatrobeepilepsydneyxn--ystre-" + - "slidre-ujbieszczadygeyachimataikikonaioirasebastopologyeonggieht" + - "avuoatnagaivuotnagaokakyotambabia-goracleaningatlantabuseekloges" + - "t-mon-blogueurovisionikonantankarmoyxn--zbx025dxn--zf0ao64axn--z" + - "f0avxn--42c2d9axn--zfr164bievatmallorcadaquesakurainvestmentsaky" + - "otanabellunorddalimanowarudavoues3-fips-us-gov-west-1xperiaxz" - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. 
In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [ 1 bits] unused -// [ 9 bits] children index -// [ 1 bits] ICANN bit -// [15 bits] text index -// [ 6 bits] text length -var nodes = [...]uint32{ - 0x31a403, - 0x284944, - 0x2dd106, - 0x3706c3, - 0x3706c6, - 0x398706, - 0x3a8103, - 0x2fe244, - 0x38e987, - 0x2dcd48, - 0x1a05702, - 0x316e87, - 0x35c789, - 0x2abb0a, - 0x2abb0b, - 0x22f383, - 0x287506, - 0x232dc5, - 0x1e021c2, - 0x2161c4, - 0x238743, - 0x26fc45, - 0x2214902, - 0x347743, - 0x266f744, - 0x33ddc5, - 0x2a04702, - 0x376b4e, - 0x24c4c3, - 0x38ae46, - 0x2e00142, - 0x2dd287, - 0x236f46, - 0x3209282, - 0x229d83, - 0x24d9c4, - 0x325e86, - 0x26c588, - 0x2761c6, - 0x2011c4, - 0x3600242, - 0x3335c9, - 0x20a1c7, - 0x351e86, - 0x330c89, - 0x298308, - 0x26e904, - 0x241ec6, - 0x222a46, - 0x3a022c2, - 0x26480f, - 0x20948e, - 0x211d04, - 0x2c2b85, - 0x2fe145, - 0x39e189, - 0x23c409, - 0x349a87, - 0x20fa86, - 0x275a83, - 0x3e02a82, - 0x315503, - 0x34e24a, - 0x20f903, - 0x2af985, - 0x284202, - 0x284209, - 0x4200ec2, - 0x212484, - 0x2b9686, - 0x2f3645, - 0x3552c4, - 0x4a05644, - 0x2030c3, - 0x232344, - 0x4e00c02, - 0x268d44, - 0x52ef6c4, - 0x25ef4a, - 0x5603dc2, - 0x2ba587, - 0x2f3b08, - 0x6208142, - 0x311687, - 0x2bf204, - 0x2bf207, - 0x36e0c5, - 0x34ffc7, - 0x349846, - 0x24f3c4, - 0x38c105, - 0x29e447, - 0x72001c2, - 0x26e503, - 0x200b82, - 0x200b83, - 0x760de02, - 0x2102c5, - 0x7a02a42, - 0x350e04, - 0x2734c5, - 0x211c47, - 0x26bcce, - 0x2b9184, - 0x245544, - 0x202f03, - 0x281d49, - 0x31ee0b, - 0x2e9a88, - 0x379948, - 0x3a9908, - 0x22ae48, - 0x330aca, - 0x34fec7, - 0x318186, - 0x7e87002, - 0x35e203, - 0x367e43, - 0x36f4c4, - 0x3a8143, - 0x3250c3, - 0x1720b82, - 0x8202502, - 0x27a8c5, - 0x296206, - 0x2d1b84, - 0x375487, - 0x2e1886, - 0x331f84, - 0x39d3c7, - 0x203bc3, - 0x86c54c2, - 0x8b0f242, - 0x8e16742, - 0x216746, - 0x9200002, - 0x3523c5, - 0x3220c3, - 0x200604, - 0x2e8f84, - 0x2e8f85, - 0x206b43, - 0x978d2c3, - 0x9a0bb42, - 0x289e05, - 0x289e0b, - 0x31e686, - 0x20cb4b, - 0x221344, - 0x20d949, - 0x20e9c4, - 0x9e0ec02, - 0x20f143, - 0x20f403, - 0x16105c2, - 0x268183, - 0x2105ca, - 0xa20b382, - 0x216445, - 0x29224a, - 0x2d7744, - 0x283783, - 0x26cfc4, - 0x212543, - 0x212544, - 0x212547, - 0x2140c5, - 0x2147c5, - 0x214f46, - 0x2157c6, - 0x216a03, - 0x21ae88, - 0x210043, - 0xa601c02, - 0x243448, - 0x213ccb, - 0x220148, - 0x220d86, - 0x221847, - 0x225348, - 0xb642b42, - 0xbabf3c2, - 0x326788, - 0x35e4c7, - 0x246085, - 0x357f48, - 0x2bd408, - 0x34dd83, - 0x22a1c4, - 0x36f502, - 0xbe2bc82, - 0xc238482, - 0xca2e802, - 0x22e803, - 0xce01ec2, - 0x2fe203, - 0x2f1e84, - 0x201ec3, - 0x26e8c4, - 0x201ecb, - 0x213c03, - 0x2de946, - 0x239f84, - 0x29034e, - 0x371145, - 0x38af48, - 0x31ffc7, - 0x31ffca, - 0x229743, - 0x22f147, - 0x31efc5, - 0x22f8c4, - 0x265b06, - 0x265b07, - 0x2c11c4, - 0x2f7a87, - 0x313d44, - 0x26c004, - 0x26c006, - 0x387184, - 0x3510c6, - 0x203f83, - 0x35e288, - 0x203f88, - 0x245503, - 0x268143, - 0x399a04, - 0x39e003, - 0xd219f02, - 0xd6d6a42, - 0x20bac3, - 0x207146, - 0x241fc3, - 0x377cc4, - 0xdaee982, - 0x3af843, - 0x3507c3, - 0x217a02, - 0xde04142, - 0x2c1946, - 0x233ac7, - 0x2e8945, - 0x37de04, - 0x28c505, - 0x268907, - 0x267805, - 0x2b8649, - 0x2cefc6, - 
0x2daa88, - 0x2e8846, - 0xe21a1c2, - 0x32ca08, - 0x2f1c46, - 0x21a1c5, - 0x2f6d87, - 0x309984, - 0x309985, - 0x276384, - 0x276388, - 0xe60cc02, - 0xea09882, - 0x3103c6, - 0x3b8988, - 0x334385, - 0x337306, - 0x342f08, - 0x344a88, - 0xee09885, - 0xf2142c4, - 0x3b0787, - 0xf60e5c2, - 0xfa1b102, - 0x10a099c2, - 0x2b9785, - 0x2a2645, - 0x2fef86, - 0x3b2547, - 0x380747, - 0x112a84c3, - 0x2a84c7, - 0x31eb08, - 0x376ec9, - 0x376d07, - 0x384d07, - 0x3a8ec8, - 0x3ad4c6, - 0x22f3c6, - 0x23000c, - 0x23120a, - 0x231687, - 0x232c8b, - 0x233907, - 0x23390e, - 0x234cc4, - 0x235ac4, - 0x237a47, - 0x3690c7, - 0x23b206, - 0x23b207, - 0x23b4c7, - 0x19604682, - 0x23c886, - 0x23c88a, - 0x23ce8b, - 0x23dbc7, - 0x23ed45, - 0x23f083, - 0x240586, - 0x240587, - 0x38eb43, - 0x19a0c442, - 0x240f4a, - 0x19f5d882, - 0x1a2a5e02, - 0x1a643142, - 0x1aa2cd82, - 0x244bc5, - 0x245304, - 0x1b205742, - 0x268dc5, - 0x23d483, - 0x20eac5, - 0x22ad44, - 0x206804, - 0x314046, - 0x25e206, - 0x28a003, - 0x238284, - 0x3a6803, - 0x1b600dc2, - 0x391c04, - 0x391c06, - 0x3b0d05, - 0x205e06, - 0x2f6e88, - 0x266e84, - 0x27ed08, - 0x2426c5, - 0x228308, - 0x29ff86, - 0x237587, - 0x22e204, - 0x22e206, - 0x33f443, - 0x383ec3, - 0x223d08, - 0x318dc4, - 0x348747, - 0x23e6c6, - 0x2d6389, - 0x250348, - 0x26cd08, - 0x26d084, - 0x351443, - 0x225e02, - 0x1c60f882, - 0x1ca10e82, - 0x3a7403, - 0x1ce04a42, - 0x38eac4, - 0x2862c6, - 0x26e605, - 0x21ba03, - 0x232884, - 0x2b14c7, - 0x33da03, - 0x231a88, - 0x208545, - 0x36e803, - 0x273445, - 0x273584, - 0x2f6a86, - 0x209ec4, - 0x211346, - 0x211b86, - 0x3916c4, - 0x213b43, - 0x1d205882, - 0x247345, - 0x221c03, - 0x1d61b0c2, - 0x22ffc3, - 0x209bc5, - 0x232403, - 0x232409, - 0x1da05f02, - 0x1e205e42, - 0x2893c5, - 0x218786, - 0x2d1746, - 0x2b0a88, - 0x2b0a8b, - 0x20718b, - 0x2e8b45, - 0x2db145, - 0x2c6309, - 0x1600302, - 0x391888, - 0x20dc44, - 0x1ea007c2, - 0x3a7883, - 0x1f2c6086, - 0x20ae88, - 0x1f601402, - 0x2344c8, - 0x1fa2bb82, - 0x3b92ca, - 0x1feccc43, - 0x3ac1c6, - 0x3af408, - 0x3ac008, - 0x31d006, - 0x36bc07, - 0x264a07, - 0x3349ca, - 0x2d77c4, - 0x3474c4, - 0x35c1c9, - 0x20794385, - 0x209686, - 0x20e1c3, - 0x24a044, - 0x20a02644, - 0x202647, - 0x212fc7, - 0x22a584, - 0x285445, - 0x2ff048, - 0x366747, - 0x370f07, - 0x20e18342, - 0x327704, - 0x292b48, - 0x245bc4, - 0x247784, - 0x248085, - 0x2481c7, - 0x223589, - 0x248fc4, - 0x249709, - 0x249948, - 0x249dc4, - 0x249dc7, - 0x2124aa83, - 0x24ad47, - 0x1609d02, - 0x16ad202, - 0x24bec6, - 0x24c507, - 0x24cd44, - 0x24e6c7, - 0x24fa47, - 0x24fdc3, - 0x248902, - 0x229642, - 0x250a03, - 0x250a04, - 0x250a0b, - 0x379a48, - 0x256804, - 0x2523c5, - 0x254007, - 0x2555c5, - 0x2bc00a, - 0x256743, - 0x2160fc82, - 0x226e84, - 0x258d89, - 0x25c343, - 0x25c407, - 0x24a849, - 0x282688, - 0x204743, - 0x278fc7, - 0x279709, - 0x268ac3, - 0x2810c4, - 0x283c89, - 0x2880c6, - 0x289683, - 0x200182, - 0x21f983, - 0x3a8a87, - 0x21f985, - 0x379746, - 0x256e84, - 0x302e85, - 0x2e4403, - 0x216c46, - 0x20db42, - 0x395144, - 0x221402, - 0x221403, - 0x21a00782, - 0x247303, - 0x215c44, - 0x215c47, - 0x200906, - 0x202602, - 0x21e025c2, - 0x2dca84, - 0x22235e82, - 0x22600b02, - 0x2d4f84, - 0x2d4f85, - 0x2b6dc5, - 0x390e06, - 0x22a05d42, - 0x205d45, - 0x20cf05, - 0x20ae03, - 0x210986, - 0x2126c5, - 0x2166c2, - 0x343605, - 0x2166c4, - 0x221ec3, - 0x227343, - 0x22e0c642, - 0x2d4987, - 0x3669c4, - 0x3669c9, - 0x249f44, - 0x291d43, - 0x2f6609, - 0x367508, - 0x232a24c4, - 0x2a24c6, - 0x21c303, - 0x247bc3, - 0x2e9dc3, - 0x236eb382, - 0x368cc2, - 0x23a05e82, - 0x323cc8, - 0x32a388, - 0x398e46, - 
0x2e27c5, - 0x22efc5, - 0x352ec7, - 0x21d205, - 0x228782, - 0x23e38182, - 0x1603002, - 0x2416c8, - 0x32c945, - 0x2e3404, - 0x2ebac5, - 0x23f407, - 0x3207c4, - 0x240e42, - 0x24200582, - 0x338984, - 0x212cc7, - 0x28a2c7, - 0x34ff84, - 0x292203, - 0x245444, - 0x245448, - 0x22f706, - 0x26598a, - 0x223444, - 0x292588, - 0x288504, - 0x221946, - 0x294684, - 0x2b9a86, - 0x366c89, - 0x25da47, - 0x3375c3, - 0x24667e42, - 0x267e43, - 0x20ee02, - 0x24a11ec2, - 0x3085c6, - 0x365c88, - 0x2a4087, - 0x3a3f49, - 0x291c49, - 0x2a5045, - 0x2a6049, - 0x2a6805, - 0x2a6949, - 0x2a8005, - 0x2a9108, - 0x21fb84, - 0x24e890c7, - 0x2a9303, - 0x2a9307, - 0x3850c6, - 0x2a9b87, - 0x2a1085, - 0x2935c3, - 0x2521ae02, - 0x3b40c4, - 0x2562ce82, - 0x258203, - 0x25a17f42, - 0x36d586, - 0x2f3a85, - 0x2ac207, - 0x26cc43, - 0x325044, - 0x20e903, - 0x33e783, - 0x25e02bc2, - 0x266015c2, - 0x398804, - 0x2488c3, - 0x243c85, - 0x26a029c2, - 0x27206482, - 0x2b4506, - 0x318f04, - 0x2e3004, - 0x2e300a, - 0x27a01fc2, - 0x37204a, - 0x3756c8, - 0x27fb1384, - 0x20ad83, - 0x201fc3, - 0x3a9a49, - 0x217649, - 0x285246, - 0x28244183, - 0x3292c5, - 0x30180d, - 0x375886, - 0x3bac8b, - 0x28602e82, - 0x22c1c8, - 0x29206e82, - 0x29606fc2, - 0x2ae585, - 0x29a03942, - 0x258447, - 0x21c907, - 0x21e003, - 0x2306c8, - 0x29e06502, - 0x312684, - 0x212943, - 0x351d45, - 0x34db83, - 0x2f3546, - 0x205904, - 0x268103, - 0x2ae9c3, - 0x2a205fc2, - 0x2e8ac4, - 0x35f6c5, - 0x39f1c7, - 0x275643, - 0x2ad883, - 0x2ae083, - 0x160fec2, - 0x2ae143, - 0x2ae943, - 0x2a605102, - 0x282104, - 0x25e406, - 0x342643, - 0x2aec43, - 0x2aaafd42, - 0x2afd48, - 0x2b0004, - 0x36c246, - 0x2b0387, - 0x249c46, - 0x28e2c4, - 0x38600682, - 0x384f8b, - 0x2fb08e, - 0x21930f, - 0x2985c3, - 0x38ebbbc2, - 0x1600f42, - 0x39201582, - 0x28f403, - 0x2fdec3, - 0x233706, - 0x277c46, - 0x3afd87, - 0x3328c4, - 0x396188c2, - 0x39a08882, - 0x348345, - 0x2e6047, - 0x3b5746, - 0x39e27282, - 0x227284, - 0x2b3ac3, - 0x3a20be02, - 0x3a759ec3, - 0x2b4c44, - 0x2be409, - 0x16c3ac2, - 0x3aa03a82, - 0x203a85, - 0x3aec3d42, - 0x3b203202, - 0x346947, - 0x239689, - 0x35ca0b, - 0x2647c5, - 0x2c4849, - 0x2e8246, - 0x31e6c7, - 0x3b608484, - 0x3199c9, - 0x373487, - 0x20ab47, - 0x20a383, - 0x20a386, - 0x3b68c7, - 0x206a43, - 0x2565c6, - 0x3be02a02, - 0x3c232682, - 0x385803, - 0x324c45, - 0x350f47, - 0x250086, - 0x21f905, - 0x277d44, - 0x2c9fc5, - 0x2f2684, - 0x3c6040c2, - 0x331107, - 0x2dbd44, - 0x217544, - 0x21754d, - 0x257509, - 0x3a4448, - 0x253944, - 0x3abc45, - 0x206447, - 0x2144c4, - 0x2e1947, - 0x21c485, - 0x3caa4604, - 0x2d92c5, - 0x25b004, - 0x24bb86, - 0x3b2345, - 0x3ce250c2, - 0x283844, - 0x283845, - 0x36fa46, - 0x20c3c5, - 0x30c304, - 0x2c5dc3, - 0x2053c6, - 0x358505, - 0x2bb485, - 0x3b2444, - 0x2234c3, - 0x2234cc, - 0x3d288a02, - 0x3d6010c2, - 0x3da00282, - 0x206343, - 0x206344, - 0x3de04bc2, - 0x2f9688, - 0x379805, - 0x235684, - 0x23b086, - 0x3e201f42, - 0x3e609782, - 0x3ea00e82, - 0x306b85, - 0x391586, - 0x211084, - 0x3263c6, - 0x2ba346, - 0x219943, - 0x3ef0de0a, - 0x247b05, - 0x2c8e83, - 0x223186, - 0x300fc9, - 0x223187, - 0x297788, - 0x2981c9, - 0x224348, - 0x229486, - 0x20bf03, - 0x3f2a8542, - 0x385683, - 0x385689, - 0x332448, - 0x3f649a02, - 0x3fa02342, - 0x227f83, - 0x2da905, - 0x251ec4, - 0x2c0909, - 0x22cb84, - 0x266348, - 0x202343, - 0x202344, - 0x278b03, - 0x2187c8, - 0x217487, - 0x4020b102, - 0x274082, - 0x351905, - 0x266689, - 0x209703, - 0x27b184, - 0x329284, - 0x2064c3, - 0x27c3ca, - 0x40752bc2, - 0x40a83802, - 0x2c5443, - 0x3739c3, - 0x1602302, - 0x38ac03, - 0x40e0f242, - 0x4120ec42, - 
0x41610444, - 0x210446, - 0x383b06, - 0x26ad44, - 0x36c643, - 0x38bcc3, - 0x226883, - 0x23d206, - 0x2cb8c5, - 0x2c5a07, - 0x31e589, - 0x2ca645, - 0x2cb806, - 0x2cbd88, - 0x2cbf86, - 0x236a04, - 0x29944b, - 0x2ceac3, - 0x2ceac5, - 0x2cec08, - 0x228502, - 0x346c42, - 0x41a44c42, - 0x41e0e602, - 0x218903, - 0x422675c2, - 0x2675c3, - 0x2cef04, - 0x2cf5c3, - 0x42a115c2, - 0x42ed43c6, - 0x2a7306, - 0x43207902, - 0x4360f442, - 0x43a27382, - 0x43e02c82, - 0x4422dd02, - 0x44602d02, - 0x234703, - 0x390685, - 0x319606, - 0x44a11cc4, - 0x3b0b0a, - 0x32fe86, - 0x2e8d84, - 0x281d03, - 0x45604642, - 0x200c82, - 0x25fd03, - 0x45a05503, - 0x2c7b87, - 0x3b2247, - 0x47250b07, - 0x312d87, - 0x227b03, - 0x227b0a, - 0x236b84, - 0x23e5c4, - 0x23e5ca, - 0x213f05, - 0x47609642, - 0x24e683, - 0x47a008c2, - 0x21c2c3, - 0x267e03, - 0x48203342, - 0x2a8444, - 0x21de84, - 0x3b9505, - 0x305005, - 0x2e1ac6, - 0x2e1e46, - 0x48608442, - 0x48a033c2, - 0x3185c5, - 0x2a7012, - 0x2511c6, - 0x220803, - 0x30a746, - 0x220805, - 0x1610602, - 0x50e120c2, - 0x353e83, - 0x2120c3, - 0x2441c3, - 0x512023c2, - 0x376e43, - 0x5160b482, - 0x210483, - 0x282148, - 0x25e983, - 0x25e986, - 0x3a2987, - 0x306806, - 0x30680b, - 0x2e8cc7, - 0x3b3ec4, - 0x51e04ec2, - 0x379685, - 0x522054c3, - 0x2a6e03, - 0x326c05, - 0x329983, - 0x52729986, - 0x391a0a, - 0x26a9c3, - 0x204584, - 0x3b88c6, - 0x21a5c6, - 0x52a00983, - 0x324f07, - 0x285147, - 0x29b0c5, - 0x2318c6, - 0x224a83, - 0x54a10bc3, - 0x54e056c2, - 0x328144, - 0x22a2cc, - 0x236149, - 0x2414c7, - 0x249245, - 0x262a84, - 0x273cc8, - 0x278305, - 0x55284a05, - 0x28c609, - 0x351f43, - 0x2a5d84, - 0x556013c2, - 0x2013c3, - 0x55a94142, - 0x2a4386, - 0x160f982, - 0x55e06e02, - 0x306a88, - 0x2be603, - 0x2d9207, - 0x2e4d05, - 0x2dd685, - 0x32840b, - 0x2dd686, - 0x328606, - 0x2ffac6, - 0x262c84, - 0x3042c6, - 0x2e3508, - 0x23a043, - 0x250dc3, - 0x250dc4, - 0x2e4484, - 0x2e4a07, - 0x2e5ec5, - 0x562e6002, - 0x5660ba02, - 0x20ba05, - 0x2e83c4, - 0x2e83cb, - 0x2e8e88, - 0x228f44, - 0x2272c2, - 0x56e28ec2, - 0x23b903, - 0x2e9344, - 0x2e9605, - 0x2ea047, - 0x2eb604, - 0x2e8b84, - 0x57201302, - 0x360cc9, - 0x2ec405, - 0x264a85, - 0x2ecf85, - 0x57601303, - 0x2ee0c4, - 0x2ee0cb, - 0x2ee644, - 0x2ef3cb, - 0x2ef7c5, - 0x21944a, - 0x2f0048, - 0x2f024a, - 0x2f0ac3, - 0x2f0aca, - 0x57a01742, - 0x57e2d4c2, - 0x21aa03, - 0x582f1bc2, - 0x2f1bc3, - 0x5875c402, - 0x58b22842, - 0x2f2504, - 0x21afc6, - 0x326105, - 0x2f4503, - 0x31a9c6, - 0x204405, - 0x25e704, - 0x58e05ec2, - 0x2c9244, - 0x2c5f8a, - 0x22d787, - 0x2f38c6, - 0x380b07, - 0x22a403, - 0x283e48, - 0x37f48b, - 0x3736c5, - 0x333ec5, - 0x333ec6, - 0x390884, - 0x3aa248, - 0x222943, - 0x222944, - 0x222947, - 0x38e446, - 0x352686, - 0x29018a, - 0x246604, - 0x24660a, - 0x59282846, - 0x282847, - 0x252447, - 0x270844, - 0x270849, - 0x25e0c5, - 0x235e0b, - 0x2e81c3, - 0x211503, - 0x22f003, - 0x22fac4, - 0x59600482, - 0x25d4c6, - 0x293345, - 0x30a985, - 0x24f6c6, - 0x3395c4, - 0x59a02782, - 0x23f0c4, - 0x59e01c42, - 0x2b9f05, - 0x21ad84, - 0x21bec3, - 0x5a612102, - 0x212103, - 0x23ba46, - 0x5aa03082, - 0x27f488, - 0x223004, - 0x223006, - 0x374246, - 0x2540c4, - 0x205345, - 0x2141c8, - 0x216547, - 0x219687, - 0x21968f, - 0x292a46, - 0x22cf03, - 0x22cf04, - 0x310504, - 0x20d003, - 0x221a84, - 0x240944, - 0x5ae42b02, - 0x289d43, - 0x242b03, - 0x5b209842, - 0x229f83, - 0x38eb83, - 0x21484a, - 0x358107, - 0x2efc0c, - 0x2efec6, - 0x30a146, - 0x248547, - 0x5b64c687, - 0x24f809, - 0x243584, - 0x24fbc4, - 0x5ba18942, - 0x5be027c2, - 0x290546, - 0x324d04, - 0x2d6bc6, - 0x2a5148, - 
0x3b8dc4, - 0x258486, - 0x2d1705, - 0x265c88, - 0x207383, - 0x273705, - 0x273e83, - 0x264b83, - 0x264b84, - 0x2759c3, - 0x5c2ec082, - 0x5c602e02, - 0x2e8089, - 0x278205, - 0x278404, - 0x27a9c5, - 0x20dd44, - 0x2e0d07, - 0x343bc5, - 0x250cc4, - 0x250cc8, - 0x2d5086, - 0x2d7984, - 0x2d8e88, - 0x2dbb87, - 0x5ca03902, - 0x2e36c4, - 0x20d0c4, - 0x20ad47, - 0x5ce2b804, - 0x2ccf42, - 0x5d201102, - 0x201543, - 0x203984, - 0x2aa283, - 0x374e05, - 0x5d61e182, - 0x2eb285, - 0x202c42, - 0x34d5c5, - 0x365e45, - 0x5da00c42, - 0x350744, - 0x5de00d02, - 0x2387c6, - 0x29a146, - 0x2667c8, - 0x2bfa08, - 0x36d504, - 0x36d6c5, - 0x3610c9, - 0x2db1c4, - 0x3919c4, - 0x205183, - 0x5e222705, - 0x2c3b87, - 0x2a2744, - 0x341e8d, - 0x361782, - 0x361783, - 0x364503, - 0x5e600802, - 0x388305, - 0x25f9c7, - 0x205b44, - 0x312e47, - 0x2983c9, - 0x2c60c9, - 0x2519c7, - 0x273b03, - 0x273b08, - 0x2ed249, - 0x24e187, - 0x373605, - 0x39e086, - 0x39fb86, - 0x3a3c05, - 0x257605, - 0x5ea02d82, - 0x36ce45, - 0x2b2908, - 0x2c1706, - 0x5eeb7487, - 0x2efa04, - 0x2aa987, - 0x2f62c6, - 0x5f230982, - 0x36f746, - 0x2f83ca, - 0x2f8e85, - 0x5f6de402, - 0x5fa36542, - 0x3b6c06, - 0x2a1e88, - 0x5fe8a487, - 0x60234e42, - 0x2255c3, - 0x311d86, - 0x225044, - 0x3a2846, - 0x390b06, - 0x26ff0a, - 0x331c05, - 0x367ec6, - 0x3759c3, - 0x3759c4, - 0x207102, - 0x309943, - 0x60606382, - 0x2f0f83, - 0x3722c4, - 0x2a1fc4, - 0x2a1fca, - 0x229543, - 0x276288, - 0x22954a, - 0x27b447, - 0x2fcd86, - 0x238684, - 0x290bc2, - 0x2a2e82, - 0x60a04002, - 0x245403, - 0x252207, - 0x31ac87, - 0x2848c4, - 0x26f8c7, - 0x2ea146, - 0x216847, - 0x35e604, - 0x242a05, - 0x2b7985, - 0x60e0fe82, - 0x20fe86, - 0x218283, - 0x220502, - 0x220506, - 0x61203e02, - 0x6160b0c2, - 0x3ba785, - 0x61a21c82, - 0x61e03b42, - 0x33b5c5, - 0x393105, - 0x367f85, - 0x267303, - 0x286385, - 0x2dd747, - 0x307bc5, - 0x306185, - 0x38b044, - 0x3204c6, - 0x23e804, - 0x62201442, - 0x62f630c5, - 0x2ebe07, - 0x2d6dc8, - 0x25fe86, - 0x25fe8d, - 0x260709, - 0x260712, - 0x32f345, - 0x3339c3, - 0x6320a9c2, - 0x309444, - 0x375903, - 0x360fc5, - 0x2fa085, - 0x63612982, - 0x36e843, - 0x63a50b82, - 0x642bf542, - 0x6460fb42, - 0x353805, - 0x37ac43, - 0x37a4c8, - 0x64a07842, - 0x64e000c2, - 0x2a8406, - 0x33b80a, - 0x21bf03, - 0x20c343, - 0x2ee3c3, - 0x65a02dc2, - 0x73e35482, - 0x74601c82, - 0x201682, - 0x36f549, - 0x2c2f04, - 0x2309c8, - 0x74af4542, - 0x74e08602, - 0x2ef605, - 0x2330c8, - 0x282288, - 0x2f858c, - 0x22d543, - 0x25a9c2, - 0x75201f82, - 0x2caac6, - 0x2fdc05, - 0x26d343, - 0x23cc46, - 0x2fdd46, - 0x201f83, - 0x2ff883, - 0x300786, - 0x3013c4, - 0x295586, - 0x2cec85, - 0x30164a, - 0x2eebc4, - 0x302304, - 0x30370a, - 0x7566b082, - 0x337745, - 0x30478a, - 0x305285, - 0x305b44, - 0x305c46, - 0x305dc4, - 0x218dc6, - 0x75a6dac2, - 0x2f3206, - 0x2f3dc5, - 0x3ab6c7, - 0x200206, - 0x248744, - 0x2d5e07, - 0x30dd46, - 0x2b8a45, - 0x381947, - 0x39eb47, - 0x39eb4e, - 0x25ed06, - 0x2e1805, - 0x27dec7, - 0x282b43, - 0x3b2f87, - 0x20f5c5, - 0x212144, - 0x212f82, - 0x3addc7, - 0x332944, - 0x377404, - 0x273f0b, - 0x21d5c3, - 0x2b6987, - 0x21d5c4, - 0x2cc0c7, - 0x228bc3, - 0x33678d, - 0x388b48, - 0x21d044, - 0x250bc5, - 0x307d05, - 0x308143, - 0x75e22f02, - 0x309903, - 0x309fc3, - 0x210004, - 0x279805, - 0x218307, - 0x375a46, - 0x372003, - 0x23ab4b, - 0x26ba4b, - 0x2a654b, - 0x2de44a, - 0x30254b, - 0x31be8b, - 0x356b8c, - 0x378d11, - 0x3b654a, - 0x3ba10b, - 0x30ad8b, - 0x30b34a, - 0x30b88a, - 0x30cb4e, - 0x30d18b, - 0x30d44a, - 0x30ef11, - 0x30f34a, - 0x30f84b, - 0x30fd8e, - 0x31078c, - 0x310c4b, - 0x310f0e, - 0x31128c, - 
0x31474a, - 0x31698c, - 0x76316c8a, - 0x317489, - 0x31af4a, - 0x31b1ca, - 0x31b44b, - 0x31f60e, - 0x31f991, - 0x328b89, - 0x328dca, - 0x3295cb, - 0x32a84a, - 0x32b316, - 0x32e14b, - 0x32f10a, - 0x32f50a, - 0x33084b, - 0x333449, - 0x337109, - 0x337d4d, - 0x33870b, - 0x33978b, - 0x33a14b, - 0x33a609, - 0x33ac4e, - 0x33b30a, - 0x33fc8a, - 0x33ffca, - 0x340b8b, - 0x3413cb, - 0x34168d, - 0x342c0d, - 0x343290, - 0x34374b, - 0x34408c, - 0x34480b, - 0x34644b, - 0x34798b, - 0x34c00b, - 0x34ca8f, - 0x34ce4b, - 0x34d94a, - 0x34e689, - 0x34f409, - 0x34f8cb, - 0x34fb8e, - 0x35434b, - 0x35574f, - 0x35864b, - 0x35890b, - 0x358bcb, - 0x3590ca, - 0x35c609, - 0x35f34f, - 0x36424c, - 0x36488c, - 0x364d0e, - 0x3653cf, - 0x36578e, - 0x365fd0, - 0x3663cf, - 0x366f4e, - 0x36770c, - 0x367a12, - 0x3689d1, - 0x36988e, - 0x36a04e, - 0x36a58e, - 0x36a90f, - 0x36acce, - 0x36b053, - 0x36b511, - 0x36b94e, - 0x36bdcc, - 0x36d913, - 0x36e210, - 0x36ea8c, - 0x36ed8c, - 0x36f24b, - 0x3703ce, - 0x370c8b, - 0x3715cb, - 0x37258c, - 0x37814a, - 0x37850c, - 0x37880c, - 0x378b09, - 0x37bb8b, - 0x37be48, - 0x37c049, - 0x37c04f, - 0x37d98b, - 0x7677eb8a, - 0x381fcc, - 0x383189, - 0x383608, - 0x38380b, - 0x383c8b, - 0x38480a, - 0x384a8b, - 0x38540c, - 0x386008, - 0x388d4b, - 0x38b44b, - 0x39484b, - 0x3958cb, - 0x39e6cb, - 0x39e989, - 0x39eecd, - 0x3a464a, - 0x3a5597, - 0x3a6bd8, - 0x3a96c9, - 0x3ab30b, - 0x3ac814, - 0x3acd0b, - 0x3ad28a, - 0x3aea0a, - 0x3aec8b, - 0x3b4250, - 0x3b4651, - 0x3b4d0a, - 0x3b5b4d, - 0x3b624d, - 0x3ba3cb, - 0x3bbd46, - 0x20ff83, - 0x76b80483, - 0x22cdc6, - 0x247645, - 0x27a007, - 0x31bd46, - 0x1656682, - 0x2ad9c9, - 0x31a7c4, - 0x2dacc8, - 0x232b43, - 0x309387, - 0x234f42, - 0x2ac243, - 0x76e07b02, - 0x2c7406, - 0x2c9884, - 0x369f44, - 0x390143, - 0x390145, - 0x776c3d82, - 0x77aa6cc4, - 0x270787, - 0x77e4a282, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x204e83, - 0x205702, - 0x16d208, - 0x2099c2, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x214843, - 0x324556, - 0x325793, - 0x26f749, - 0x3b0688, - 0x379509, - 0x304906, - 0x3389d0, - 0x254b53, - 0x38e508, - 0x28ea47, - 0x36c747, - 0x284d0a, - 0x372349, - 0x38d849, - 0x28decb, - 0x349846, - 0x379b4a, - 0x220d86, - 0x31a3c3, - 0x2d48c5, - 0x35e288, - 0x23888d, - 0x2b984c, - 0x2de0c7, - 0x30b00d, - 0x2142c4, - 0x22fd8a, - 0x230d4a, - 0x23120a, - 0x2099c7, - 0x23af07, - 0x23d844, - 0x22e206, - 0x20c144, - 0x2b4148, - 0x22cbc9, - 0x2b0a86, - 0x2b0a88, - 0x2422cd, - 0x2c6309, - 0x3ac008, - 0x264a07, - 0x2f1f0a, - 0x24c506, - 0x2580c7, - 0x2cc3c4, - 0x23f287, - 0x309c0a, - 0x3ae54e, - 0x21d205, - 0x3b4a4b, - 0x331a09, - 0x217649, - 0x21c747, - 0x2a34ca, - 0x20ac87, - 0x2fb1c9, - 0x38f0c8, - 0x3533cb, - 0x2da905, - 0x3a430a, - 0x266e09, - 0x26d2ca, - 0x2ca6cb, - 0x23f18b, - 0x28dc55, - 0x2e3b85, - 0x264a85, - 0x2ee0ca, - 0x3945ca, - 0x331787, - 0x21da83, - 0x2904c8, - 0x2d2c4a, - 0x223006, - 0x24dfc9, - 0x265c88, - 0x2d7984, - 0x2aa289, - 0x2bfa08, - 0x29fec7, - 0x3630c6, - 0x2ebe07, - 0x289a47, - 0x23d005, - 0x21d04c, - 0x250bc5, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2099c2, - 0x2a84c3, - 0x205503, - 0x204e83, - 0x200983, - 0x2a84c3, - 0x205503, - 0x25e983, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x16d208, - 0x2099c2, - 0x2006c2, - 0x231442, - 0x206502, - 0x200542, - 0x2decc2, - 0x46a84c3, - 0x232403, - 0x2163c3, - 0x2e9dc3, - 0x244183, - 0x209703, - 0x2d47c6, - 0x205503, - 0x200983, - 0x233183, - 0x16d208, - 0x31ae44, - 0x202107, - 0x392403, 
- 0x2ae584, - 0x22e043, - 0x21c7c3, - 0x2e9dc3, - 0x16fc07, - 0x205702, - 0x18d2c3, - 0x5a099c2, - 0x88f4d, - 0x8928d, - 0x231442, - 0x1b1384, - 0x200442, - 0x5fb1288, - 0xed844, - 0x16d208, - 0x1411d82, - 0x15054c6, - 0x231783, - 0x200c03, - 0x66a84c3, - 0x22fd84, - 0x6a32403, - 0x6ee9dc3, - 0x202bc2, - 0x3b1384, - 0x205503, - 0x2f78c3, - 0x203ec2, - 0x200983, - 0x21b5c2, - 0x2f2443, - 0x203082, - 0x211643, - 0x265d43, - 0x200202, - 0x16d208, - 0x231783, - 0x2f78c3, - 0x203ec2, - 0x2f2443, - 0x203082, - 0x211643, - 0x265d43, - 0x200202, - 0x2f2443, - 0x203082, - 0x211643, - 0x265d43, - 0x200202, - 0x2a84c3, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x209703, - 0x211cc4, - 0x205503, - 0x200983, - 0x20f942, - 0x201303, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x205503, - 0x200983, - 0x373605, - 0x212982, - 0x205702, - 0x16d208, - 0x1456108, - 0x2e9dc3, - 0x2274c1, - 0x202901, - 0x202941, - 0x23ad81, - 0x23ad01, - 0x30aec1, - 0x23aec1, - 0x2275c1, - 0x2eea41, - 0x30afc1, - 0x200141, - 0x200001, - 0x129845, - 0x16d208, - 0x201ec1, - 0x200701, - 0x200301, - 0x200081, - 0x200181, - 0x200401, - 0x200041, - 0x201181, - 0x200101, - 0x200281, - 0x200e81, - 0x2008c1, - 0x200441, - 0x201301, - 0x206ec1, - 0x200341, - 0x200801, - 0x2002c1, - 0x2000c1, - 0x201501, - 0x200201, - 0x200bc1, - 0x2005c1, - 0x201cc1, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x200442, - 0x200983, - 0x16fc07, - 0x9807, - 0x1cdc6, - 0x13ef8a, - 0x88648, - 0x51d48, - 0x52107, - 0x191106, - 0xd8c05, - 0x192345, - 0x5d306, - 0x125c86, - 0x25ef44, - 0x311547, - 0x16d208, - 0x2d5f04, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x2e9dc3, - 0x244183, - 0x209703, - 0x205503, - 0x200983, - 0x212982, - 0x2c5983, - 0x2bb143, - 0x32c243, - 0x2022c2, - 0x25d183, - 0x2030c3, - 0x204903, - 0x200001, - 0x2dc745, - 0x206b43, - 0x221344, - 0x26cc83, - 0x318ec3, - 0x21b103, - 0x35ff43, - 0xaaa84c3, - 0x235ac4, - 0x23dbc3, - 0x21cc43, - 0x21b0c3, - 0x22ffc3, - 0x232403, - 0x232143, - 0x2459c3, - 0x2a2703, - 0x318e43, - 0x2344c3, - 0x202643, - 0x24ce44, - 0x24e347, - 0x248902, - 0x250943, - 0x256303, - 0x273ac3, - 0x390f43, - 0x2025c3, - 0xaee9dc3, - 0x20bec3, - 0x2143c3, - 0x24a5c3, - 0x328085, - 0x209d43, - 0x2fa383, - 0xb21f903, - 0x365f03, - 0x20d543, - 0x227f83, - 0x209703, - 0x228502, - 0x27d2c3, - 0x205503, - 0x1604e83, - 0x224a43, - 0x209a43, - 0x204a03, - 0x200983, - 0x35fe83, - 0x20f943, - 0x201303, - 0x2efe83, - 0x2ff903, - 0x2f2603, - 0x204405, - 0x23e743, - 0x285346, - 0x2f2643, - 0x36cf43, - 0x3759c4, - 0x2d9083, - 0x2284c3, - 0x267ec3, - 0x233183, - 0x212982, - 0x22d543, - 0x3024c3, - 0x304144, - 0x377404, - 0x20ce83, - 0x16d208, - 0x205702, - 0x200242, - 0x2022c2, - 0x201702, - 0x202a42, - 0x206c02, - 0x245482, - 0x2007c2, - 0x20d882, - 0x200e82, - 0x20b102, - 0x20e602, - 0x2675c2, - 0x2056c2, - 0x2decc2, - 0x2013c2, - 0x2069c2, - 0x201302, - 0x2172c2, - 0x202482, - 0x200482, - 0x219382, - 0x202782, - 0x209842, - 0x2027c2, - 0x222702, - 0x203b42, - 0x5702, - 0x242, - 0x22c2, - 0x1702, - 0x2a42, - 0x6c02, - 0x45482, - 0x7c2, - 0xd882, - 0xe82, - 0xb102, - 0xe602, - 0x675c2, - 0x56c2, - 0xdecc2, - 0x13c2, - 0x69c2, - 0x1302, - 0x172c2, - 0x2482, - 0x482, - 0x19382, - 0x2782, - 0x9842, - 0x27c2, - 0x22702, - 0x3b42, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 
0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2099c2, - 0x200983, - 0xc6a84c3, - 0x2e9dc3, - 0x209703, - 0x21a2c2, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x7b02, - 0x201bc2, - 0x153f3c5, - 0x25ed82, - 0x16d208, - 0x99c2, - 0x20c182, - 0x208d02, - 0x2024c2, - 0x209642, - 0x208442, - 0x192345, - 0x2038c2, - 0x203ec2, - 0x2023c2, - 0x204dc2, - 0x2013c2, - 0x385502, - 0x201102, - 0x236582, - 0x16fc07, - 0x1b270d, - 0xd8c89, - 0x56e8b, - 0xdd608, - 0x53dc9, - 0xfacc6, - 0x2e9dc3, - 0x16d208, - 0x16d208, - 0x52e06, - 0x1a78c7, - 0x205702, - 0x25ef44, - 0x2099c2, - 0x2a84c3, - 0x2006c2, - 0x232403, - 0x20d882, - 0x2d5f04, - 0x244183, - 0x249a02, - 0x205503, - 0x200442, - 0x200983, - 0x264a86, - 0x31ba0f, - 0x70a403, - 0x16d208, - 0x2099c2, - 0x2163c3, - 0x2e9dc3, - 0x209703, - 0x1526f4b, - 0xd9888, - 0x142b68a, - 0x14fa807, - 0xda405, - 0x16fc07, - 0x2099c2, - 0x2a84c3, - 0x2e9dc3, - 0x205503, - 0x205702, - 0x20c202, - 0x20bb42, - 0xfea84c3, - 0x23c042, - 0x232403, - 0x209d02, - 0x221402, - 0x2e9dc3, - 0x228782, - 0x251442, - 0x2a6c82, - 0x200f82, - 0x28d742, - 0x203442, - 0x202e42, - 0x267e42, - 0x24ecc2, - 0x211ec2, - 0x2ad882, - 0x2eab02, - 0x2182c2, - 0x2ad342, - 0x209703, - 0x20ec42, - 0x205503, - 0x200e42, - 0x281702, - 0x200983, - 0x25d202, - 0x209842, - 0x218942, - 0x202e02, - 0x200c42, - 0x2de402, - 0x20fe82, - 0x250b82, - 0x220642, - 0x30d44a, - 0x34d94a, - 0x37fc4a, - 0x3bbec2, - 0x202cc2, - 0x2058c2, - 0x1026e389, - 0x1072510a, - 0x1594ac7, - 0x1410843, - 0x24d50, - 0x50642, - 0x2030c4, - 0x10ea84c3, - 0x232403, - 0x249944, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x209703, - 0x205503, - 0xdc105, - 0x204e83, - 0x200983, - 0x23e743, - 0x25ed03, - 0x16d208, - 0x1591084, - 0x18ff45, - 0x1a768a, - 0x116902, - 0x18ae46, - 0xaf551, - 0x1166e389, - 0x18ffc8, - 0x13f9c8, - 0xff387, - 0xec2, - 0x12984b, - 0x1a5b0a, - 0x21347, - 0x16d208, - 0x108f08, - 0xe4c7, - 0x17818f4b, - 0x1b887, - 0x1c02, - 0x6c707, - 0x1a1ca, - 0x13f6cf, - 0x988f, - 0x1b102, - 0x99c2, - 0xa2648, - 0x19e30a, - 0x1320c8, - 0xdc2, - 0x13f44f, - 0x9e18b, - 0x68bc8, - 0x38f47, - 0x388a, - 0x304cb, - 0x4efc9, - 0x11dd07, - 0xfc34c, - 0x2c07, - 0x19b40a, - 0xd4ac8, - 0x1a3cce, - 0x1cdce, - 0x2118b, - 0x26ccb, - 0x27d4b, - 0x2c009, - 0x2da0b, - 0x5e7cd, - 0x85acb, - 0xdfc8d, - 0xe000d, - 0xe164a, - 0x17724b, - 0x1ae0cb, - 0x31c45, - 0x1424d50, - 0x12618f, - 0x1268cf, - 0xe2c0d, - 0x1b8f90, - 0x2bb82, - 0x17fb0388, - 0x9688, - 0x182ee705, - 0x48fcb, - 0x117090, - 0x4fdc8, - 0x26e8a, - 0x56b49, - 0x5cb47, - 0x5ce87, - 0x5d047, - 0x5f507, - 0x60587, - 0x60b87, - 0x61387, - 0x617c7, - 0x61cc7, - 0x61fc7, - 0x62fc7, - 0x63187, - 0x63347, - 0x63507, - 0x63807, - 0x64007, - 0x64c87, - 0x65407, - 0x66547, - 0x66b07, - 0x66cc7, - 0x67047, - 0x67487, - 0x67687, - 0x67947, - 0x67b07, - 0x67cc7, - 0x67f87, - 0x68247, - 0x68f07, - 0x69607, - 0x698c7, - 0x6a047, - 0x6a207, - 0x6a607, - 0x6aec7, - 0x6b147, - 0x6b547, - 0x6b707, - 0x6b8c7, - 0x70587, - 0x71387, - 0x718c7, - 0x71e47, - 0x72007, - 0x72387, - 0x728c7, - 0xdb42, - 0xbbb0a, - 0xffb87, - 0x184cfa0b, - 0x14cfa16, - 0x17e91, - 0x1082ca, - 0xa24ca, - 0x52e06, - 0xd0f8b, - 0x5e82, - 0x2f711, - 0x157789, - 0x942c9, - 0x67e42, - 0x9f54a, - 0xa4909, - 0xa504f, - 0xa5a8e, - 0xa6388, - 0x17f42, - 0x18ef09, - 0x17f08e, - 0xf80cc, - 0xdf20f, - 0x198f4e, - 0xc84c, - 0x11809, - 0x13491, - 0x222c8, - 0x24512, - 0x281cd, - 0x2e0cd, - 0x8618b, - 0xbadd5, - 0xbb9c9, - 0xe268a, - 0x120689, - 0x160310, - 0x39a0b, - 0x4480f, - 0x5648b, - 0x58a8c, - 
0x70f90, - 0x7beca, - 0x7d18d, - 0x80d4e, - 0x86cca, - 0x8720c, - 0x89714, - 0x157411, - 0x1a200b, - 0x9004f, - 0x9320d, - 0x9a00e, - 0x9fd8c, - 0xa1acc, - 0xaae8b, - 0xab18e, - 0xab990, - 0x154c0b, - 0x1160cd, - 0x10e80f, - 0x17e50c, - 0xb090e, - 0xb2391, - 0xb3ecc, - 0xc00c7, - 0xc064d, - 0xc0fcc, - 0xc1dd0, - 0x102c8d, - 0x12bc87, - 0xc7750, - 0xd3748, - 0xd51cb, - 0x12aa8f, - 0x17e248, - 0x1084cd, - 0x14d550, - 0x18ba60c6, - 0xaff43, - 0xbe02, - 0x11e309, - 0x5394a, - 0x104186, - 0x18cd9009, - 0x11d43, - 0xd6191, - 0xd65c9, - 0xd7607, - 0xaf6cb, - 0xde6d0, - 0xdeb8c, - 0xdf6c5, - 0x18f248, - 0x19f94a, - 0x111947, - 0x33c2, - 0x124a4a, - 0x127549, - 0x35b4a, - 0x8a3cf, - 0x3edcb, - 0x12814c, - 0x169b92, - 0xaea45, - 0x166aca, - 0x192ece45, - 0x18020c, - 0x122843, - 0x185502, - 0xf2bca, - 0x14f3fcc, - 0x1b1a48, - 0xdfe48, - 0x16fb87, - 0x1c42, - 0x3082, - 0x3f590, - 0x27c2, - 0x1ad58f, - 0x5d306, - 0x77ece, - 0xe598b, - 0x86ec8, - 0xd1a49, - 0x17d152, - 0x1abecd, - 0x55b08, - 0x56d49, - 0x572cd, - 0x57b89, - 0x5c58b, - 0x5d848, - 0x61ac8, - 0x628c8, - 0x62b49, - 0x62d4a, - 0x6398c, - 0xe3cca, - 0xff947, - 0x2270d, - 0xf4b4b, - 0x11a5cc, - 0x18b050, - 0xc2, - 0x7a14d, - 0x2dc2, - 0x35482, - 0xff88a, - 0x1081ca, - 0x10928b, - 0x1ae28c, - 0x108c8e, - 0x100cd, - 0x1b3908, - 0x7b02, - 0x11b5ec4e, - 0x1227020e, - 0x12a83a0a, - 0x1336864e, - 0x13b143ce, - 0x1432ee0c, - 0x1594ac7, - 0x1594ac9, - 0x1410843, - 0x14b3054c, - 0x15333209, - 0x15b49dc9, - 0x50642, - 0x18fb51, - 0x70151, - 0x8394d, - 0x17acd1, - 0x114311, - 0x12ed4f, - 0x13048f, - 0x13314c, - 0x149d0c, - 0x1a688d, - 0x1bb815, - 0x5064c, - 0x11f0cc, - 0xe9c50, - 0x11d44c, - 0x12a54c, - 0x15e999, - 0x168399, - 0x16fd99, - 0x175d54, - 0x181ad4, - 0x19b7d4, - 0x19d714, - 0x1ac314, - 0x16250709, - 0x1699ba89, - 0x1731f189, - 0x11e224c9, - 0x50642, - 0x126224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0x12e224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0x136224c9, - 0x50642, - 0x13e224c9, - 0x50642, - 0x146224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0x14e224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0x156224c9, - 0x50642, - 0x15e224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0x166224c9, - 0x50642, - 0x16e224c9, - 0x50642, - 0x176224c9, - 0x50642, - 0x15e98a, - 0x50642, - 0xaf545, - 0x1a5b04, - 0x2bb84, - 0x1aa404, - 0x1a75c4, - 0xc484, - 0x13fc4, - 0x58f44, - 0xff384, - 0x14ab3c3, - 0x143e603, - 0xfb244, - 0x1547c03, - 0x2bb82, - 0x100c3, - 0x205702, - 0x2099c2, - 0x2006c2, - 0x218342, - 0x20d882, - 0x200442, - 0x203082, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x24a5c3, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x205503, - 0x200983, - 0x3fc3, - 0x2e9dc3, - 0x205702, - 0x38d2c3, - 0x1aea84c3, - 0x3b8e47, - 0x2e9dc3, - 0x206343, - 0x211cc4, - 0x205503, - 0x200983, - 0x255cca, - 0x264a85, - 0x201303, - 0x20b0c2, - 0x16d208, - 0x16d208, - 0x99c2, - 0x11fd02, - 0x6c845, - 0x129845, - 0x16d208, - 0x1b887, - 0xa84c3, - 0x1ba38e47, - 0x13ee06, - 0x1bd49c05, - 0x11de07, - 0x66ca, - 0x3748, - 0x65c7, - 0x56948, - 0x28d87, - 0x2c6cf, - 0x30b87, - 0x3b806, - 0x117090, - 0x12330f, - 0x104204, - 0x1c11dece, - 0xa8b4c, - 0x4f14a, - 0x9a2c7, - 0x112b8a, - 0x18f409, - 0xbf34a, - 0x5414a, - 0x104186, - 0x9a38a, - 0x8350a, - 0xe47c9, - 0xd5a48, - 0xd5d46, - 0xd9a8d, - 0xb3c45, - 0x1a78c7, - 0x5d6c7, - 0xd9394, - 0xf938b, - 0x68a0a, - 0xa2d0d, - 0x1cdc3, - 0x1cdc3, - 0x1cdc6, - 0x1cdc3, - 0x18d2c3, - 0x16d208, - 0x99c2, - 0x49944, - 0x887c3, - 0x173605, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2030c3, - 0x2a84c3, - 0x232403, - 0x2163c3, 
- 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x294483, - 0x25ed03, - 0x2030c3, - 0x25ef44, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2082c3, - 0x2a84c3, - 0x232403, - 0x218343, - 0x2163c3, - 0x2e9dc3, - 0x3b1384, - 0x353903, - 0x227f83, - 0x209703, - 0x205503, - 0x200983, - 0x201303, - 0x311dc3, - 0x1dea84c3, - 0x232403, - 0x246383, - 0x2e9dc3, - 0x20a203, - 0x227f83, - 0x200983, - 0x2072c3, - 0x33bac4, - 0x16d208, - 0x1e6a84c3, - 0x232403, - 0x2a6443, - 0x2e9dc3, - 0x209703, - 0x211cc4, - 0x205503, - 0x200983, - 0x21db03, - 0x16d208, - 0x1eea84c3, - 0x232403, - 0x2163c3, - 0x204e83, - 0x200983, - 0x16d208, - 0x1594ac7, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x211cc4, - 0x205503, - 0x200983, - 0x129845, - 0x16fc07, - 0xd95cb, - 0xd69c4, - 0xb3c45, - 0x1456108, - 0xa6a8d, - 0x20284a05, - 0x18004, - 0x169c3, - 0x186345, - 0x349a05, - 0x16d208, - 0x1cdc2, - 0x336c3, - 0xf1446, - 0x319ec8, - 0x313bc7, - 0x25ef44, - 0x3b2c86, - 0x3bb6c6, - 0x16d208, - 0x30ce43, - 0x33e589, - 0x237295, - 0x3729f, - 0x2a84c3, - 0x31d012, - 0xefac6, - 0x10a045, - 0x26e8a, - 0x56b49, - 0x31cdcf, - 0x2d5f04, - 0x20b145, - 0x2fa150, - 0x3b0887, - 0x204e83, - 0x28b148, - 0x125bc6, - 0x2ae1ca, - 0x256044, - 0x2ec883, - 0x264a86, - 0x20b0c2, - 0x22d54b, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x2f1743, - 0x2099c2, - 0x2cd83, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x206343, - 0x221f03, - 0x200983, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x205503, - 0x200983, - 0x205702, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x9885, - 0x25ef44, - 0x2a84c3, - 0x232403, - 0x210444, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x2143c3, - 0x209703, - 0x205503, - 0x200983, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x391683, - 0x63643, - 0x6343, - 0x205503, - 0x200983, - 0x30d44a, - 0x32b0c9, - 0x346b0b, - 0x34708a, - 0x34d94a, - 0x35d74b, - 0x371e0a, - 0x37814a, - 0x37fc4a, - 0x37fecb, - 0x39f689, - 0x3a140a, - 0x3a178b, - 0x3acfcb, - 0x3b9eca, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x209703, - 0x205503, - 0x200983, - 0x4589, - 0x16d208, - 0x2a84c3, - 0x25cb44, - 0x207ac2, - 0x211cc4, - 0x26fc45, - 0x2030c3, - 0x25ef44, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x249944, - 0x2d5f04, - 0x3b1384, - 0x227f83, - 0x205503, - 0x200983, - 0x27a305, - 0x2082c3, - 0x201303, - 0x22ed03, - 0x250cc4, - 0x390fc4, - 0x34ae45, - 0x16d208, - 0x302044, - 0x3510c6, - 0x276384, - 0x2099c2, - 0x371007, - 0x24c0c7, - 0x247784, - 0x2555c5, - 0x302e85, - 0x2a9305, - 0x3b1384, - 0x3b8ac8, - 0x239486, - 0x30c188, - 0x24ed05, - 0x2da905, - 0x236b84, - 0x200983, - 0x2ed844, - 0x35c946, - 0x264b83, - 0x250cc4, - 0x256005, - 0x32d104, - 0x334944, - 0x20b0c2, - 0x2425c6, - 0x3962c6, - 0x2fdc05, - 0x205702, - 0x38d2c3, - 0x262099c2, - 0x2333c4, - 0x20d882, - 0x209703, - 0x202c82, - 0x205503, - 0x200442, - 0x214843, - 0x25ed03, - 0x16d208, - 0x16d208, - 0x2e9dc3, - 0x205702, - 0x26e099c2, - 0x2e9dc3, - 0x245b43, - 0x353903, - 0x327344, - 0x205503, - 0x200983, - 0x16d208, - 0x205702, - 0x276099c2, - 0x2a84c3, - 0x205503, - 0x200983, - 0x482, - 0x20a9c2, - 0x212982, - 0x206343, - 0x2e87c3, - 0x205702, - 0x129845, - 0x16d208, - 0x16fc07, - 0x2099c2, - 0x232403, - 0x249944, - 0x2032c3, - 0x2e9dc3, - 0x2143c3, - 0x209703, - 
0x205503, - 0x216b03, - 0x200983, - 0x21da83, - 0x118fd3, - 0x11c954, - 0x16fc07, - 0x13b46, - 0x53b4b, - 0x1cdc6, - 0x51b87, - 0x11ab09, - 0xe6d4a, - 0x8850d, - 0x1b240c, - 0x1ada8a, - 0x192345, - 0x6708, - 0x5d306, - 0x125c86, - 0x22bb82, - 0xff14c, - 0x1a5cc7, - 0x22e51, - 0x2a84c3, - 0x568c5, - 0x77848, - 0x9e04, - 0x288347c6, - 0x17e86, - 0x8cb46, - 0x8da0a, - 0xac543, - 0x28e54b04, - 0x11aac5, - 0xde283, - 0xdc105, - 0xd104c, - 0xf04c8, - 0xb5708, - 0x9e009, - 0x134b08, - 0x141e046, - 0xda40a, - 0x82b48, - 0xf4648, - 0xff384, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x205702, - 0x2099c2, - 0x2e9dc3, - 0x202bc2, - 0x205503, - 0x200983, - 0x214843, - 0x3653cf, - 0x36578e, - 0x16d208, - 0x2a84c3, - 0x42f87, - 0x232403, - 0x2e9dc3, - 0x244183, - 0x205503, - 0x200983, - 0x201bc3, - 0x201bc7, - 0x200142, - 0x32c249, - 0x200242, - 0x23f88b, - 0x297b8a, - 0x2a2a49, - 0x200882, - 0x391206, - 0x34ed15, - 0x23f9d5, - 0x246993, - 0x23ff53, - 0x202a82, - 0x205ac5, - 0x3b364c, - 0x27160b, - 0x2726c5, - 0x201702, - 0x284202, - 0x386fc6, - 0x200ec2, - 0x3695c6, - 0x2d4c4d, - 0x27ef4c, - 0x224dc4, - 0x203dc2, - 0x205942, - 0x2248c8, - 0x202a42, - 0x312fc6, - 0x2ba844, - 0x34eed5, - 0x246b13, - 0x210783, - 0x32fa0a, - 0x3bb147, - 0x3094c9, - 0x37b887, - 0x30f242, - 0x200002, - 0x3aef06, - 0x20cb42, - 0x16d208, - 0x2105c2, - 0x20b382, - 0x274e87, - 0x20f687, - 0x21b585, - 0x201c02, - 0x21da47, - 0x21dc08, - 0x242b42, - 0x2bf3c2, - 0x22e802, - 0x201ec2, - 0x237b88, - 0x201ec3, - 0x2b5308, - 0x2cf1cd, - 0x213c03, - 0x327988, - 0x239f8f, - 0x23a34e, - 0x25edca, - 0x229751, - 0x229bd0, - 0x2bcdcd, - 0x2bd10c, - 0x311c47, - 0x32fb87, - 0x3b2d49, - 0x224ec2, - 0x206c02, - 0x25340c, - 0x25370b, - 0x204142, - 0x2ab046, - 0x21a1c2, - 0x209882, - 0x21b102, - 0x2099c2, - 0x383a84, - 0x238bc7, - 0x204682, - 0x23d147, - 0x23e487, - 0x20e142, - 0x2301c2, - 0x242e45, - 0x205742, - 0x362e0e, - 0x2ebb8d, - 0x232403, - 0x2be90e, - 0x2e064d, - 0x37eac3, - 0x200e02, - 0x21fec4, - 0x2454c2, - 0x2175c2, - 0x358e45, - 0x364b47, - 0x383382, - 0x218342, - 0x249547, - 0x24d288, - 0x248902, - 0x2aeac6, - 0x25328c, - 0x2535cb, - 0x20fc82, - 0x25924f, - 0x259610, - 0x259a0f, - 0x259dd5, - 0x25a314, - 0x25a80e, - 0x25ab8e, - 0x25af0f, - 0x25b2ce, - 0x25b654, - 0x25bb53, - 0x25c00d, - 0x272a89, - 0x2895c3, - 0x200782, - 0x22b0c5, - 0x207f86, - 0x20d882, - 0x21f507, - 0x2e9dc3, - 0x205e82, - 0x362a08, - 0x229991, - 0x229dd0, - 0x206482, - 0x288d87, - 0x203942, - 0x214607, - 0x20be02, - 0x319cc9, - 0x386f87, - 0x27aac8, - 0x234606, - 0x2e86c3, - 0x32a105, - 0x232682, - 0x202082, - 0x3af305, - 0x380685, - 0x2040c2, - 0x24c543, - 0x32d187, - 0x223787, - 0x200502, - 0x254684, - 0x223b83, - 0x223b89, - 0x22c548, - 0x200282, - 0x204bc2, - 0x3105c7, - 0x31ff05, - 0x2a5348, - 0x219947, - 0x200e83, - 0x28c446, - 0x2bcc4d, - 0x2bcfcc, - 0x2b45c6, - 0x208d02, - 0x2a8542, - 0x202342, - 0x239e0f, - 0x23a20e, - 0x302f07, - 0x203d02, - 0x2bf745, - 0x2bf746, - 0x20f242, - 0x20ec42, - 0x221f06, - 0x214543, - 0x214546, - 0x2c6985, - 0x2c698d, - 0x2c6f55, - 0x2c814c, - 0x2c95cd, - 0x2c9992, - 0x20e602, - 0x2675c2, - 0x202d02, - 0x240806, - 0x2f7f86, - 0x2033c2, - 0x208006, - 0x2023c2, - 0x38b785, - 0x200542, - 0x2ebc89, - 0x31554c, - 0x31588b, - 0x200442, - 0x24e748, - 0x203b02, - 0x2056c2, - 0x26a346, - 0x222445, - 0x226747, - 0x257d85, - 0x29e405, - 0x243002, - 0x2067c2, - 0x2013c2, - 0x2df507, - 0x380c0d, - 0x380f8c, - 0x22f087, - 0x20f982, - 0x2069c2, - 0x241248, - 0x31e488, - 0x2e3988, - 0x308484, - 0x2ab407, - 
0x2e90c3, - 0x228ec2, - 0x2082c2, - 0x2eb3c9, - 0x3a40c7, - 0x201302, - 0x26a745, - 0x22d4c2, - 0x21aa02, - 0x2f9f03, - 0x2f9f06, - 0x2f1742, - 0x2f23c2, - 0x201a42, - 0x202f86, - 0x21fe07, - 0x213bc2, - 0x205ec2, - 0x2b514f, - 0x2be74d, - 0x3872ce, - 0x2e04cc, - 0x2009c2, - 0x207302, - 0x234445, - 0x30ba46, - 0x2018c2, - 0x202482, - 0x200482, - 0x2198c4, - 0x2cf044, - 0x2d0e86, - 0x203082, - 0x36cac7, - 0x203083, - 0x285d48, - 0x34e488, - 0x239887, - 0x240706, - 0x203902, - 0x234b03, - 0x234b07, - 0x273946, - 0x2dee45, - 0x308808, - 0x200d02, - 0x331207, - 0x222702, - 0x361782, - 0x20cfc2, - 0x2c6749, - 0x230982, - 0x200842, - 0x22f303, - 0x331c87, - 0x2002c2, - 0x3156cc, - 0x3159cb, - 0x2b4646, - 0x2de1c5, - 0x221c82, - 0x203b42, - 0x2b7bc6, - 0x260dc3, - 0x38c187, - 0x236102, - 0x201442, - 0x34eb95, - 0x23fb95, - 0x246853, - 0x2400d3, - 0x2585c7, - 0x271a48, - 0x271a50, - 0x28d2cf, - 0x297953, - 0x2a2812, - 0x32be10, - 0x2d544f, - 0x35f7d2, - 0x30c3d1, - 0x2b7613, - 0x2c6512, - 0x2cff4f, - 0x2d2e8e, - 0x2d3f52, - 0x2d71d1, - 0x2d7c8f, - 0x30440e, - 0x2f0691, - 0x2f17d0, - 0x2f2752, - 0x2fc711, - 0x364586, - 0x36d3c7, - 0x372187, - 0x203142, - 0x27d8c5, - 0x3933c7, - 0x212982, - 0x209942, - 0x228a85, - 0x21e743, - 0x34b0c6, - 0x380dcd, - 0x38110c, - 0x201682, - 0x3b34cb, - 0x2714ca, - 0x20598a, - 0x2b6449, - 0x2ea64b, - 0x219a8d, - 0x2fa5cc, - 0x25180a, - 0x22090c, - 0x26908b, - 0x27250c, - 0x29474b, - 0x3154c3, - 0x36cfc6, - 0x3a98c2, - 0x2f4542, - 0x20a743, - 0x208602, - 0x21fe83, - 0x2366c6, - 0x259f87, - 0x2c7fc6, - 0x39e4c8, - 0x31e188, - 0x2ce146, - 0x201f82, - 0x2fd5cd, - 0x2fd90c, - 0x2d5fc7, - 0x301f07, - 0x213b82, - 0x201502, - 0x234a82, - 0x24d642, - 0x2099c2, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x211cc4, - 0x205503, - 0x200983, - 0x214843, - 0x205702, - 0x2021c2, - 0x2ae8fdc5, - 0x2b247e45, - 0x2b717806, - 0x16d208, - 0x2baaee05, - 0x2099c2, - 0x2006c2, - 0x2bfb3ac5, - 0x2c27bdc5, - 0x2c67c9c7, - 0x2ca86a09, - 0x2ce3bc44, - 0x20d882, - 0x205e82, - 0x2d24b5c5, - 0x2d68f849, - 0x2db1db88, - 0x2deab805, - 0x2e300187, - 0x2e61ed48, - 0x2eae5d85, - 0x2ee00106, - 0x2f337809, - 0x2f6b5a48, - 0x2fac0488, - 0x2fe9704a, - 0x302732c4, - 0x306d13c5, - 0x30abc9c8, - 0x30e03a85, - 0x20cec2, - 0x31248a43, - 0x316a1686, - 0x31b60148, - 0x31eb94c6, - 0x32281f08, - 0x32719606, - 0x32adef04, - 0x200c82, - 0x32f2cb87, - 0x332a75c4, - 0x336756c7, - 0x33ba2987, - 0x200442, - 0x33e9b0c5, - 0x34334f84, - 0x346cd907, - 0x34a5f187, - 0x34e80886, - 0x3527c585, - 0x356959c7, - 0x35ad0b48, - 0x35e2b447, - 0x363164c9, - 0x36793105, - 0x36b31dc7, - 0x36e8f546, - 0x37391408, - 0x2273cd, - 0x279909, - 0x28174b, - 0x2a4b0b, - 0x34058b, - 0x2ffe8b, - 0x30bc4b, - 0x30bf0b, - 0x30c809, - 0x30d6cb, - 0x30d98b, - 0x30e48b, - 0x30f5ca, - 0x30fb0a, - 0x31010c, - 0x314d8b, - 0x31670a, - 0x32904a, - 0x33404e, - 0x33568e, - 0x335a0a, - 0x33808a, - 0x338dcb, - 0x33908b, - 0x339e8b, - 0x354ecb, - 0x3554ca, - 0x35618b, - 0x35644a, - 0x3566ca, - 0x35694a, - 0x372b0b, - 0x37914b, - 0x37c74e, - 0x37cacb, - 0x38454b, - 0x385acb, - 0x38900a, - 0x389289, - 0x3894ca, - 0x38a94a, - 0x3a00cb, - 0x3a1a4b, - 0x3a22ca, - 0x3a48cb, - 0x3a8c4b, - 0x3b990b, - 0x3767e648, - 0x37a87c89, - 0x37e9de89, - 0x382dacc8, - 0x342505, - 0x217083, - 0x21c6c4, - 0x220005, - 0x23b986, - 0x25da05, - 0x2864c4, - 0x21f408, - 0x308005, - 0x291784, - 0x203447, - 0x29cf8a, - 0x3712ca, - 0x338547, - 0x3af9c7, - 0x2f8f07, - 0x264e87, - 0x2f60c5, - 0x33bb86, - 0x2bb847, - 0x2b4904, - 0x2e4646, - 0x2e4546, - 0x3b9585, - 
0x26d1c4, - 0x3519c6, - 0x29bf47, - 0x285746, - 0x2e3247, - 0x25e443, - 0x2b1c06, - 0x2328c5, - 0x27cac7, - 0x2641ca, - 0x260e44, - 0x217c08, - 0x2abd89, - 0x2cd247, - 0x336286, - 0x24e9c8, - 0x2b9c09, - 0x309684, - 0x366944, - 0x244245, - 0x2bb548, - 0x2c4b07, - 0x2a9709, - 0x364688, - 0x345e86, - 0x3204c6, - 0x298048, - 0x359646, - 0x247e45, - 0x280946, - 0x275ec8, - 0x24da46, - 0x2525cb, - 0x298646, - 0x29994d, - 0x3a6005, - 0x2a7486, - 0x208b45, - 0x2f9bc9, - 0x2f9a87, - 0x37a208, - 0x266986, - 0x298bc9, - 0x3793c6, - 0x264145, - 0x268686, - 0x2cae46, - 0x2cb3c9, - 0x3530c6, - 0x339487, - 0x26ad85, - 0x202ac3, - 0x252745, - 0x299c07, - 0x33c6c6, - 0x3a5f09, - 0x317806, - 0x280b86, - 0x210c49, - 0x280349, - 0x29fc07, - 0x282f88, - 0x28c989, - 0x27d548, - 0x378386, - 0x2d5805, - 0x2418ca, - 0x280c06, - 0x3b7986, - 0x2c8985, - 0x265808, - 0x223307, - 0x22f50a, - 0x249e46, - 0x279d45, - 0x37aa46, - 0x21ac47, - 0x336147, - 0x21bbc5, - 0x264305, - 0x357dc6, - 0x2ac5c6, - 0x34dc06, - 0x2b3204, - 0x27f689, - 0x288b46, - 0x2dd38a, - 0x21b388, - 0x3078c8, - 0x3712ca, - 0x20b445, - 0x29be85, - 0x350b88, - 0x2b2c88, - 0x27b5c7, - 0x258946, - 0x322388, - 0x2fdec7, - 0x27dc48, - 0x2b3846, - 0x281408, - 0x294f06, - 0x24ee87, - 0x299ec6, - 0x3519c6, - 0x3778ca, - 0x2bd8c6, - 0x2d5809, - 0x26dbc6, - 0x2af14a, - 0x2def09, - 0x2fb486, - 0x2b4b04, - 0x22b18d, - 0x287f07, - 0x326cc6, - 0x2c0345, - 0x379445, - 0x374246, - 0x2cd749, - 0x2b1647, - 0x277306, - 0x2cc246, - 0x286549, - 0x247d84, - 0x3482c4, - 0x352cc8, - 0x236a86, - 0x26a808, - 0x2e41c8, - 0x312747, - 0x3b7549, - 0x34de07, - 0x2aecca, - 0x2e1f8f, - 0x23188a, - 0x234245, - 0x276105, - 0x216e85, - 0x2ba787, - 0x21a803, - 0x283188, - 0x396786, - 0x396889, - 0x2b87c6, - 0x3b5207, - 0x298989, - 0x37a108, - 0x2c8a47, - 0x30a343, - 0x342585, - 0x21a785, - 0x2b304b, - 0x203b44, - 0x2c2084, - 0x274646, - 0x30abc7, - 0x382bca, - 0x248ac7, - 0x311e87, - 0x27bdc5, - 0x200645, - 0x2eef89, - 0x3519c6, - 0x24894d, - 0x353305, - 0x2b1383, - 0x205043, - 0x26f685, - 0x345c45, - 0x24e9c8, - 0x2790c7, - 0x348046, - 0x29db06, - 0x229105, - 0x2326c7, - 0x312247, - 0x239347, - 0x2d144a, - 0x2b1cc8, - 0x2b3204, - 0x24d7c7, - 0x27acc7, - 0x339306, - 0x262107, - 0x2dc4c8, - 0x2e6f08, - 0x268506, - 0x303008, - 0x2c87c4, - 0x2bb846, - 0x2353c6, - 0x33bfc6, - 0x2ba986, - 0x286004, - 0x264f46, - 0x2bf5c6, - 0x297546, - 0x247846, - 0x204f06, - 0x26e2c6, - 0x347f48, - 0x2b0748, - 0x2d1c88, - 0x25dc08, - 0x350b06, - 0x20dcc5, - 0x315ec6, - 0x2ab885, - 0x388447, - 0x215305, - 0x2125c3, - 0x211585, - 0x344cc4, - 0x205045, - 0x203b03, - 0x33a447, - 0x354648, - 0x2e3306, - 0x2c218d, - 0x2760c6, - 0x296ac5, - 0x2b7843, - 0x2bc389, - 0x247f06, - 0x28e7c6, - 0x29f4c4, - 0x231807, - 0x233606, - 0x2b1905, - 0x203cc3, - 0x3abd84, - 0x27ae86, - 0x2354c4, - 0x2da048, - 0x38ba89, - 0x215589, - 0x29f2ca, - 0x2a070d, - 0x313447, - 0x2b9186, - 0x206804, - 0x286a09, - 0x284688, - 0x287b06, - 0x33f286, - 0x262107, - 0x2b6b46, - 0x226346, - 0x26d606, - 0x3a2a0a, - 0x21ed48, - 0x2bacc5, - 0x262549, - 0x27e14a, - 0x2f5d08, - 0x29b908, - 0x295f08, - 0x2a7acc, - 0x30e705, - 0x29dd88, - 0x2e6586, - 0x37a386, - 0x3b50c7, - 0x2489c5, - 0x280ac5, - 0x215449, - 0x20e247, - 0x396845, - 0x227887, - 0x205043, - 0x2c5045, - 0x20ef48, - 0x252ac7, - 0x29b7c9, - 0x2d7985, - 0x2fa984, - 0x2a03c8, - 0x32ccc7, - 0x2c8c08, - 0x38d688, - 0x354b05, - 0x3a3946, - 0x278cc6, - 0x244609, - 0x2b01c7, - 0x2ac006, - 0x313787, - 0x210103, - 0x23bc44, - 0x2a1785, - 0x232804, - 0x3833c4, - 0x27fdc7, - 0x26c147, - 0x22e704, - 
0x29b610, - 0x3b3c47, - 0x200645, - 0x24c20c, - 0x20a8c4, - 0x2c1488, - 0x24ed89, - 0x35acc6, - 0x334c48, - 0x215244, - 0x36c4c8, - 0x22fb06, - 0x2accc8, - 0x29c506, - 0x2bec0b, - 0x202ac5, - 0x2c8748, - 0x215ac4, - 0x38beca, - 0x29b7c9, - 0x245f06, - 0x216f48, - 0x256385, - 0x2b0f44, - 0x2c1386, - 0x239208, - 0x27e648, - 0x322c06, - 0x3a9ec4, - 0x241846, - 0x34de87, - 0x2755c7, - 0x26210f, - 0x207347, - 0x2fb547, - 0x3709c5, - 0x353e05, - 0x29f8c9, - 0x2dd046, - 0x27cc05, - 0x280647, - 0x2e0bc8, - 0x297645, - 0x299ec6, - 0x21b1c8, - 0x2b94ca, - 0x2db4c8, - 0x28ac87, - 0x2e23c6, - 0x262506, - 0x21a5c3, - 0x216a43, - 0x27e309, - 0x28c809, - 0x2c1286, - 0x2d7985, - 0x33bd48, - 0x216f48, - 0x3597c8, - 0x26d68b, - 0x2c23c7, - 0x30a589, - 0x262388, - 0x343084, - 0x3514c8, - 0x28cd89, - 0x2ac305, - 0x2ba687, - 0x23bcc5, - 0x27e548, - 0x28fc4b, - 0x295710, - 0x2a6dc5, - 0x215a0c, - 0x348205, - 0x27be43, - 0x2a8f86, - 0x2be6c4, - 0x335086, - 0x29bf47, - 0x21b244, - 0x240b88, - 0x28304d, - 0x302945, - 0x29b104, - 0x2243c4, - 0x276949, - 0x2a11c8, - 0x317687, - 0x22fb88, - 0x27f748, - 0x277605, - 0x209287, - 0x277587, - 0x33e347, - 0x264309, - 0x233489, - 0x214c46, - 0x2bd306, - 0x262346, - 0x37f785, - 0x3a7184, - 0x200006, - 0x200386, - 0x277648, - 0x21a90b, - 0x260d07, - 0x206804, - 0x353646, - 0x2fe447, - 0x26dec5, - 0x391d05, - 0x219644, - 0x233406, - 0x200088, - 0x286a09, - 0x2510c6, - 0x284048, - 0x2b19c6, - 0x345248, - 0x306dcc, - 0x2774c6, - 0x29678d, - 0x296c0b, - 0x339545, - 0x312387, - 0x3531c6, - 0x336008, - 0x214cc9, - 0x2d0588, - 0x200645, - 0x277987, - 0x27d648, - 0x349649, - 0x28e946, - 0x250fca, - 0x335d88, - 0x2d03cb, - 0x39818c, - 0x36c5c8, - 0x27a7c6, - 0x208c88, - 0x3b77c7, - 0x32cf49, - 0x28f74d, - 0x299dc6, - 0x27b808, - 0x2b0609, - 0x2bda48, - 0x281508, - 0x2bfe0c, - 0x2c0b47, - 0x2c1887, - 0x264145, - 0x2ad587, - 0x2e0a88, - 0x2c1406, - 0x2556cc, - 0x2ef888, - 0x2ccb88, - 0x25dec6, - 0x21a507, - 0x214e44, - 0x25dc08, - 0x22200c, - 0x2ce24c, - 0x2342c5, - 0x2d0d47, - 0x3a9e46, - 0x21a486, - 0x2f9d88, - 0x3af904, - 0x28574b, - 0x36cc0b, - 0x2e23c6, - 0x282ec7, - 0x37a805, - 0x269a05, - 0x285886, - 0x256345, - 0x203b05, - 0x2cc9c7, - 0x274c49, - 0x2ac784, - 0x2fbb45, - 0x2e4bc5, - 0x2d9dc8, - 0x329d05, - 0x2b72c9, - 0x2ae5c7, - 0x2ae5cb, - 0x381306, - 0x347c89, - 0x26d108, - 0x276545, - 0x33e448, - 0x2334c8, - 0x245747, - 0x3776c7, - 0x27fe49, - 0x2acc07, - 0x28a989, - 0x2aa70c, - 0x3163c8, - 0x2b2ac9, - 0x2b3d47, - 0x27f809, - 0x26c287, - 0x398288, - 0x3b7705, - 0x2bb7c6, - 0x2c0388, - 0x308a88, - 0x27e009, - 0x203b47, - 0x269ac5, - 0x222b09, - 0x2bd6c6, - 0x28f544, - 0x30e1c6, - 0x35ffc8, - 0x232ac7, - 0x21ab08, - 0x3030c9, - 0x3a3707, - 0x29d146, - 0x312444, - 0x211609, - 0x209108, - 0x25dd87, - 0x27eb46, - 0x21a846, - 0x3b7904, - 0x2241c6, - 0x204fc3, - 0x3b1649, - 0x202a86, - 0x303345, - 0x29db06, - 0x26cac5, - 0x27dac8, - 0x36c307, - 0x381646, - 0x3b3b06, - 0x3078c8, - 0x29fa47, - 0x299e05, - 0x29b408, - 0x3a1e48, - 0x335d88, - 0x3480c5, - 0x2bb846, - 0x215349, - 0x244484, - 0x26c94b, - 0x22604b, - 0x2babc9, - 0x205043, - 0x254485, - 0x2214c6, - 0x385208, - 0x2e1f04, - 0x2e3306, - 0x2d1589, - 0x2ca445, - 0x2cc906, - 0x32ccc6, - 0x216f44, - 0x2a764a, - 0x303288, - 0x308a86, - 0x3b8645, - 0x37a687, - 0x2e0fc7, - 0x3a3944, - 0x226287, - 0x2aecc4, - 0x33bf46, - 0x2096c3, - 0x264305, - 0x32ad45, - 0x207588, - 0x24d985, - 0x277209, - 0x25da47, - 0x25da4b, - 0x2a148c, - 0x2a224a, - 0x300187, - 0x203503, - 0x3afc08, - 0x348285, - 0x2976c5, - 0x205104, - 0x398186, - 0x24ed86, - 
0x224207, - 0x33448b, - 0x286004, - 0x2e6684, - 0x21f044, - 0x2cafc6, - 0x21b244, - 0x2bb648, - 0x342445, - 0x21ba45, - 0x359707, - 0x312489, - 0x345c45, - 0x37424a, - 0x26ac89, - 0x2996ca, - 0x3a2b49, - 0x33fec4, - 0x2cc305, - 0x2b6c48, - 0x2cd9cb, - 0x244245, - 0x2f2fc6, - 0x213e84, - 0x277746, - 0x3a3589, - 0x353707, - 0x3179c8, - 0x2a0a86, - 0x34de07, - 0x27e648, - 0x3747c6, - 0x375604, - 0x365ac7, - 0x357305, - 0x367287, - 0x200104, - 0x353146, - 0x2f4308, - 0x296dc8, - 0x2e6047, - 0x274fc8, - 0x294fc5, - 0x204e84, - 0x3711c8, - 0x2750c4, - 0x216e05, - 0x2f5fc4, - 0x2fdfc7, - 0x288c07, - 0x27f948, - 0x2c8d86, - 0x24d905, - 0x277008, - 0x2db6c8, - 0x29f209, - 0x226346, - 0x22f588, - 0x38bd4a, - 0x26df48, - 0x2e5d85, - 0x20b306, - 0x26ab48, - 0x277a4a, - 0x210f87, - 0x284c45, - 0x292708, - 0x2ade04, - 0x265886, - 0x2c1c08, - 0x204f06, - 0x38e7c8, - 0x28f187, - 0x203346, - 0x2b4b04, - 0x284fc7, - 0x2b0d84, - 0x3a3547, - 0x28e60d, - 0x27b645, - 0x2cd54b, - 0x29c606, - 0x24e848, - 0x240b44, - 0x350d06, - 0x27ae86, - 0x208fc7, - 0x29644d, - 0x243cc7, - 0x2b12c8, - 0x269b85, - 0x278648, - 0x2c4a86, - 0x295048, - 0x228086, - 0x33d987, - 0x300449, - 0x343ac7, - 0x287dc8, - 0x2706c5, - 0x21b608, - 0x21a3c5, - 0x3a4245, - 0x3a2dc5, - 0x234543, - 0x2809c4, - 0x262545, - 0x337809, - 0x27ea46, - 0x2dc5c8, - 0x377485, - 0x2b2e87, - 0x2a78ca, - 0x2cc849, - 0x2cad4a, - 0x2d1d08, - 0x2276cc, - 0x2806cd, - 0x2fc003, - 0x38e6c8, - 0x3abd45, - 0x2b9286, - 0x379f86, - 0x2e58c5, - 0x313889, - 0x33cc45, - 0x277008, - 0x2552c6, - 0x347806, - 0x2a0289, - 0x393947, - 0x28ff06, - 0x2a7848, - 0x33bec8, - 0x2daec7, - 0x2ace4e, - 0x2c4cc5, - 0x349545, - 0x204e08, - 0x21fcc7, - 0x21a882, - 0x2bf984, - 0x334f8a, - 0x25de48, - 0x2fe546, - 0x298ac8, - 0x278cc6, - 0x332608, - 0x2ac008, - 0x3a4204, - 0x2b33c5, - 0x676384, - 0x676384, - 0x676384, - 0x202b43, - 0x21a6c6, - 0x2774c6, - 0x29cb0c, - 0x203383, - 0x27e146, - 0x2151c4, - 0x247e88, - 0x2d13c5, - 0x335086, - 0x2bcac8, - 0x2d2bc6, - 0x3815c6, - 0x245d08, - 0x2a1807, - 0x2ac9c9, - 0x2f214a, - 0x22b484, - 0x215305, - 0x2a96c5, - 0x247c06, - 0x313486, - 0x29d546, - 0x2f5546, - 0x2acb04, - 0x2acb0b, - 0x231804, - 0x29ccc5, - 0x2aad85, - 0x312806, - 0x3a6308, - 0x280587, - 0x317784, - 0x236203, - 0x2ad905, - 0x306047, - 0x28048b, - 0x207487, - 0x2bc9c8, - 0x2e62c7, - 0x370b06, - 0x279bc8, - 0x2a820b, - 0x21ff46, - 0x212309, - 0x2a8385, - 0x30a343, - 0x2cc906, - 0x28f088, - 0x213403, - 0x24f403, - 0x27e646, - 0x278cc6, - 0x35d10a, - 0x27a805, - 0x27accb, - 0x29da4b, - 0x23ef83, - 0x202843, - 0x2aec44, - 0x278a87, - 0x28f104, - 0x244504, - 0x2e6404, - 0x26e248, - 0x3b8588, - 0x3baf89, - 0x393188, - 0x2b9dc7, - 0x247846, - 0x2dc20f, - 0x2c4e06, - 0x2d1344, - 0x3b83ca, - 0x305f47, - 0x3b9606, - 0x28f589, - 0x3baf05, - 0x2076c5, - 0x3bb046, - 0x21b743, - 0x2ade49, - 0x21eec6, - 0x3afa89, - 0x382bc6, - 0x264305, - 0x2346c5, - 0x207343, - 0x278bc8, - 0x20d787, - 0x396784, - 0x247d08, - 0x2e1244, - 0x2f1006, - 0x2a8f86, - 0x23c346, - 0x2c8609, - 0x297645, - 0x3519c6, - 0x2582c9, - 0x2c41c6, - 0x26e2c6, - 0x387886, - 0x2160c5, - 0x2f5fc6, - 0x33d984, - 0x3b7705, - 0x2c0384, - 0x2b2246, - 0x3532c4, - 0x203c43, - 0x284745, - 0x2331c8, - 0x25e607, - 0x2b8209, - 0x284b48, - 0x297e11, - 0x32cd4a, - 0x2e2307, - 0x2e7246, - 0x2151c4, - 0x2c0488, - 0x22e448, - 0x297fca, - 0x2b708d, - 0x268686, - 0x245e06, - 0x285086, - 0x21ba47, - 0x2b1385, - 0x3912c7, - 0x247dc5, - 0x2ae704, - 0x2a6206, - 0x224047, - 0x2adb4d, - 0x26aa87, - 0x21f308, - 0x277309, - 0x20b206, - 0x28e8c5, - 0x22cb04, - 
0x3600c6, - 0x3a3846, - 0x25dfc6, - 0x299348, - 0x215f83, - 0x208fc3, - 0x352105, - 0x277dc6, - 0x2abfc5, - 0x2a0c88, - 0x29c10a, - 0x282084, - 0x247e88, - 0x295f08, - 0x312647, - 0x377549, - 0x2bc6c8, - 0x286a87, - 0x2587c6, - 0x204f0a, - 0x360148, - 0x2f98c9, - 0x2a1288, - 0x221609, - 0x2e7107, - 0x2f2f05, - 0x26d886, - 0x2c1288, - 0x27e7c8, - 0x296088, - 0x2e24c8, - 0x29ccc5, - 0x208a84, - 0x20d488, - 0x23e2c4, - 0x3a2944, - 0x264305, - 0x2917c7, - 0x312249, - 0x208dc7, - 0x210cc5, - 0x274846, - 0x34f606, - 0x212444, - 0x2a05c6, - 0x24d744, - 0x278546, - 0x312006, - 0x213246, - 0x200645, - 0x2a0b47, - 0x203503, - 0x2079c9, - 0x3076c8, - 0x247d04, - 0x28690d, - 0x296ec8, - 0x2e3788, - 0x2f9846, - 0x300549, - 0x2cc849, - 0x3a3285, - 0x29c20a, - 0x27cf4a, - 0x29d74c, - 0x29d8c6, - 0x275446, - 0x2c4f86, - 0x2b4749, - 0x2b94c6, - 0x29fa86, - 0x33cd06, - 0x25dc08, - 0x274fc6, - 0x2ce80b, - 0x291945, - 0x21ba45, - 0x2756c5, - 0x352a46, - 0x204ec3, - 0x23c2c6, - 0x26aa07, - 0x2c0345, - 0x320585, - 0x379445, - 0x318446, - 0x31da84, - 0x31da86, - 0x292f49, - 0x3528cc, - 0x2ae448, - 0x239184, - 0x2f5c06, - 0x29c706, - 0x28f088, - 0x216f48, - 0x3527c9, - 0x37a687, - 0x2367c9, - 0x24cfc6, - 0x22e904, - 0x20ea44, - 0x280144, - 0x27e648, - 0x31208a, - 0x345bc6, - 0x353cc7, - 0x362c47, - 0x347d85, - 0x2a9684, - 0x28cd46, - 0x2b13c6, - 0x2336c3, - 0x307507, - 0x38d588, - 0x3a33ca, - 0x2cbb88, - 0x281f08, - 0x353305, - 0x339645, - 0x260e05, - 0x348146, - 0x3ad906, - 0x26c085, - 0x3b1889, - 0x2a948c, - 0x260ec7, - 0x298048, - 0x2e5c05, - 0x676384, - 0x320944, - 0x252c04, - 0x22df86, - 0x29eb0e, - 0x207747, - 0x21bc45, - 0x24440c, - 0x2e1107, - 0x223fc7, - 0x225109, - 0x217cc9, - 0x284c45, - 0x3076c8, - 0x215349, - 0x335c45, - 0x2c0288, - 0x2c2586, - 0x371446, - 0x2def04, - 0x2553c8, - 0x20b3c3, - 0x2af8c4, - 0x2ad985, - 0x3bab07, - 0x21c245, - 0x38bc09, - 0x28b30d, - 0x2a33c6, - 0x225fc4, - 0x2588c8, - 0x274a8a, - 0x2611c7, - 0x235d45, - 0x23b403, - 0x29dc0e, - 0x278ccc, - 0x2f5e07, - 0x29ecc7, - 0x200143, - 0x2b9505, - 0x252c05, - 0x298e88, - 0x295d49, - 0x239086, - 0x28f104, - 0x2e2246, - 0x27b5cb, - 0x2cc5cc, - 0x366d87, - 0x2d0305, - 0x3a1d48, - 0x2dac85, - 0x3b83c7, - 0x32cb87, - 0x247585, - 0x204ec3, - 0x26e584, - 0x21c685, - 0x2ac685, - 0x2ac686, - 0x292008, - 0x224047, - 0x37a286, - 0x26c486, - 0x3a2d06, - 0x268789, - 0x209387, - 0x25e286, - 0x2cc746, - 0x2731c6, - 0x2a7585, - 0x3b2b46, - 0x380145, - 0x329d88, - 0x29114b, - 0x28c346, - 0x362c84, - 0x2b4389, - 0x25da44, - 0x2c2508, - 0x30e2c7, - 0x281404, - 0x2bbd88, - 0x2c1684, - 0x2a75c4, - 0x286845, - 0x302986, - 0x26e187, - 0x203043, - 0x29d205, - 0x323284, - 0x349586, - 0x3a3308, - 0x38d2c5, - 0x290e09, - 0x222d05, - 0x2dbf88, - 0x215087, - 0x388588, - 0x2b8047, - 0x2fb609, - 0x264dc6, - 0x32bb46, - 0x28cac4, - 0x258705, - 0x2fce4c, - 0x2756c7, - 0x275fc7, - 0x362b08, - 0x2a33c6, - 0x26a944, - 0x328004, - 0x27fcc9, - 0x2c5086, - 0x298a07, - 0x208c04, - 0x23da46, - 0x33b785, - 0x2c88c7, - 0x2ce786, - 0x250e89, - 0x27cd87, - 0x262107, - 0x2a0106, - 0x23d985, - 0x27c548, - 0x21ed48, - 0x247a46, - 0x38d305, - 0x390586, - 0x2034c3, - 0x298d09, - 0x29d2ce, - 0x2b7d48, - 0x2e1348, - 0x24784b, - 0x291046, - 0x313104, - 0x2802c4, - 0x29d3ca, - 0x215907, - 0x25e345, - 0x212309, - 0x2bf685, - 0x3a2987, - 0x245c84, - 0x287087, - 0x2e40c8, - 0x2cd306, - 0x27b989, - 0x2bc7ca, - 0x215886, - 0x296a06, - 0x2aad05, - 0x37d085, - 0x282d07, - 0x244e48, - 0x33b6c8, - 0x3a4206, - 0x234745, - 0x31320e, - 0x2b3204, - 0x2479c5, - 0x2741c9, - 0x2dce48, - 0x28abc6, - 
0x29af0c, - 0x29bd10, - 0x29e74f, - 0x29f7c8, - 0x300187, - 0x200645, - 0x262545, - 0x26e009, - 0x292909, - 0x241946, - 0x2442c7, - 0x2d0cc5, - 0x337b09, - 0x339386, - 0x2b930d, - 0x280009, - 0x244504, - 0x2b7ac8, - 0x20d549, - 0x345d86, - 0x274945, - 0x32bb46, - 0x317889, - 0x2f3c48, - 0x20dcc5, - 0x2553c4, - 0x29b0cb, - 0x345c45, - 0x29b206, - 0x280a06, - 0x265e46, - 0x276d4b, - 0x290f09, - 0x26c3c5, - 0x388347, - 0x32ccc6, - 0x334dc6, - 0x252988, - 0x302a89, - 0x21f0cc, - 0x305e48, - 0x309e46, - 0x322c03, - 0x2ba886, - 0x276b85, - 0x27b008, - 0x234146, - 0x2c8b08, - 0x248b45, - 0x279305, - 0x32eb08, - 0x332787, - 0x379ec7, - 0x224207, - 0x334c48, - 0x3002c8, - 0x2ad486, - 0x2b2087, - 0x23bb07, - 0x276a4a, - 0x201e03, - 0x352a46, - 0x2392c5, - 0x334f84, - 0x277309, - 0x2fb584, - 0x25e684, - 0x29c584, - 0x29eccb, - 0x20d6c7, - 0x313445, - 0x294cc8, - 0x274846, - 0x274848, - 0x27a746, - 0x28b085, - 0x28b645, - 0x28d886, - 0x28ee48, - 0x28f4c8, - 0x2774c6, - 0x294b0f, - 0x2987d0, - 0x3a6005, - 0x203503, - 0x22e9c5, - 0x30a4c8, - 0x292809, - 0x335d88, - 0x268608, - 0x2b8d48, - 0x20d787, - 0x274509, - 0x2c8d08, - 0x265304, - 0x29c408, - 0x2d9e89, - 0x2b27c7, - 0x299d44, - 0x208e88, - 0x2a090a, - 0x2e77c6, - 0x268686, - 0x226209, - 0x29bf47, - 0x2cba08, - 0x204848, - 0x2ddd88, - 0x35cc45, - 0x37e005, - 0x21ba45, - 0x252bc5, - 0x3b5987, - 0x204ec5, - 0x2c0345, - 0x313686, - 0x335cc7, - 0x2cd907, - 0x2a0c06, - 0x2d2245, - 0x29b206, - 0x27ba85, - 0x2b58c8, - 0x2f4284, - 0x2c4246, - 0x33b5c4, - 0x2b0f48, - 0x2c434a, - 0x2790cc, - 0x334685, - 0x21bb06, - 0x21f286, - 0x351fc6, - 0x309ec4, - 0x33ba45, - 0x27a587, - 0x29bfc9, - 0x2cb4c7, - 0x676384, - 0x676384, - 0x317605, - 0x37b944, - 0x29a8ca, - 0x2746c6, - 0x279e04, - 0x3b9585, - 0x37e405, - 0x2b12c4, - 0x280647, - 0x222c87, - 0x2cafc8, - 0x33de88, - 0x20dcc9, - 0x29cd88, - 0x29aa8b, - 0x2318c4, - 0x366885, - 0x27cc85, - 0x224189, - 0x302a89, - 0x2b4288, - 0x30e048, - 0x2d6604, - 0x29c745, - 0x217083, - 0x247bc5, - 0x351a46, - 0x295b8c, - 0x208b06, - 0x36c3c6, - 0x28ae45, - 0x3184c8, - 0x2b7ec6, - 0x2e73c6, - 0x268686, - 0x22920c, - 0x25e184, - 0x3a2e4a, - 0x28ad88, - 0x2959c7, - 0x323186, - 0x239147, - 0x2ec145, - 0x27eb46, - 0x34d406, - 0x35b847, - 0x25e6c4, - 0x2fe0c5, - 0x2741c4, - 0x2ae787, - 0x274408, - 0x2752ca, - 0x27d4c7, - 0x303407, - 0x300107, - 0x2dadc9, - 0x295b8a, - 0x21f083, - 0x25e5c5, - 0x213283, - 0x2e6449, - 0x33dc08, - 0x3709c7, - 0x335e89, - 0x21ee46, - 0x2b88c8, - 0x33a3c5, - 0x2db7ca, - 0x2d3549, - 0x2683c9, - 0x3b50c7, - 0x22e549, - 0x213148, - 0x35ba06, - 0x21bcc8, - 0x2160c7, - 0x2acc07, - 0x26ac87, - 0x2d0b48, - 0x2f5a86, - 0x2a06c5, - 0x27a587, - 0x296508, - 0x33b544, - 0x2dd244, - 0x28fe07, - 0x2ac387, - 0x2151ca, - 0x35b986, - 0x38c74a, - 0x2bf8c7, - 0x2b2fc7, - 0x246004, - 0x28aa44, - 0x2ce686, - 0x202d04, - 0x202d0c, - 0x3aff05, - 0x216d89, - 0x2d4f04, - 0x2b1385, - 0x274a08, - 0x279fc5, - 0x374246, - 0x223ec4, - 0x293c4a, - 0x2b00c6, - 0x29ba8a, - 0x22b447, - 0x21ac45, - 0x21b745, - 0x347dca, - 0x28efc5, - 0x26dfc6, - 0x23e2c4, - 0x2aedc6, - 0x282dc5, - 0x234206, - 0x2e604c, - 0x2cb14a, - 0x2587c4, - 0x247846, - 0x29bf47, - 0x2cf984, - 0x25dc08, - 0x393006, - 0x313089, - 0x2c7549, - 0x3164c9, - 0x26cb06, - 0x2161c6, - 0x21be07, - 0x3b17c8, - 0x215fc9, - 0x20d6c7, - 0x294e46, - 0x34de87, - 0x284f45, - 0x2b3204, - 0x21b9c7, - 0x23bcc5, - 0x286785, - 0x226987, - 0x247448, - 0x3a1cc6, - 0x29738d, - 0x29908f, - 0x29da4d, - 0x210d04, - 0x2332c6, - 0x2d3c08, - 0x33ccc5, - 0x276c08, - 0x24560a, - 0x244504, - 0x27bb46, - 
0x26f3c7, - 0x286007, - 0x2a18c9, - 0x21bc85, - 0x2b12c4, - 0x2b330a, - 0x2bc289, - 0x22e647, - 0x265706, - 0x345d86, - 0x29c686, - 0x365b86, - 0x2d320f, - 0x2d3ac9, - 0x274fc6, - 0x22e346, - 0x31a809, - 0x2b2187, - 0x217443, - 0x229386, - 0x216a43, - 0x2e5788, - 0x34dcc7, - 0x29f9c9, - 0x2a8e08, - 0x37a008, - 0x203c86, - 0x208a49, - 0x242785, - 0x2b2244, - 0x2a99c7, - 0x2b47c5, - 0x210d04, - 0x313508, - 0x215bc4, - 0x2b1ec7, - 0x3545c6, - 0x357e85, - 0x2a1288, - 0x345c4b, - 0x331dc7, - 0x348046, - 0x2c4e84, - 0x319586, - 0x264305, - 0x23bcc5, - 0x27c2c9, - 0x280249, - 0x2acc44, - 0x2acc85, - 0x247885, - 0x2db646, - 0x3077c8, - 0x2bf046, - 0x38d3cb, - 0x35ab4a, - 0x2b0e85, - 0x28b6c6, - 0x396485, - 0x2cf485, - 0x2a54c7, - 0x352cc8, - 0x2367c4, - 0x25f806, - 0x28f546, - 0x213307, - 0x30a304, - 0x27ae86, - 0x237cc5, - 0x237cc9, - 0x2163c4, - 0x2a9809, - 0x2774c6, - 0x2c0c08, - 0x247885, - 0x362d45, - 0x234206, - 0x21efc9, - 0x217cc9, - 0x36c446, - 0x2dcf48, - 0x244508, - 0x396444, - 0x2b3644, - 0x2b3648, - 0x326dc8, - 0x2368c9, - 0x3519c6, - 0x268686, - 0x32224d, - 0x2e3306, - 0x306c89, - 0x315fc5, - 0x3bb046, - 0x391408, - 0x31d9c5, - 0x23bb44, - 0x264305, - 0x27fb48, - 0x29a689, - 0x274284, - 0x353146, - 0x279e8a, - 0x2f5d08, - 0x215349, - 0x38174a, - 0x335e06, - 0x299248, - 0x3b8185, - 0x2e0908, - 0x2b8145, - 0x21ed09, - 0x36a349, - 0x20d8c2, - 0x2a8385, - 0x269746, - 0x277407, - 0x3b05c5, - 0x308986, - 0x301448, - 0x2a33c6, - 0x2b6b09, - 0x2760c6, - 0x252808, - 0x2a89c5, - 0x23ebc6, - 0x33da88, - 0x27e648, - 0x2e7008, - 0x345f08, - 0x3b2b44, - 0x22a183, - 0x2b6d44, - 0x27d6c6, - 0x284f84, - 0x2e1287, - 0x2e72c9, - 0x2c45c5, - 0x204846, - 0x229386, - 0x291e4b, - 0x2b0dc6, - 0x3b8cc6, - 0x2c8488, - 0x3204c6, - 0x21aa43, - 0x3af743, - 0x2b3204, - 0x22f485, - 0x2b1807, - 0x274408, - 0x27440f, - 0x27a48b, - 0x3075c8, - 0x3531c6, - 0x3078ce, - 0x2319c3, - 0x2b1784, - 0x2b0d45, - 0x2b1146, - 0x28ce4b, - 0x291886, - 0x21b249, - 0x357e85, - 0x3899c8, - 0x20c688, - 0x217b8c, - 0x29ed06, - 0x247c06, - 0x2d7985, - 0x287b88, - 0x2790c5, - 0x343088, - 0x29b28a, - 0x29de89, - 0x676384, - 0x38a099c2, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x205503, - 0x200983, - 0x20cf83, - 0x25ef44, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x2d5f04, - 0x2e9dc3, - 0x3b0887, - 0x209703, - 0x204e83, - 0x28b148, - 0x200983, - 0x2ae1cb, - 0x2ec883, - 0x264a86, - 0x20b0c2, - 0x22d54b, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x200983, - 0x26be43, - 0x204783, - 0x205702, - 0x16d208, - 0x325f45, - 0x23bd48, - 0x2df7c8, - 0x2099c2, - 0x37ab45, - 0x38c347, - 0x2007c2, - 0x240d87, - 0x20d882, - 0x248707, - 0x32c589, - 0x3b7d48, - 0x2ddc09, - 0x23e202, - 0x263647, - 0x36c1c4, - 0x38c407, - 0x35aa47, - 0x2bbbc2, - 0x209703, - 0x20e602, - 0x200c82, - 0x200442, - 0x2013c2, - 0x205ec2, - 0x209842, - 0x2a80c5, - 0x320885, - 0x99c2, - 0x32403, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x12083, - 0x1ec1, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x205503, - 0x200983, - 0x219503, - 0x3b819d06, - 0x13f443, - 0x7df85, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x4a82, - 0x16d208, - 0x44e04, - 0xdb085, - 0x205702, - 0x26f544, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x2358c3, - 0x2a9305, - 0x244183, 
- 0x206343, - 0x205503, - 0x21c2c3, - 0x200983, - 0x214843, - 0x2387c3, - 0x25ed03, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2099c2, - 0x200983, - 0x16d208, - 0x2e9dc3, - 0x16d208, - 0x200c03, - 0x2a84c3, - 0x22fd84, - 0x232403, - 0x2e9dc3, - 0x202bc2, - 0x209703, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x202bc2, - 0x227f83, - 0x205503, - 0x200983, - 0x2e87c3, - 0x214843, - 0x205702, - 0x2099c2, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x264a85, - 0xe4886, - 0x25ef44, - 0x20b0c2, - 0x16d208, - 0x205702, - 0x1d848, - 0x1b4183, - 0x2099c2, - 0x3fc91386, - 0x1320c4, - 0xd95cb, - 0x13eec6, - 0x9807, - 0x232403, - 0x47208, - 0x2e9dc3, - 0xb9b45, - 0x13fb84, - 0x260f83, - 0x4ce87, - 0xd78c4, - 0x205503, - 0x7f1c4, - 0x200983, - 0x2ed844, - 0xd9388, - 0x125c86, - 0x82b48, - 0x6cf05, - 0x1fa49, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x204e83, - 0x200983, - 0x2ec883, - 0x20b0c2, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x24a5c3, - 0x211cc4, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2d5f04, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x264a86, - 0x232403, - 0x2e9dc3, - 0x176e43, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x9807, - 0x16d208, - 0x2e9dc3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x426a84c3, - 0x232403, - 0x205503, - 0x200983, - 0x16d208, - 0x205702, - 0x2099c2, - 0x2a84c3, - 0x2e9dc3, - 0x205503, - 0x200442, - 0x200983, - 0x316e87, - 0x33e6cb, - 0x22d703, - 0x241608, - 0x3b1547, - 0x20a7c6, - 0x2c2c45, - 0x372349, - 0x209488, - 0x360d49, - 0x38f790, - 0x360d4b, - 0x39e189, - 0x201b03, - 0x20fb89, - 0x230f06, - 0x230f0c, - 0x326008, - 0x3b4f08, - 0x34af09, - 0x2905ce, - 0x2dd9cb, - 0x2f364c, - 0x2030c3, - 0x263d0c, - 0x207089, - 0x2fee47, - 0x23234c, - 0x3a89ca, - 0x2030c4, - 0x2d084d, - 0x263bc8, - 0x20cf8d, - 0x273846, - 0x28decb, - 0x283349, - 0x3b8b87, - 0x32fd06, - 0x330f89, - 0x351b8a, - 0x30b148, - 0x2ec484, - 0x2fba07, - 0x34f707, - 0x2bab04, - 0x37b5c4, - 0x22a749, - 0x281d49, - 0x22ae48, - 0x210785, - 0x3b4005, - 0x20db86, - 0x2d0709, - 0x24588d, - 0x2f30c8, - 0x20da87, - 0x2c2cc8, - 0x2e1886, - 0x38b6c4, - 0x3523c5, - 0x202986, - 0x204b04, - 0x206f87, - 0x20b8ca, - 0x212244, - 0x2157c6, - 0x216a09, - 0x216a0f, - 0x21788d, - 0x2184c6, - 0x21d450, - 0x21d846, - 0x21df87, - 0x21e4c7, - 0x21e4cf, - 0x21f6c9, - 0x224c46, - 0x225347, - 0x225348, - 0x225809, - 0x246088, - 0x2e52c7, - 0x20cc83, - 0x372986, - 0x3ba948, - 0x29088a, - 0x213c09, - 0x2095c3, - 0x38c246, - 0x25f64a, - 0x29e587, - 0x2fec8a, - 0x313d4e, - 0x21f806, - 0x2a8587, - 0x20e006, - 0x207146, - 0x37de0b, - 0x20414a, - 0x317f0d, - 0x216287, - 0x33ce88, - 0x33ce89, - 0x33ce8f, - 0x2b838c, - 0x27b289, - 0x2e6a0e, - 0x3b098a, - 0x2ba246, - 0x2f4586, - 0x30b58c, - 0x30ce8c, - 0x30dc08, - 0x3439c7, - 0x2b8c45, - 0x351e04, - 0x33c90e, - 0x228d04, - 0x351747, - 0x26030a, - 0x362554, - 0x36dd8f, - 0x21e688, - 0x372848, - 0x35040d, - 0x35040e, - 0x376ec9, - 0x3a8ec8, - 0x3a8ecf, - 0x23204c, - 0x23204f, - 0x233007, - 0x236dca, - 0x2435cb, - 0x238508, - 0x239cc7, - 0x3690cd, - 0x250406, - 0x2d0a06, - 0x23c149, - 0x394648, - 0x242088, - 0x24208e, - 0x2b5007, - 0x243885, - 0x244bc5, - 0x2063c4, - 0x20aa86, - 0x22ad48, - 0x202203, - 0x2ca10e, - 0x369488, - 0x2a2fcb, - 0x200dc7, - 0x3a4045, - 0x22e206, - 0x2aa0c7, - 0x333d08, - 0x26cd09, - 0x292e45, - 0x284788, - 0x212c06, - 0x38ad4a, - 0x33c809, - 0x232409, - 0x23240b, - 0x38dc48, - 0x2ba9c9, - 0x210846, - 0x22eb8a, - 0x2dc80a, - 0x236fcc, - 0x3a6687, - 
0x32c38a, - 0x26ea8b, - 0x26ea99, - 0x3b6a88, - 0x264b05, - 0x2c6086, - 0x211e49, - 0x390746, - 0x28550a, - 0x209686, - 0x202644, - 0x2c620d, - 0x202647, - 0x211149, - 0x246385, - 0x2464c8, - 0x246fc9, - 0x247784, - 0x248387, - 0x248388, - 0x248c87, - 0x261908, - 0x24d487, - 0x26c645, - 0x25488c, - 0x2550c9, - 0x2bc00a, - 0x3937c9, - 0x20fc89, - 0x275a0c, - 0x25774b, - 0x257ec8, - 0x259048, - 0x25c404, - 0x2810c8, - 0x283c89, - 0x3a8a87, - 0x216c46, - 0x2835c7, - 0x2dcac9, - 0x26e6cb, - 0x319407, - 0x200a07, - 0x22b587, - 0x20cf04, - 0x20cf05, - 0x29a545, - 0x341c0b, - 0x39c644, - 0x3b2988, - 0x26614a, - 0x212cc7, - 0x2f6707, - 0x28bed2, - 0x278446, - 0x22f706, - 0x33c24e, - 0x27aa06, - 0x292588, - 0x29374f, - 0x20d348, - 0x37f308, - 0x30eaca, - 0x30ead1, - 0x2a0e8e, - 0x24dd0a, - 0x24dd0c, - 0x21e307, - 0x3a90d0, - 0x200408, - 0x2a1085, - 0x2aa4ca, - 0x204b4c, - 0x29518d, - 0x2f7e46, - 0x2f7e47, - 0x2f7e4c, - 0x300e4c, - 0x3292cc, - 0x2873cb, - 0x284184, - 0x226384, - 0x346d89, - 0x3050c7, - 0x225e49, - 0x37e909, - 0x39f1c7, - 0x3a8846, - 0x3a8849, - 0x2ad1c3, - 0x21c74a, - 0x31a287, - 0x33eb8b, - 0x317d8a, - 0x248844, - 0x22ba46, - 0x27d749, - 0x202b84, - 0x3affca, - 0x348345, - 0x2bdd45, - 0x2bdd4d, - 0x2be08e, - 0x28cc05, - 0x323906, - 0x264687, - 0x3870ca, - 0x39b686, - 0x3616c4, - 0x36d747, - 0x2c3f0b, - 0x2e1947, - 0x33fa84, - 0x24bb86, - 0x24bb8d, - 0x21e1cc, - 0x2053c6, - 0x2f32ca, - 0x2e03c6, - 0x2ed0c8, - 0x377c47, - 0x23568a, - 0x23d6c6, - 0x216183, - 0x391586, - 0x3ba7c8, - 0x29ac8a, - 0x275807, - 0x275808, - 0x281684, - 0x24b687, - 0x279348, - 0x2bd748, - 0x27c0c8, - 0x38c94a, - 0x2da905, - 0x2cf0c7, - 0x24db53, - 0x31e806, - 0x266348, - 0x221a09, - 0x240c48, - 0x203d0b, - 0x2cb608, - 0x2a5f44, - 0x32ec06, - 0x30bac6, - 0x3027c9, - 0x2c3dc7, - 0x254988, - 0x28af06, - 0x226884, - 0x2cb8c5, - 0x2c55c8, - 0x2c5bca, - 0x2c5e88, - 0x2cbf86, - 0x29944a, - 0x2ac808, - 0x2cf788, - 0x2d18c8, - 0x2d1f06, - 0x2d3e06, - 0x38e18c, - 0x2d43d0, - 0x27d2c5, - 0x20d148, - 0x301950, - 0x20d150, - 0x38f60e, - 0x38de0e, - 0x38de14, - 0x32fe8f, - 0x330246, - 0x332d51, - 0x33d213, - 0x33d688, - 0x3b3445, - 0x241b48, - 0x386245, - 0x329a8c, - 0x291549, - 0x228b49, - 0x3201c7, - 0x236b89, - 0x380887, - 0x2f6146, - 0x3521c7, - 0x269c45, - 0x2120c3, - 0x2023c9, - 0x221cc9, - 0x376e43, - 0x27f384, - 0x32a20d, - 0x206bcf, - 0x2268c5, - 0x329986, - 0x211407, - 0x325d87, - 0x288786, - 0x28878b, - 0x2a2405, - 0x256786, - 0x2f6c07, - 0x24e489, - 0x3a7486, - 0x21d305, - 0x22854b, - 0x235946, - 0x249245, - 0x357988, - 0x306a88, - 0x2c8f0c, - 0x2c8f10, - 0x2d2409, - 0x2ffd07, - 0x32840b, - 0x2e3b86, - 0x2e518a, - 0x2e754b, - 0x2e794a, - 0x2e7bc6, - 0x2e8685, - 0x319fc6, - 0x36c808, - 0x32028a, - 0x35009c, - 0x2ec94c, - 0x2ecc48, - 0x264a85, - 0x34ea07, - 0x26bec6, - 0x274e05, - 0x21afc6, - 0x288948, - 0x2bc507, - 0x2904c8, - 0x2a868a, - 0x33130c, - 0x331589, - 0x38b847, - 0x2198c4, - 0x244c86, - 0x37ee8a, - 0x37ea05, - 0x209f8c, - 0x20e648, - 0x367388, - 0x21a00c, - 0x22550c, - 0x225a09, - 0x225c47, - 0x231d4c, - 0x23aa84, - 0x23c60a, - 0x35e6cc, - 0x26b28b, - 0x242b8b, - 0x2efec6, - 0x24a107, - 0x24c687, - 0x3a930f, - 0x2f8a51, - 0x2d8592, - 0x24c68d, - 0x24c68e, - 0x24c9ce, - 0x330048, - 0x330052, - 0x24fbc8, - 0x3b1187, - 0x24aeca, - 0x3681c8, - 0x27a9c5, - 0x3b57ca, - 0x21dd87, - 0x2e36c4, - 0x201543, - 0x2a57c5, - 0x30ed47, - 0x2f5007, - 0x29538e, - 0x3382cd, - 0x33af89, - 0x222705, - 0x35c3c3, - 0x3a78c6, - 0x36e745, - 0x2a3208, - 0x205b49, - 0x2983c5, - 0x3692cf, - 0x2d96c7, - 0x372285, - 0x20178a, - 
0x2a36c6, - 0x2ed249, - 0x396ccc, - 0x2f51c9, - 0x3abdc6, - 0x265f4c, - 0x322d06, - 0x2f7588, - 0x2f7786, - 0x3b6c06, - 0x3b96c4, - 0x258243, - 0x2a1fca, - 0x327191, - 0x3a9c0a, - 0x27ee85, - 0x265047, - 0x252207, - 0x279444, - 0x27944b, - 0x3b7bc8, - 0x2b7bc6, - 0x362b85, - 0x38b044, - 0x255f09, - 0x31ad84, - 0x254f07, - 0x32f345, - 0x32f347, - 0x33c485, - 0x2a8183, - 0x3b1048, - 0x33b80a, - 0x203043, - 0x325f8a, - 0x203046, - 0x36904f, - 0x2b4f89, - 0x2ca090, - 0x2f1548, - 0x2ccc89, - 0x2971c7, - 0x24bb0f, - 0x336244, - 0x2d5f84, - 0x21d6c6, - 0x22f246, - 0x25708a, - 0x23cc46, - 0x2f58c7, - 0x300788, - 0x300987, - 0x301207, - 0x30370a, - 0x30534b, - 0x2f3dc5, - 0x2d81c8, - 0x21bb03, - 0x23800c, - 0x36f78f, - 0x2b8a4d, - 0x2a7147, - 0x33b0c9, - 0x22bcc7, - 0x24a2c8, - 0x36274c, - 0x2a5e48, - 0x250bc8, - 0x318ace, - 0x32d354, - 0x32d864, - 0x3475ca, - 0x36148b, - 0x380944, - 0x380949, - 0x27bbc8, - 0x245345, - 0x201d0a, - 0x3696c7, - 0x26f744, - 0x38d2c3, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x209703, - 0x2d43c6, - 0x211cc4, - 0x205503, - 0x200983, - 0x201303, - 0x205702, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x2e9dc3, - 0x244183, - 0x2d43c6, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x211cc4, - 0x205503, - 0x200983, - 0x205702, - 0x2bb143, - 0x2099c2, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x201ec2, - 0x219f02, - 0x2099c2, - 0x2a84c3, - 0x202242, - 0x201fc2, - 0x3b1384, - 0x210444, - 0x227382, - 0x211cc4, - 0x200442, - 0x200983, - 0x201303, - 0x2efec6, - 0x212982, - 0x202dc2, - 0x222f02, - 0x44e0d343, - 0x4521e303, - 0x52d46, - 0x52d46, - 0x25ef44, - 0x204e83, - 0x142abca, - 0x12778c, - 0x102cc, - 0x7dd8d, - 0x129845, - 0x21347, - 0x18648, - 0x1b887, - 0x20348, - 0x19d4ca, - 0x45ed6a45, - 0x12b809, - 0xaf848, - 0x4a70a, - 0x8a64e, - 0x1440a4b, - 0x1320c4, - 0x77848, - 0x68bc8, - 0x38f47, - 0x12807, - 0x4efc9, - 0x2c07, - 0xd4ac8, - 0x1318c9, - 0x3adc5, - 0x124d4e, - 0xa8a0d, - 0x9688, - 0x4622a586, - 0x46c2a588, - 0x70cc8, - 0x117090, - 0x5f347, - 0x601c7, - 0x64547, - 0x69447, - 0xdb42, - 0x190bc7, - 0x430c, - 0x35fc7, - 0xa4246, - 0xa4909, - 0xa6388, - 0x17f42, - 0x1fc2, - 0xb8fcb, - 0x7f247, - 0x11809, - 0xbb9c9, - 0x17e248, - 0xafd42, - 0x113a49, - 0xcdf8a, - 0xc9e09, - 0xd6fc9, - 0xd7ac8, - 0xd8a47, - 0xda889, - 0xde345, - 0xde6d0, - 0x175b86, - 0x192345, - 0x5e98d, - 0xf986, - 0xe9187, - 0xed858, - 0x1b1a48, - 0xb4c8a, - 0x1c42, - 0x52f4d, - 0x27c2, - 0x5d306, - 0x8d108, - 0x86ec8, - 0x16d0c9, - 0x55b08, - 0x5fb4e, - 0x1a78c7, - 0x19d0d, - 0xf2d05, - 0x190948, - 0x194448, - 0xfacc6, - 0xc2, - 0x125c86, - 0x7b02, - 0x341, - 0x57a07, - 0xc8e83, - 0x466ee0c4, - 0x46a94443, - 0x141, - 0x10986, - 0x141, - 0x1, - 0x10986, - 0xc8e83, - 0x1596bc5, - 0x2030c4, - 0x2a84c3, - 0x249944, - 0x3b1384, - 0x205503, - 0x2218c5, - 0x219503, - 0x23e743, - 0x373605, - 0x25ed03, - 0x47ea84c3, - 0x232403, - 0x2e9dc3, - 0x200041, - 0x209703, - 0x210444, - 0x211cc4, - 0x205503, - 0x200983, - 0x214843, - 0x16d208, - 0x205702, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x201fc2, - 0x3b1384, - 0x244183, - 0x209703, - 0x205503, - 0x204e83, - 0x200983, - 0x25ed03, - 0x16d208, - 0x36f502, - 0x99c2, - 0x1456108, - 0x100b4e, - 0x48e016c2, - 0x31a448, - 0x234386, - 0x209cc6, - 0x233d07, - 0x4920c202, - 0x49768ec8, - 0x20884a, - 0x25cc88, - 0x200242, - 0x31a0c9, - 0x2f3e07, - 0x216bc6, - 0x3b0d89, - 
0x2cf204, - 0x20a6c6, - 0x2dbcc4, - 0x26ffc4, - 0x2544c9, - 0x326686, - 0x320945, - 0x22c445, - 0x384e07, - 0x2bfb47, - 0x28fa44, - 0x233f46, - 0x2fb005, - 0x2fde45, - 0x3963c5, - 0x3b3dc7, - 0x200c05, - 0x314b49, - 0x312945, - 0x333e44, - 0x39b5c7, - 0x31974e, - 0x32e5c9, - 0x33c109, - 0x3a64c6, - 0x23d408, - 0x26d98b, - 0x2aeecc, - 0x37f806, - 0x2dd887, - 0x20a305, - 0x37b5ca, - 0x22af49, - 0x20bf49, - 0x24ff86, - 0x2f69c5, - 0x27ce45, - 0x3490c9, - 0x39654b, - 0x273346, - 0x33a786, - 0x202504, - 0x28bb86, - 0x243908, - 0x3ba646, - 0x214386, - 0x207c08, - 0x20bb47, - 0x20bd09, - 0x20c585, - 0x16d208, - 0x212784, - 0x3ada04, - 0x283785, - 0x399a49, - 0x220f07, - 0x220f0b, - 0x22394a, - 0x227a45, - 0x49a08d42, - 0x33ea47, - 0x49e28908, - 0x2afb87, - 0x350e85, - 0x20c1ca, - 0x99c2, - 0x34dfcb, - 0x24d5ca, - 0x221bc6, - 0x282bc3, - 0x28e34d, - 0x3492cc, - 0x35084d, - 0x245c45, - 0x32ae05, - 0x202247, - 0x3aba49, - 0x208746, - 0x23cac5, - 0x2d29c8, - 0x28ba83, - 0x2dfac8, - 0x28ba88, - 0x2c3747, - 0x309708, - 0x3a7209, - 0x2cc447, - 0x33e247, - 0x396a48, - 0x251f44, - 0x251f47, - 0x273748, - 0x3a3ac6, - 0x205f4f, - 0x211a07, - 0x2e5446, - 0x225d85, - 0x223083, - 0x371847, - 0x36c043, - 0x248e46, - 0x24aa86, - 0x24b286, - 0x290c05, - 0x261903, - 0x388208, - 0x36f009, - 0x38224b, - 0x24b408, - 0x24d145, - 0x24f605, - 0x4a248902, - 0x352289, - 0x3b1407, - 0x256805, - 0x2543c7, - 0x2559c6, - 0x365a45, - 0x36e58b, - 0x257ec4, - 0x25c845, - 0x25c987, - 0x272cc6, - 0x273105, - 0x2812c7, - 0x281a07, - 0x2cd884, - 0x289c0a, - 0x28a0c8, - 0x3b8209, - 0x241e85, - 0x207886, - 0x243aca, - 0x22c346, - 0x261e07, - 0x3b7ecd, - 0x29c809, - 0x38d185, - 0x314187, - 0x332288, - 0x33d848, - 0x3b3107, - 0x379d86, - 0x215dc7, - 0x249f43, - 0x341c04, - 0x363485, - 0x392707, - 0x395dc9, - 0x22be48, - 0x344c45, - 0x23cd84, - 0x246245, - 0x24b80d, - 0x200f82, - 0x373746, - 0x25d246, - 0x2c578a, - 0x376546, - 0x37edc5, - 0x33df85, - 0x33df87, - 0x38ab8c, - 0x270b4a, - 0x28b846, - 0x2b9645, - 0x28b9c6, - 0x28bd07, - 0x28e186, - 0x290b0c, - 0x3b0ec9, - 0x4a610e07, - 0x293b05, - 0x293b06, - 0x293ec8, - 0x23b705, - 0x2a2c85, - 0x2a3848, - 0x2a3a4a, - 0x4aa4ecc2, - 0x4ae0ee02, - 0x2e6705, - 0x284f83, - 0x3adf08, - 0x204043, - 0x2a3cc4, - 0x2ed38b, - 0x26dd48, - 0x2e4d48, - 0x4b349909, - 0x2a7dc9, - 0x2a8906, - 0x2a9d48, - 0x2a9f49, - 0x2aab46, - 0x2aacc5, - 0x3843c6, - 0x2ab5c9, - 0x331f47, - 0x23ea86, - 0x233747, - 0x2085c7, - 0x32c8c4, - 0x4b7b1d49, - 0x2cab88, - 0x368dc8, - 0x383447, - 0x2c5246, - 0x226ac9, - 0x209c87, - 0x32e90a, - 0x38c588, - 0x3af5c7, - 0x3b9786, - 0x24f38a, - 0x262708, - 0x2dccc5, - 0x226645, - 0x2ee487, - 0x2f7349, - 0x36510b, - 0x315008, - 0x3129c9, - 0x24bfc7, - 0x2b550c, - 0x2b5c4c, - 0x2b5f4a, - 0x2b61cc, - 0x2c2708, - 0x2c2908, - 0x2c2b04, - 0x2c2ec9, - 0x2c3109, - 0x2c334a, - 0x2c35c9, - 0x2c3907, - 0x3af00c, - 0x241146, - 0x34acc8, - 0x22c406, - 0x32e7c6, - 0x38d087, - 0x3b3288, - 0x39034b, - 0x2afa47, - 0x352489, - 0x3445c9, - 0x249ac7, - 0x278a04, - 0x265187, - 0x2db346, - 0x214a06, - 0x2f3485, - 0x2a5888, - 0x291444, - 0x291446, - 0x270a0b, - 0x21ca49, - 0x214b46, - 0x21c489, - 0x3b3f46, - 0x254688, - 0x223b83, - 0x2f6b45, - 0x22edc9, - 0x261145, - 0x2f9684, - 0x272206, - 0x231545, - 0x228f86, - 0x3056c7, - 0x26e986, - 0x3a304b, - 0x22ea87, - 0x3379c6, - 0x346f06, - 0x384ec6, - 0x28fa09, - 0x2ef14a, - 0x2b3505, - 0x2170cd, - 0x2a3b46, - 0x235546, - 0x2b4e86, - 0x2ed045, - 0x2de9c7, - 0x2e14c7, - 0x3581ce, - 0x209703, - 0x2c5209, - 0x391dc9, - 0x37b9c7, - 0x358f07, - 0x29d645, - 0x27ec45, - 
0x4ba2a88f, - 0x2ccec7, - 0x2cd088, - 0x2cd484, - 0x2cde46, - 0x4be44c42, - 0x2d2186, - 0x2d43c6, - 0x391f8e, - 0x2df90a, - 0x357b06, - 0x285eca, - 0x203549, - 0x324105, - 0x398008, - 0x3b5606, - 0x38cec8, - 0x26f088, - 0x28eb8b, - 0x233e05, - 0x200c88, - 0x207d4c, - 0x2bd507, - 0x24ae06, - 0x2e28c8, - 0x20a948, - 0x4c208442, - 0x20a48b, - 0x282549, - 0x329f09, - 0x3bb287, - 0x20f7c8, - 0x4c61bf48, - 0x3511cb, - 0x37e0c9, - 0x234fcd, - 0x2750c8, - 0x224a48, - 0x4ca03ec2, - 0x20e3c4, - 0x4ce1a2c2, - 0x2f4ec6, - 0x4d2004c2, - 0x3813ca, - 0x21c346, - 0x285908, - 0x284488, - 0x2af446, - 0x22d8c6, - 0x2f12c6, - 0x2a3185, - 0x238c04, - 0x4d61e144, - 0x205146, - 0x272707, - 0x4dae8bc7, - 0x35490b, - 0x319b09, - 0x32ae4a, - 0x391804, - 0x33e0c8, - 0x23e84d, - 0x2eb709, - 0x2eb948, - 0x2ebfc9, - 0x2ed844, - 0x243484, - 0x27c885, - 0x317b4b, - 0x26dcc6, - 0x3424c5, - 0x250149, - 0x234008, - 0x2047c4, - 0x37b749, - 0x208105, - 0x2bfb88, - 0x33e907, - 0x33c508, - 0x27d946, - 0x35e387, - 0x292349, - 0x2286c9, - 0x2492c5, - 0x334ec5, - 0x4de2d902, - 0x333c04, - 0x2049c5, - 0x32c146, - 0x318385, - 0x2b1ac7, - 0x205245, - 0x272d04, - 0x3a6586, - 0x23cb47, - 0x232986, - 0x2dca05, - 0x203188, - 0x234585, - 0x2062c7, - 0x20f1c9, - 0x21cb8a, - 0x2e1b87, - 0x2e1b8c, - 0x320906, - 0x343cc9, - 0x23b385, - 0x23b648, - 0x210803, - 0x210805, - 0x2e8a05, - 0x261607, - 0x4e20c002, - 0x22d0c7, - 0x2e4f06, - 0x342786, - 0x2e7d06, - 0x20a886, - 0x208388, - 0x241c85, - 0x2e5507, - 0x2e550d, - 0x201543, - 0x21ec05, - 0x201547, - 0x22d408, - 0x201105, - 0x218c88, - 0x36c0c6, - 0x32b9c7, - 0x2c4785, - 0x233e86, - 0x26f5c5, - 0x21390a, - 0x2f2e06, - 0x377ac7, - 0x2ca505, - 0x3612c7, - 0x36d6c4, - 0x2f9606, - 0x2fb3c5, - 0x32648b, - 0x2db1c9, - 0x2bb24a, - 0x249348, - 0x301d08, - 0x304a4c, - 0x306287, - 0x3073c8, - 0x310a48, - 0x31e945, - 0x34020a, - 0x35c3c9, - 0x4e600802, - 0x200806, - 0x219d04, - 0x2ea849, - 0x220b49, - 0x269287, - 0x294947, - 0x37e789, - 0x38cb48, - 0x38cb4f, - 0x315d06, - 0x2d670b, - 0x36e8c5, - 0x36e8c7, - 0x385889, - 0x212ac6, - 0x37b6c7, - 0x2d8905, - 0x2303c4, - 0x261006, - 0x211ac4, - 0x2ce4c7, - 0x307048, - 0x4eaf68c8, - 0x2f7085, - 0x2f71c7, - 0x236549, - 0x23e284, - 0x23e288, - 0x4ee2b888, - 0x279444, - 0x231388, - 0x32fdc4, - 0x3ab849, - 0x2173c5, - 0x4f20b0c2, - 0x315d45, - 0x2e4345, - 0x251288, - 0x232e47, - 0x4f601442, - 0x204785, - 0x2cf606, - 0x24b106, - 0x333bc8, - 0x302108, - 0x318346, - 0x327f06, - 0x2e2e49, - 0x3426c6, - 0x21298b, - 0x296305, - 0x368106, - 0x377088, - 0x250506, - 0x292cc6, - 0x21914a, - 0x23084a, - 0x245005, - 0x241d47, - 0x308786, - 0x4fa01682, - 0x201687, - 0x238705, - 0x243a44, - 0x243a45, - 0x391706, - 0x26a447, - 0x219a85, - 0x220c04, - 0x2c7e88, - 0x292d85, - 0x333a47, - 0x3a1645, - 0x213845, - 0x256e04, - 0x287609, - 0x2fae48, - 0x2e0286, - 0x2d9d06, - 0x2b6e46, - 0x4fefbc88, - 0x2fbe87, - 0x2fc0cd, - 0x2fcb4c, - 0x2fd149, - 0x2fd389, - 0x5035b2c2, - 0x3a8603, - 0x207943, - 0x2db405, - 0x39280a, - 0x327dc6, - 0x302385, - 0x305884, - 0x30588b, - 0x31b70c, - 0x31c14c, - 0x31c455, - 0x31d74d, - 0x320a8f, - 0x320e52, - 0x3212cf, - 0x321692, - 0x321b13, - 0x321fcd, - 0x32258d, - 0x32290e, - 0x322e8e, - 0x3236cc, - 0x323a8c, - 0x323ecb, - 0x32424e, - 0x325392, - 0x327b8c, - 0x328790, - 0x335212, - 0x33640c, - 0x336acd, - 0x336e0c, - 0x339a51, - 0x33a90d, - 0x34084d, - 0x340e4a, - 0x3410cc, - 0x3419cc, - 0x3421cc, - 0x34290c, - 0x344dd3, - 0x345450, - 0x345850, - 0x34610d, - 0x34670c, - 0x347309, - 0x34890d, - 0x348c53, - 0x34a311, - 0x34a753, - 0x34b24f, - 0x34b60c, - 
0x34b90f, - 0x34bccd, - 0x34c2cf, - 0x34c690, - 0x34d10e, - 0x3539ce, - 0x353f50, - 0x35518d, - 0x355b0e, - 0x355e8c, - 0x356e93, - 0x35934e, - 0x3599d0, - 0x359dd1, - 0x35a20f, - 0x35a5d3, - 0x35ae4d, - 0x35b18f, - 0x35b54e, - 0x35bc10, - 0x35c009, - 0x35cd90, - 0x35d38f, - 0x35da0f, - 0x35ddd2, - 0x35efce, - 0x35fc4d, - 0x36070d, - 0x360a4d, - 0x36184d, - 0x361b8d, - 0x361ed0, - 0x3622cb, - 0x36324c, - 0x3635cc, - 0x363bcc, - 0x363ece, - 0x371a10, - 0x372dd2, - 0x37324b, - 0x3738ce, - 0x373c4e, - 0x3744ce, - 0x37494b, - 0x50774f56, - 0x37624d, - 0x3766d4, - 0x377e0d, - 0x37b115, - 0x37c40d, - 0x37cd8f, - 0x37d5cf, - 0x38250f, - 0x3828ce, - 0x382e4d, - 0x383f91, - 0x38674c, - 0x386a4c, - 0x386d4b, - 0x38764c, - 0x387a0f, - 0x387dd2, - 0x38878d, - 0x38974c, - 0x389bcc, - 0x389ecd, - 0x38a20f, - 0x38a5ce, - 0x3924cc, - 0x392a8d, - 0x392dcb, - 0x39358c, - 0x393b0d, - 0x393e4e, - 0x3941c9, - 0x394d13, - 0x39524d, - 0x39558d, - 0x395b8c, - 0x39600e, - 0x396fcf, - 0x39738c, - 0x39768d, - 0x3979cf, - 0x397d8c, - 0x39848c, - 0x39890c, - 0x398c0c, - 0x3992cd, - 0x399612, - 0x399c8c, - 0x399f8c, - 0x39a291, - 0x39a6cf, - 0x39aa8f, - 0x39ae53, - 0x39bcce, - 0x39c04f, - 0x39c40c, - 0x50b9c74e, - 0x39cacf, - 0x39ce96, - 0x39dc12, - 0x39f38c, - 0x39fd0f, - 0x3a038d, - 0x3a06cf, - 0x3a0a8c, - 0x3a0d8d, - 0x3a10cd, - 0x3a254e, - 0x3a4b8c, - 0x3a4e8c, - 0x3a5190, - 0x3a7a91, - 0x3a7ecb, - 0x3a820c, - 0x3a850e, - 0x3aa811, - 0x3aac4e, - 0x3aafcd, - 0x3b53cb, - 0x3b5e8f, - 0x3b6d94, - 0x228782, - 0x228782, - 0x200c83, - 0x228782, - 0x200c83, - 0x228782, - 0x205142, - 0x384405, - 0x3aa50c, - 0x228782, - 0x228782, - 0x205142, - 0x228782, - 0x294545, - 0x21cb85, - 0x228782, - 0x228782, - 0x20b382, - 0x294545, - 0x31f3c9, - 0x34a00c, - 0x228782, - 0x228782, - 0x228782, - 0x228782, - 0x384405, - 0x228782, - 0x228782, - 0x228782, - 0x228782, - 0x20b382, - 0x31f3c9, - 0x228782, - 0x228782, - 0x228782, - 0x21cb85, - 0x228782, - 0x21cb85, - 0x34a00c, - 0x3aa50c, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x205503, - 0x200983, - 0x2708, - 0x5fc84, - 0xe0e08, - 0x205702, - 0x51a099c2, - 0x23dbc3, - 0x24f2c4, - 0x2032c3, - 0x393304, - 0x22f706, - 0x20e883, - 0x3328c4, - 0x286bc5, - 0x209703, - 0x205503, - 0x200983, - 0x255cca, - 0x2efec6, - 0x373fcc, - 0x16d208, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x227f83, - 0x2d43c6, - 0x205503, - 0x200983, - 0x201303, - 0xa4508, - 0x129845, - 0x14902, - 0x52f86185, - 0x21347, - 0xc93c8, - 0xec0e, - 0x88192, - 0xfe20b, - 0x532d6a45, - 0x536d6a4c, - 0xb007, - 0x16fc07, - 0x1b254a, - 0x3a6d0, - 0x149c05, - 0xd95cb, - 0x68bc8, - 0x38f47, - 0x304cb, - 0x4efc9, - 0x11dd07, - 0x2c07, - 0x73587, - 0x1c106, - 0xd4ac8, - 0x53c1cdc6, - 0xa8a0d, - 0x1b1f10, - 0x5402bb82, - 0x9688, - 0x4a450, - 0x14434c, - 0x5474e88d, - 0x655c7, - 0x78749, - 0x52e06, - 0x940c8, - 0x67e42, - 0x9f54a, - 0x27f07, - 0x35fc7, - 0xa4909, - 0xa6388, - 0xb9b45, - 0xec50e, - 0xb54e, - 0xdecf, - 0x11809, - 0xbb9c9, - 0x43e4b, - 0x7664f, - 0x8780c, - 0x9ef4b, - 0xbbf48, - 0x154807, - 0xcdc48, - 0xfb80b, - 0xf568c, - 0xf640c, - 0xf908c, - 0xfe68d, - 0x17e248, - 0xeab02, - 0x113a49, - 0x185d4b, - 0xc5446, - 0x116fcb, - 0xd804a, - 0xd8c05, - 0xde6d0, - 0x111806, - 0x192345, - 0xe3f48, - 0xe9187, - 0xe9447, - 0xff487, - 0xf4d0a, - 0xc924a, - 0x5d306, - 0x91a0d, - 0x86ec8, - 0x55b08, - 0x56d49, - 0xb3c45, - 0xf484c, - 0xfe88b, - 0x165044, - 0xfaa89, - 0xfacc6, - 0x1af7c6, - 0x2dc2, - 0x125c86, - 0x107247, - 0x7b02, - 0xc83c5, - 0x29544, - 0x1ec1, - 0x4c983, - 0x53a85146, - 0x94443, - 0xd882, - 
0x27f04, - 0x242, - 0x5ef44, - 0x3dc2, - 0x8142, - 0x2502, - 0x10f242, - 0x1ec2, - 0xd6a42, - 0x4142, - 0x1b102, - 0x2cd82, - 0x5742, - 0xdc2, - 0xf882, - 0x32403, - 0x5f02, - 0x7c2, - 0x18342, - 0xfc82, - 0x5e82, - 0x1ae02, - 0x17f42, - 0x15c2, - 0x29c2, - 0x1fc2, - 0x44183, - 0x3942, - 0x6502, - 0xafd42, - 0xbe02, - 0x282, - 0x4bc2, - 0x1f42, - 0xa8542, - 0x2342, - 0x152bc2, - 0x675c2, - 0x2c82, - 0x5503, - 0x8c2, - 0x8442, - 0x33c2, - 0xb482, - 0x49245, - 0xba02, - 0x2d4c2, - 0x3c083, - 0x482, - 0x1c42, - 0x27c2, - 0x3902, - 0x1102, - 0x1442, - 0xc2, - 0x2dc2, - 0x9885, - 0x75c47, - 0x212503, - 0x205702, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x20ad83, - 0x227f83, - 0x205503, - 0x204e83, - 0x200983, - 0x294483, - 0x169c3, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x209703, - 0x205503, - 0x204e83, - 0x200983, - 0x2a84c3, - 0x232403, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x200041, - 0x209703, - 0x205503, - 0x21c2c3, - 0x200983, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x209683, - 0x2163c3, - 0x277dc3, - 0x280b83, - 0x21c303, - 0x252c03, - 0x2e9dc3, - 0x3b1384, - 0x205503, - 0x200983, - 0x25ed03, - 0x352e84, - 0x231a03, - 0x30c3, - 0x228483, - 0x37a908, - 0x24f3c4, - 0x3b870a, - 0x2b8ec6, - 0x1b6a04, - 0x39b2c7, - 0x21e7ca, - 0x315bc9, - 0x3ab587, - 0x3b724a, - 0x38d2c3, - 0x2e678b, - 0x2b9fc9, - 0x2bd645, - 0x2d1fc7, - 0x99c2, - 0x2a84c3, - 0x205747, - 0x2e2b85, - 0x2dbdc9, - 0x232403, - 0x233c06, - 0x2c1a43, - 0xdb283, - 0x104e46, - 0x18ec46, - 0xe807, - 0x212e46, - 0x21b185, - 0x282407, - 0x2d5b87, - 0x56ae9dc3, - 0x336647, - 0x365e03, - 0x206a05, - 0x3b1384, - 0x220688, - 0x38644c, - 0x2ad745, - 0x29c986, - 0x205607, - 0x38b907, - 0x238347, - 0x245108, - 0x303b8f, - 0x315e05, - 0x23dcc7, - 0x26f287, - 0x2a3e0a, - 0x2d2809, - 0x304f85, - 0x30664a, - 0x82a06, - 0x2c1ac5, - 0x374b84, - 0x2843c6, - 0x2f1d47, - 0x2eaa07, - 0x3bb408, - 0x22dc85, - 0x2e2a86, - 0x214305, - 0x3adcc5, - 0x21c984, - 0x2af347, - 0x2081ca, - 0x334808, - 0x35ba86, - 0x27f83, - 0x2da905, - 0x25f906, - 0x3af246, - 0x392246, - 0x209703, - 0x388a07, - 0x26f205, - 0x205503, - 0x2d830d, - 0x204e83, - 0x3bb508, - 0x27f404, - 0x272fc5, - 0x2a3d06, - 0x234d46, - 0x368007, - 0x2a6ec7, - 0x267345, - 0x200983, - 0x21fbc7, - 0x2788c9, - 0x311a49, - 0x22708a, - 0x243002, - 0x2069c4, - 0x2e5084, - 0x390207, - 0x22cf88, - 0x2ea2c9, - 0x21eac9, - 0x2eaf47, - 0x2ba486, - 0xec286, - 0x2ed844, - 0x2ede4a, - 0x2f0d48, - 0x2f1189, - 0x2bdbc6, - 0x2b1445, - 0x3346c8, - 0x2c5f8a, - 0x22ed03, - 0x353006, - 0x2eb047, - 0x223ec5, - 0x3a5e05, - 0x264b83, - 0x250cc4, - 0x226605, - 0x281b07, - 0x2faf85, - 0x2ee346, - 0xfc605, - 0x247d83, - 0x357bc9, - 0x272d8c, - 0x29344c, - 0x2ced08, - 0x293087, - 0x2f7908, - 0x2f7c4a, - 0x2f888b, - 0x2ba108, - 0x234e48, - 0x239586, - 0x390d45, - 0x38da4a, - 0x3a6205, - 0x20b0c2, - 0x2c4647, - 0x25fe86, - 0x35c8c5, - 0x370809, - 0x2f39c5, - 0x27e985, - 0x2ddf09, - 0x351846, - 0x237e88, - 0x33f383, - 0x20f486, - 0x272146, - 0x306445, - 0x306449, - 0x2b6789, - 0x279ac7, - 0x109104, - 0x309107, - 0x21e9c9, - 0x238d05, - 0x413c8, - 0x3b2e85, - 0x330e85, - 0x380509, - 0x201702, - 0x25e544, - 0x201e82, - 0x203942, - 0x31ecc5, - 0x3b6788, - 0x2b3b85, - 0x2c3ac3, - 0x2c3ac5, - 0x2d2383, - 0x20f442, - 0x377804, - 0x2ac783, - 0x2056c2, - 0x379884, - 0x2e5d43, - 0x2082c2, - 0x2b3c03, - 0x28d084, - 0x2e4c83, - 0x248684, - 0x203082, - 0x218943, - 0x22ef03, - 0x200d02, - 0x361782, - 0x2b65c9, - 0x207842, - 0x288d04, - 0x203cc2, - 0x334544, - 0x2ba444, - 0x2b74c4, - 0x202dc2, - 0x2391c2, - 0x225bc3, - 
0x2f8403, - 0x23d904, - 0x281c84, - 0x2eb1c4, - 0x2f0f04, - 0x30a483, - 0x26e543, - 0x282984, - 0x30a2c4, - 0x30aac6, - 0x22a282, - 0x2099c2, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x205702, - 0x38d2c3, - 0x2a84c3, - 0x232403, - 0x2007c3, - 0x2e9dc3, - 0x3b1384, - 0x2b6884, - 0x211cc4, - 0x205503, - 0x200983, - 0x201303, - 0x2ee644, - 0x31a403, - 0x2bd0c3, - 0x34ab84, - 0x3b2c86, - 0x202f03, - 0x16fc07, - 0x222403, - 0x2459c3, - 0x2b0543, - 0x206a43, - 0x227f83, - 0x2d6cc5, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x282c43, - 0x2a5143, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x244183, - 0x205503, - 0x23a504, - 0x200983, - 0x26bec4, - 0x2bf145, - 0x16fc07, - 0x2099c2, - 0x2006c2, - 0x20d882, - 0x200c82, - 0x200442, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x211cc4, - 0x205503, - 0x200983, - 0x214843, - 0x25ef44, - 0x16d208, - 0x2a84c3, - 0x204e83, - 0x169c3, - 0x2030c4, - 0x16d208, - 0x2a84c3, - 0x249944, - 0x3b1384, - 0x204e83, - 0x203ec2, - 0x200983, - 0x23e743, - 0x50cc4, - 0x373605, - 0x20b0c2, - 0x30a403, - 0x205702, - 0x16d208, - 0x2099c2, - 0x232403, - 0x2e9dc3, - 0x201fc2, - 0x200983, - 0x205702, - 0x1b7407, - 0x12e3c9, - 0x6f83, - 0x16d208, - 0x18ebc3, - 0x5a31fd87, - 0xa84c3, - 0x708, - 0x232403, - 0x2e9dc3, - 0x1ae886, - 0x244183, - 0x8f2c8, - 0xc0e08, - 0x41a46, - 0x209703, - 0xca988, - 0xb1b43, - 0xdf145, - 0x32607, - 0x8003, - 0x174c0a, - 0x11ed83, - 0x308d44, - 0x10398b, - 0x103f48, - 0x8d742, - 0x205702, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2d5f04, - 0x2e9dc3, - 0x244183, - 0x209703, - 0x205503, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x227f83, - 0x205503, - 0x200983, - 0x21aa03, - 0x214843, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x169c3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x227f83, - 0x205503, - 0x200983, - 0x212982, - 0x200141, - 0x205702, - 0x200001, - 0x320b82, - 0x16d208, - 0x21d445, - 0x201ec1, - 0xa84c3, - 0x200701, - 0x200301, - 0x200081, - 0x298602, - 0x36c044, - 0x384383, - 0x200181, - 0x200401, - 0x200041, - 0x200101, - 0x2e9907, - 0x2eab8f, - 0x340446, - 0x200281, - 0x37f6c6, - 0x200e81, - 0x2008c1, - 0x332a0e, - 0x200441, - 0x200983, - 0x201301, - 0x270e85, - 0x20f942, - 0x264a85, - 0x200341, - 0x200801, - 0x2002c1, - 0x20b0c2, - 0x2000c1, - 0x200201, - 0x200bc1, - 0x2005c1, - 0x201cc1, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x219503, - 0x2a84c3, - 0x2e9dc3, - 0x8d688, - 0x209703, - 0x205503, - 0x20803, - 0x200983, - 0x14e7e88, - 0x16d208, - 0x44e04, - 0x14e7e8a, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x205503, - 0x200983, - 0x2030c3, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2d5f04, - 0x200983, - 0x27a305, - 0x33b804, - 0x2a84c3, - 0x205503, - 0x200983, - 0x225ca, - 0xd5284, - 0x10c9c6, - 0x2099c2, - 0x2a84c3, - 0x230309, - 0x232403, - 0x3034c9, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x2ed648, - 0x22ca47, - 0x373605, - 0x18ed88, - 0x1b7407, - 0x2d20a, - 0xecb, - 0x4ab87, - 0x3d2c8, - 0x1b1b8a, - 0x10a48, - 0x12e3c9, - 0x264c7, - 0x3be87, - 0x152b08, - 0x708, - 0x3df8f, - 0x11d85, - 0xa07, - 0x1ae886, - 0x137607, - 0x3d586, - 0x8f2c8, - 0xa5606, - 0x151647, - 0x19c9, - 0x1aa1c7, - 0xa46c9, - 0xb4a09, - 0xbeec6, - 0xc0e08, - 0xbfcc5, - 0x4eb4a, - 0xca988, - 0xb1b43, - 0xd2648, - 0x32607, - 0x6d505, - 0x69c50, - 0x8003, - 0x1aa047, - 0x15ec5, - 0xe9748, - 0x13ce05, - 
0x11ed83, - 0x6fd48, - 0xcd46, - 0x42849, - 0xaa147, - 0x6fa0b, - 0x14ac44, - 0xfa544, - 0x10398b, - 0x103f48, - 0x104d47, - 0x129845, - 0x2a84c3, - 0x232403, - 0x2163c3, - 0x200983, - 0x22a403, - 0x2e9dc3, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x43f8b, - 0x205702, - 0x2099c2, - 0x200983, - 0x16d208, - 0x205702, - 0x2099c2, - 0x20d882, - 0x201fc2, - 0x203d02, - 0x205503, - 0x200442, - 0x205702, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x20d882, - 0x2e9dc3, - 0x244183, - 0x209703, - 0x211cc4, - 0x205503, - 0x216b03, - 0x200983, - 0x308d44, - 0x25ed03, - 0x2e9dc3, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x204e83, - 0x200983, - 0x39f847, - 0x2a84c3, - 0x2614c7, - 0x2c7ac6, - 0x219203, - 0x218343, - 0x2e9dc3, - 0x2143c3, - 0x3b1384, - 0x37ef04, - 0x31ea46, - 0x20d143, - 0x205503, - 0x200983, - 0x27a305, - 0x318284, - 0x3b2a43, - 0x38b743, - 0x2c4647, - 0x33e885, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x28e87, - 0x205942, - 0x287003, - 0x2bf143, - 0x38d2c3, - 0x626a84c3, - 0x202242, - 0x232403, - 0x2032c3, - 0x2e9dc3, - 0x3b1384, - 0x353903, - 0x315e03, - 0x209703, - 0x211cc4, - 0x62a04642, - 0x205503, - 0x200983, - 0x2082c3, - 0x229543, - 0x212982, - 0x25ed03, - 0x16d208, - 0x2e9dc3, - 0x169c3, - 0x26f744, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x235ac4, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x282104, - 0x210444, - 0x2d43c6, - 0x211cc4, - 0x205503, - 0x200983, - 0x201303, - 0x25fe86, - 0x13f08b, - 0x1cdc6, - 0x5eb4a, - 0x107e4a, - 0x16d208, - 0x2142c4, - 0x63ea84c3, - 0x38d284, - 0x232403, - 0x256e84, - 0x2e9dc3, - 0x391683, - 0x209703, - 0x205503, - 0x200983, - 0x56243, - 0x32f78b, - 0x3a140a, - 0x3b9bcc, - 0xda688, - 0x205702, - 0x2099c2, - 0x20d882, - 0x2a9305, - 0x3b1384, - 0x202342, - 0x209703, - 0x210444, - 0x200c82, - 0x200442, - 0x209842, - 0x212982, - 0x18d2c3, - 0x19f02, - 0x2a1cc9, - 0x25d548, - 0x309a89, - 0x337449, - 0x23490a, - 0x23634a, - 0x20cc02, - 0x21b102, - 0x99c2, - 0x2a84c3, - 0x204682, - 0x23de86, - 0x35d882, - 0x201242, - 0x20124e, - 0x21898e, - 0x27b107, - 0x205487, - 0x275d02, - 0x232403, - 0x2e9dc3, - 0x200042, - 0x201fc2, - 0x4a5c3, - 0x2eec0f, - 0x200f42, - 0x32c787, - 0x2c7d07, - 0x2d3907, - 0x2ad24c, - 0x3151cc, - 0x3a3a44, - 0x27c6ca, - 0x2188c2, - 0x20be02, - 0x2b6fc4, - 0x2226c2, - 0x2c2702, - 0x315404, - 0x20cec2, - 0x200282, - 0x6343, - 0x2a5687, - 0x2352c5, - 0x201f42, - 0x2eeb84, - 0x352bc2, - 0x2da248, - 0x205503, - 0x3b0208, - 0x200d42, - 0x233385, - 0x3b04c6, - 0x200983, - 0x20ba02, - 0x2ea507, - 0xf942, - 0x26b005, - 0x3a9f45, - 0x201642, - 0x242b02, - 0x3b7a8a, - 0x2671ca, - 0x202c42, - 0x2e4744, - 0x2002c2, - 0x206888, - 0x201c82, - 0x30a848, - 0x2feb47, - 0x2ff649, - 0x26b082, - 0x305645, - 0x33bc85, - 0x22dd4b, - 0x2c6c4c, - 0x22e848, - 0x3188c8, - 0x22a282, - 0x35f782, - 0x205702, - 0x16d208, - 0x2099c2, - 0x2a84c3, - 0x20d882, - 0x200c82, - 0x200442, - 0x200983, - 0x209842, - 0x205702, - 0x652099c2, - 0x656e9dc3, - 0x206343, - 0x202342, - 0x205503, - 0x375cc3, - 0x200983, - 0x2e87c3, - 0x275d46, - 0x1614843, - 0x16d208, - 0x192345, - 0xa6a8d, - 0xa4dca, - 0x65c87, - 0x65e011c2, - 0x66200242, - 0x66600ec2, - 0x66a00c02, - 0x66e0de02, - 0x67201ec2, - 0x16fc07, - 0x676099c2, - 0x67a301c2, - 0x67e09982, - 0x68200dc2, - 0x218983, - 0x9e04, - 0x225d83, - 0x686149c2, - 0x68a00182, - 0x49f47, - 0x68e03002, - 0x69202e42, - 0x69600b42, - 0x69a02bc2, - 0x69e029c2, - 0x6a201fc2, - 0xb3985, - 0x234543, - 0x202b84, - 0x6a6226c2, - 
0x6aa03a82, - 0x6ae03202, - 0x16c90b, - 0x6b200e82, - 0x6ba49a02, - 0x6be02342, - 0x6c203d02, - 0x6c60f242, - 0x6ca0ec42, - 0x6ce0e602, - 0x6d2675c2, - 0x6d604642, - 0x6da01b42, - 0x6de00c82, - 0x6e2042c2, - 0x6e61c702, - 0x6ea00e42, - 0x7f1c4, - 0x350703, - 0x6ee33082, - 0x6f216982, - 0x6f603402, - 0x6fa089c2, - 0x6fe00442, - 0x702056c2, - 0x44107, - 0x70601302, - 0x70a07302, - 0x70e09842, - 0x71218942, - 0xf484c, - 0x71621c82, - 0x71a3ab02, - 0x71e11602, - 0x72201682, - 0x72601f82, - 0x72a34a82, - 0x72e00202, - 0x7320e8c2, - 0x736724c2, - 0x73a56642, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0xa203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x6b753903, - 0x20a203, - 0x2d6d44, - 0x25d446, - 0x2f1743, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x353903, - 0x20a203, - 0x219f02, - 0x219f02, - 0x353903, - 0x20a203, - 0x742a84c3, - 0x232403, - 0x37ac03, - 0x209703, - 0x205503, - 0x200983, - 0x16d208, - 0x2099c2, - 0x2a84c3, - 0x205503, - 0x200983, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x209703, - 0x205503, - 0x200983, - 0x2030c4, - 0x2099c2, - 0x2a84c3, - 0x2028c3, - 0x232403, - 0x249944, - 0x2163c3, - 0x2e9dc3, - 0x3b1384, - 0x244183, - 0x209703, - 0x205503, - 0x200983, - 0x23e743, - 0x373605, - 0x2a1fc3, - 0x25ed03, - 0x2099c2, - 0x2a84c3, - 0x353903, - 0x205503, - 0x200983, - 0x205702, - 0x38d2c3, - 0x16d208, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x22f706, - 0x3b1384, - 0x244183, - 0x211cc4, - 0x205503, - 0x200983, - 0x201303, - 0x2a84c3, - 0x232403, - 0x205503, - 0x200983, - 0x14bb147, - 0x2a84c3, - 0x1cdc6, - 0x232403, - 0x2e9dc3, - 0xdba46, - 0x205503, - 0x200983, - 0x3149c8, - 0x318709, - 0x328b89, - 0x333808, - 0x37dc48, - 0x37dc49, - 0x24318d, - 0x2ee80f, - 0x251490, - 0x34848d, - 0x3638cc, - 0x37f98b, - 0x98605, - 0x205702, - 0x33e6c5, - 0x200243, - 0x772099c2, - 0x232403, - 0x2e9dc3, - 0x343ec7, - 0x206a43, - 0x209703, - 
0x205503, - 0x21c2c3, - 0x20dcc3, - 0x204e83, - 0x200983, - 0x2efec6, - 0x20b0c2, - 0x25ed03, - 0x16d208, - 0x205702, - 0x38d2c3, - 0x2099c2, - 0x2a84c3, - 0x232403, - 0x2e9dc3, - 0x3b1384, - 0x209703, - 0x205503, - 0x200983, - 0x214843, - 0x14f53c6, - 0x205702, - 0x2099c2, - 0x2e9dc3, - 0x209703, - 0x200983, -} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [ 1 bits] unused -// [ 1 bits] wildcard bit -// [ 2 bits] node type -// [14 bits] high nodes index (exclusive) of children -// [14 bits] low nodes index (inclusive) of children -var children = [...]uint32{ - 0x0, - 0x10000000, - 0x20000000, - 0x40000000, - 0x50000000, - 0x60000000, - 0x184c60d, - 0x1850613, - 0x1870614, - 0x19cc61c, - 0x19e0673, - 0x19f4678, - 0x1a0467d, - 0x1a20681, - 0x1a24688, - 0x1a3c689, - 0x1a6468f, - 0x1a68699, - 0x1a8069a, - 0x1a846a0, - 0x1a886a1, - 0x1ab06a2, - 0x1ab46ac, - 0x21abc6ad, - 0x1b046af, - 0x1b086c1, - 0x1b286c2, - 0x1b3c6ca, - 0x1b406cf, - 0x1b706d0, - 0x1b8c6dc, - 0x1bb46e3, - 0x1bc06ed, - 0x1bc46f0, - 0x1c5c6f1, - 0x1c70717, - 0x1c8471c, - 0x1cb4721, - 0x1cc472d, - 0x1cd8731, - 0x1cfc736, - 0x1e3473f, - 0x1e3878d, - 0x1ea478e, - 0x1f107a9, - 0x1f247c4, - 0x1f387c9, - 0x1f407ce, - 0x1f507d0, - 0x1f547d4, - 0x1f6c7d5, - 0x1fb87db, - 0x1fd47ee, - 0x1fd87f5, - 0x1fdc7f6, - 0x1fe87f7, - 0x20247fa, - 0x62028809, - 0x203c80a, - 0x205080f, - 0x2054814, - 0x2064815, - 0x2114819, - 0x2118845, - 0x22124846, - 0x2212c849, - 0x216484b, - 0x2168859, - 0x25b885a, - 0x2265896e, - 0x2265c996, - 0x22660997, - 0x2266c998, - 0x2267099b, - 0x2267c99c, - 0x2268099f, - 0x226849a0, - 0x226889a1, - 0x2268c9a2, - 0x226909a3, - 0x2269c9a4, - 0x226a09a7, - 0x226ac9a8, - 0x226b09ab, - 0x226b49ac, - 0x226b89ad, - 0x226c49ae, - 0x226c89b1, - 0x226cc9b2, - 0x226d09b3, - 0x26d49b4, - 0x226d89b5, - 0x226e49b6, - 0x226e89b9, - 0x26f09ba, - 0x227089bc, - 0x2270c9c2, - 0x27189c3, - 0x2271c9c6, - 0x27209c7, - 0x227249c8, - 0x27409c9, - 0x27589d0, - 0x275c9d6, - 0x276c9d7, - 0x27749db, - 0x27a89dd, - 0x27ac9ea, - 0x27bc9eb, - 0x28609ef, - 0x22864a18, - 0x286ca19, - 0x2870a1b, - 0x2888a1c, - 0x289ca22, - 0x28c4a27, - 0x28e4a31, - 0x2914a39, - 0x293ca45, - 0x2940a4f, - 0x2964a50, - 0x2968a59, - 0x297ca5a, - 0x2980a5f, - 0x2984a60, - 0x29a4a61, - 0x29c0a69, - 0x29c4a70, - 0x229c8a71, - 0x29cca72, - 0x29d0a73, - 0x29e0a74, - 0x29e4a78, - 0x2a5ca79, - 0x2a78a97, - 0x2a88a9e, - 0x2a9caa2, - 0x2ab4aa7, - 0x2ac8aad, - 0x2ae0ab2, - 0x2ae4ab8, - 0x2afcab9, - 0x2b14abf, - 0x2b30ac5, - 0x2b48acc, - 0x2ba8ad2, - 0x2bc0aea, - 0x2bc4af0, - 0x2bd8af1, - 0x2c1caf6, - 0x2c9cb07, - 0x2cc8b27, - 0x2cccb32, - 0x2cd4b33, - 0x2cf4b35, - 0x2cf8b3d, - 0x2d18b3e, - 0x2d20b46, - 0x2d5cb48, - 0x2d9cb57, - 0x2da0b67, - 0x2e00b68, - 0x2e04b80, - 0x22e08b81, - 0x2e20b82, - 0x2e44b88, - 0x2e64b91, - 0x3428b99, - 0x3434d0a, - 0x3454d0d, - 0x3610d15, - 0x36e0d84, - 0x3750db8, - 0x37a8dd4, - 0x3890dea, - 0x38e8e24, - 0x3924e3a, - 0x3a20e49, - 0x3aece88, - 0x3b84ebb, - 0x3c14ee1, - 0x3c78f05, - 0x3eb0f1e, - 0x3f68fac, - 0x4034fda, - 0x408100d, - 0x4109020, - 0x4145042, - 0x4195051, - 0x420d065, - 0x64211083, - 0x64215084, - 0x64219085, - 0x4295086, - 0x42f10a5, - 0x436d0bc, - 0x43e50db, - 0x44650f9, - 0x44d1119, - 0x45fd134, - 0x465517f, - 0x64659195, - 0x46f1196, - 0x47791bc, - 0x47c51de, - 0x482d1f1, - 0x48d520b, - 
0x499d235, - 0x4a05267, - 0x4b19281, - 0x64b1d2c6, - 0x64b212c7, - 0x4b7d2c8, - 0x4bd92df, - 0x4c692f6, - 0x4ce531a, - 0x4d29339, - 0x4e0d34a, - 0x4e41383, - 0x4ea1390, - 0x4f153a8, - 0x4f9d3c5, - 0x4fdd3e7, - 0x504d3f7, - 0x65051413, - 0x65055414, - 0x25059415, - 0x5071416, - 0x508d41c, - 0x50d1423, - 0x50e1434, - 0x50f9438, - 0x517143e, - 0x517945c, - 0x518d45e, - 0x51a5463, - 0x51cd469, - 0x51d1473, - 0x51d9474, - 0x51ed476, - 0x520947b, - 0x520d482, - 0x5215483, - 0x5251485, - 0x5265494, - 0x526d499, - 0x527549b, - 0x527949d, - 0x529d49e, - 0x52c14a7, - 0x52d94b0, - 0x52dd4b6, - 0x52e54b7, - 0x52e94b9, - 0x534d4ba, - 0x53514d3, - 0x53754d4, - 0x53954dd, - 0x53b14e5, - 0x53c14ec, - 0x53d54f0, - 0x53d94f5, - 0x53e14f6, - 0x53f54f8, - 0x54054fd, - 0x5409501, - 0x5425502, - 0x5cb5509, - 0x5ced72d, - 0x5d1973b, - 0x5d31746, - 0x5d5174c, - 0x5d71754, - 0x5db575c, - 0x5dbd76d, - 0x25dc176f, - 0x25dc5770, - 0x5dcd771, - 0x5f29773, - 0x25f2d7ca, - 0x25f3d7cb, - 0x25f457cf, - 0x25f517d1, - 0x5f557d4, - 0x5f597d5, - 0x5f817d6, - 0x5fa97e0, - 0x5fad7ea, - 0x5fe57eb, - 0x5ff97f9, - 0x6b517fe, - 0x6b55ad4, - 0x6b59ad5, - 0x26b5dad6, - 0x6b61ad7, - 0x26b65ad8, - 0x6b69ad9, - 0x26b75ada, - 0x6b79add, - 0x6b7dade, - 0x26b81adf, - 0x6b85ae0, - 0x26b8dae1, - 0x6b91ae3, - 0x6b95ae4, - 0x26ba5ae5, - 0x6ba9ae9, - 0x6badaea, - 0x6bb1aeb, - 0x6bb5aec, - 0x26bb9aed, - 0x6bbdaee, - 0x6bc1aef, - 0x6bc5af0, - 0x6bc9af1, - 0x26bd1af2, - 0x6bd5af4, - 0x6bd9af5, - 0x6bddaf6, - 0x26be1af7, - 0x6be5af8, - 0x26bedaf9, - 0x26bf1afb, - 0x6c0dafc, - 0x6c19b03, - 0x6c59b06, - 0x6c5db16, - 0x6c81b17, - 0x6c85b20, - 0x6c89b21, - 0x6e01b22, - 0x26e05b80, - 0x26e0db81, - 0x26e11b83, - 0x26e15b84, - 0x6e1db85, - 0x6ef9b87, - 0x26efdbbe, - 0x6f01bbf, - 0x6f2dbc0, - 0x6f31bcb, - 0x6f51bcc, - 0x6f5dbd4, - 0x6f7dbd7, - 0x6fb5bdf, - 0x724dbed, - 0x7309c93, - 0x731dcc2, - 0x7351cc7, - 0x7381cd4, - 0x739dce0, - 0x73c1ce7, - 0x73ddcf0, - 0x73f9cf7, - 0x741dcfe, - 0x742dd07, - 0x7431d0b, - 0x7465d0c, - 0x7481d19, - 0x74edd20, - 0x274f1d3b, - 0x7515d3c, - 0x7535d45, - 0x7549d4d, - 0x755dd52, - 0x7561d57, - 0x7581d58, - 0x7625d60, - 0x7641d89, - 0x7661d90, - 0x7665d98, - 0x766dd99, - 0x7671d9b, - 0x7685d9c, - 0x76a5da1, - 0x76b1da9, - 0x76bddac, - 0x76eddaf, - 0x77bddbb, - 0x77c1def, - 0x77d5df0, - 0x77d9df5, - 0x77f1df6, - 0x77f5dfc, - 0x7801dfd, - 0x7805e00, - 0x7821e01, - 0x785de08, - 0x7861e17, - 0x7881e18, - 0x78d1e20, - 0x78ede34, - 0x7941e3b, - 0x7945e50, - 0x7949e51, - 0x794de52, - 0x7991e53, - 0x79a1e64, - 0x79dde68, - 0x79e1e77, - 0x7a11e78, - 0x7b59e84, - 0x7b7ded6, - 0x7ba9edf, - 0x7bb5eea, - 0x7bbdeed, - 0x7ccdeef, - 0x7cd9f33, - 0x7ce5f36, - 0x7cf1f39, - 0x7cfdf3c, - 0x7d09f3f, - 0x7d15f42, - 0x7d21f45, - 0x7d2df48, - 0x7d39f4b, - 0x7d45f4e, - 0x7d51f51, - 0x7d5df54, - 0x7d69f57, - 0x7d71f5a, - 0x7d7df5c, - 0x7d89f5f, - 0x7d95f62, - 0x7da1f65, - 0x7dadf68, - 0x7db9f6b, - 0x7dc5f6e, - 0x7dd1f71, - 0x7dddf74, - 0x7de9f77, - 0x7df5f7a, - 0x7e01f7d, - 0x7e0df80, - 0x7e19f83, - 0x7e25f86, - 0x7e31f89, - 0x7e3df8c, - 0x7e45f8f, - 0x7e51f91, - 0x7e5df94, - 0x7e69f97, - 0x7e75f9a, - 0x7e81f9d, - 0x7e8dfa0, - 0x7e99fa3, - 0x7ea5fa6, - 0x7eb1fa9, - 0x7ebdfac, - 0x7ec9faf, - 0x7ed5fb2, - 0x7ee1fb5, - 0x7ee9fb8, - 0x7ef5fba, - 0x7f01fbd, - 0x7f0dfc0, - 0x7f19fc3, - 0x7f25fc6, - 0x7f31fc9, - 0x7f3dfcc, - 0x7f49fcf, - 0x7f4dfd2, - 0x7f59fd3, - 0x7f71fd6, - 0x7f75fdc, - 0x7f85fdd, - 0x7f9dfe1, - 0x7fe1fe7, - 0x7ff5ff8, - 0x8029ffd, - 0x803a00a, - 0x805a00e, - 0x8072016, - 0x808a01c, - 0x808e022, - 0x280d2023, - 0x80d6034, - 0x8102035, - 
0x8106040, - 0x811a041, -} - -// max children 479 (capacity 511) -// max text offset 28411 (capacity 32767) -// max text length 36 (capacity 63) -// max hi 8262 (capacity 16383) -// max lo 8257 (capacity 16383) diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table_test.go deleted file mode 100644 index 416512cb9..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/net/publicsuffix/table_test.go +++ /dev/null @@ -1,16474 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -var rules = [...]string{ - "ac", - "com.ac", - "edu.ac", - "gov.ac", - "net.ac", - "mil.ac", - "org.ac", - "ad", - "nom.ad", - "ae", - "co.ae", - "net.ae", - "org.ae", - "sch.ae", - "ac.ae", - "gov.ae", - "mil.ae", - "aero", - "accident-investigation.aero", - "accident-prevention.aero", - "aerobatic.aero", - "aeroclub.aero", - "aerodrome.aero", - "agents.aero", - "aircraft.aero", - "airline.aero", - "airport.aero", - "air-surveillance.aero", - "airtraffic.aero", - "air-traffic-control.aero", - "ambulance.aero", - "amusement.aero", - "association.aero", - "author.aero", - "ballooning.aero", - "broker.aero", - "caa.aero", - "cargo.aero", - "catering.aero", - "certification.aero", - "championship.aero", - "charter.aero", - "civilaviation.aero", - "club.aero", - "conference.aero", - "consultant.aero", - "consulting.aero", - "control.aero", - "council.aero", - "crew.aero", - "design.aero", - "dgca.aero", - "educator.aero", - "emergency.aero", - "engine.aero", - "engineer.aero", - "entertainment.aero", - "equipment.aero", - "exchange.aero", - "express.aero", - "federation.aero", - "flight.aero", - "freight.aero", - "fuel.aero", - "gliding.aero", - "government.aero", - "groundhandling.aero", - "group.aero", - "hanggliding.aero", - "homebuilt.aero", - "insurance.aero", - "journal.aero", - "journalist.aero", - "leasing.aero", - "logistics.aero", - "magazine.aero", - "maintenance.aero", - "media.aero", - "microlight.aero", - "modelling.aero", - "navigation.aero", - "parachuting.aero", - "paragliding.aero", - "passenger-association.aero", - "pilot.aero", - "press.aero", - "production.aero", - "recreation.aero", - "repbody.aero", - "res.aero", - "research.aero", - "rotorcraft.aero", - "safety.aero", - "scientist.aero", - "services.aero", - "show.aero", - "skydiving.aero", - "software.aero", - "student.aero", - "trader.aero", - "trading.aero", - "trainer.aero", - "union.aero", - "workinggroup.aero", - "works.aero", - "af", - "gov.af", - "com.af", - "org.af", - "net.af", - "edu.af", - "ag", - "com.ag", - "org.ag", - "net.ag", - "co.ag", - "nom.ag", - "ai", - "off.ai", - "com.ai", - "net.ai", - "org.ai", - "al", - "com.al", - "edu.al", - "gov.al", - "mil.al", - "net.al", - "org.al", - "am", - "ao", - "ed.ao", - "gv.ao", - "og.ao", - "co.ao", - "pb.ao", - "it.ao", - "aq", - "ar", - "com.ar", - "edu.ar", - "gob.ar", - "gov.ar", - "int.ar", - "mil.ar", - "musica.ar", - "net.ar", - "org.ar", - "tur.ar", - "arpa", - "e164.arpa", - "in-addr.arpa", - "ip6.arpa", - "iris.arpa", - "uri.arpa", - "urn.arpa", - "as", - "gov.as", - "asia", - "at", - "ac.at", - "co.at", - "gv.at", - "or.at", - "au", - "com.au", - "net.au", - "org.au", - "edu.au", - "gov.au", - "asn.au", - "id.au", - "info.au", - "conf.au", - "oz.au", - "act.au", - "nsw.au", - "nt.au", - "qld.au", - "sa.au", - "tas.au", - "vic.au", - "wa.au", - "act.edu.au", - "nsw.edu.au", - "nt.edu.au", - "qld.edu.au", - "sa.edu.au", - 
"tas.edu.au", - "vic.edu.au", - "wa.edu.au", - "qld.gov.au", - "sa.gov.au", - "tas.gov.au", - "vic.gov.au", - "wa.gov.au", - "aw", - "com.aw", - "ax", - "az", - "com.az", - "net.az", - "int.az", - "gov.az", - "org.az", - "edu.az", - "info.az", - "pp.az", - "mil.az", - "name.az", - "pro.az", - "biz.az", - "ba", - "com.ba", - "edu.ba", - "gov.ba", - "mil.ba", - "net.ba", - "org.ba", - "bb", - "biz.bb", - "co.bb", - "com.bb", - "edu.bb", - "gov.bb", - "info.bb", - "net.bb", - "org.bb", - "store.bb", - "tv.bb", - "*.bd", - "be", - "ac.be", - "bf", - "gov.bf", - "bg", - "a.bg", - "b.bg", - "c.bg", - "d.bg", - "e.bg", - "f.bg", - "g.bg", - "h.bg", - "i.bg", - "j.bg", - "k.bg", - "l.bg", - "m.bg", - "n.bg", - "o.bg", - "p.bg", - "q.bg", - "r.bg", - "s.bg", - "t.bg", - "u.bg", - "v.bg", - "w.bg", - "x.bg", - "y.bg", - "z.bg", - "0.bg", - "1.bg", - "2.bg", - "3.bg", - "4.bg", - "5.bg", - "6.bg", - "7.bg", - "8.bg", - "9.bg", - "bh", - "com.bh", - "edu.bh", - "net.bh", - "org.bh", - "gov.bh", - "bi", - "co.bi", - "com.bi", - "edu.bi", - "or.bi", - "org.bi", - "biz", - "bj", - "asso.bj", - "barreau.bj", - "gouv.bj", - "bm", - "com.bm", - "edu.bm", - "gov.bm", - "net.bm", - "org.bm", - "*.bn", - "bo", - "com.bo", - "edu.bo", - "gov.bo", - "gob.bo", - "int.bo", - "org.bo", - "net.bo", - "mil.bo", - "tv.bo", - "br", - "adm.br", - "adv.br", - "agr.br", - "am.br", - "arq.br", - "art.br", - "ato.br", - "b.br", - "belem.br", - "bio.br", - "blog.br", - "bmd.br", - "cim.br", - "cng.br", - "cnt.br", - "com.br", - "coop.br", - "cri.br", - "def.br", - "ecn.br", - "eco.br", - "edu.br", - "emp.br", - "eng.br", - "esp.br", - "etc.br", - "eti.br", - "far.br", - "flog.br", - "floripa.br", - "fm.br", - "fnd.br", - "fot.br", - "fst.br", - "g12.br", - "ggf.br", - "gov.br", - "ac.gov.br", - "al.gov.br", - "am.gov.br", - "ap.gov.br", - "ba.gov.br", - "ce.gov.br", - "df.gov.br", - "es.gov.br", - "go.gov.br", - "ma.gov.br", - "mg.gov.br", - "ms.gov.br", - "mt.gov.br", - "pa.gov.br", - "pb.gov.br", - "pe.gov.br", - "pi.gov.br", - "pr.gov.br", - "rj.gov.br", - "rn.gov.br", - "ro.gov.br", - "rr.gov.br", - "rs.gov.br", - "sc.gov.br", - "se.gov.br", - "sp.gov.br", - "to.gov.br", - "imb.br", - "ind.br", - "inf.br", - "jampa.br", - "jor.br", - "jus.br", - "leg.br", - "lel.br", - "mat.br", - "med.br", - "mil.br", - "mp.br", - "mus.br", - "net.br", - "*.nom.br", - "not.br", - "ntr.br", - "odo.br", - "org.br", - "poa.br", - "ppg.br", - "pro.br", - "psc.br", - "psi.br", - "qsl.br", - "radio.br", - "rec.br", - "recife.br", - "slg.br", - "srv.br", - "taxi.br", - "teo.br", - "tmp.br", - "trd.br", - "tur.br", - "tv.br", - "vet.br", - "vix.br", - "vlog.br", - "wiki.br", - "zlg.br", - "bs", - "com.bs", - "net.bs", - "org.bs", - "edu.bs", - "gov.bs", - "bt", - "com.bt", - "edu.bt", - "gov.bt", - "net.bt", - "org.bt", - "bv", - "bw", - "co.bw", - "org.bw", - "by", - "gov.by", - "mil.by", - "com.by", - "of.by", - "bz", - "com.bz", - "net.bz", - "org.bz", - "edu.bz", - "gov.bz", - "ca", - "ab.ca", - "bc.ca", - "mb.ca", - "nb.ca", - "nf.ca", - "nl.ca", - "ns.ca", - "nt.ca", - "nu.ca", - "on.ca", - "pe.ca", - "qc.ca", - "sk.ca", - "yk.ca", - "gc.ca", - "cat", - "cc", - "cd", - "gov.cd", - "cf", - "cg", - "ch", - "ci", - "org.ci", - "or.ci", - "com.ci", - "co.ci", - "edu.ci", - "ed.ci", - "ac.ci", - "net.ci", - "go.ci", - "asso.ci", - "xn--aroport-bya.ci", - "int.ci", - "presse.ci", - "md.ci", - "gouv.ci", - "*.ck", - "!www.ck", - "cl", - "gov.cl", - "gob.cl", - "co.cl", - "mil.cl", - "cm", - "co.cm", - "com.cm", - "gov.cm", - "net.cm", - "cn", - 
"ac.cn", - "com.cn", - "edu.cn", - "gov.cn", - "net.cn", - "org.cn", - "mil.cn", - "xn--55qx5d.cn", - "xn--io0a7i.cn", - "xn--od0alg.cn", - "ah.cn", - "bj.cn", - "cq.cn", - "fj.cn", - "gd.cn", - "gs.cn", - "gz.cn", - "gx.cn", - "ha.cn", - "hb.cn", - "he.cn", - "hi.cn", - "hl.cn", - "hn.cn", - "jl.cn", - "js.cn", - "jx.cn", - "ln.cn", - "nm.cn", - "nx.cn", - "qh.cn", - "sc.cn", - "sd.cn", - "sh.cn", - "sn.cn", - "sx.cn", - "tj.cn", - "xj.cn", - "xz.cn", - "yn.cn", - "zj.cn", - "hk.cn", - "mo.cn", - "tw.cn", - "co", - "arts.co", - "com.co", - "edu.co", - "firm.co", - "gov.co", - "info.co", - "int.co", - "mil.co", - "net.co", - "nom.co", - "org.co", - "rec.co", - "web.co", - "com", - "coop", - "cr", - "ac.cr", - "co.cr", - "ed.cr", - "fi.cr", - "go.cr", - "or.cr", - "sa.cr", - "cu", - "com.cu", - "edu.cu", - "org.cu", - "net.cu", - "gov.cu", - "inf.cu", - "cv", - "cw", - "com.cw", - "edu.cw", - "net.cw", - "org.cw", - "cx", - "gov.cx", - "cy", - "ac.cy", - "biz.cy", - "com.cy", - "ekloges.cy", - "gov.cy", - "ltd.cy", - "name.cy", - "net.cy", - "org.cy", - "parliament.cy", - "press.cy", - "pro.cy", - "tm.cy", - "cz", - "de", - "dj", - "dk", - "dm", - "com.dm", - "net.dm", - "org.dm", - "edu.dm", - "gov.dm", - "do", - "art.do", - "com.do", - "edu.do", - "gob.do", - "gov.do", - "mil.do", - "net.do", - "org.do", - "sld.do", - "web.do", - "dz", - "com.dz", - "org.dz", - "net.dz", - "gov.dz", - "edu.dz", - "asso.dz", - "pol.dz", - "art.dz", - "ec", - "com.ec", - "info.ec", - "net.ec", - "fin.ec", - "k12.ec", - "med.ec", - "pro.ec", - "org.ec", - "edu.ec", - "gov.ec", - "gob.ec", - "mil.ec", - "edu", - "ee", - "edu.ee", - "gov.ee", - "riik.ee", - "lib.ee", - "med.ee", - "com.ee", - "pri.ee", - "aip.ee", - "org.ee", - "fie.ee", - "eg", - "com.eg", - "edu.eg", - "eun.eg", - "gov.eg", - "mil.eg", - "name.eg", - "net.eg", - "org.eg", - "sci.eg", - "*.er", - "es", - "com.es", - "nom.es", - "org.es", - "gob.es", - "edu.es", - "et", - "com.et", - "gov.et", - "org.et", - "edu.et", - "biz.et", - "name.et", - "info.et", - "net.et", - "eu", - "fi", - "aland.fi", - "*.fj", - "*.fk", - "fm", - "fo", - "fr", - "com.fr", - "asso.fr", - "nom.fr", - "prd.fr", - "presse.fr", - "tm.fr", - "aeroport.fr", - "assedic.fr", - "avocat.fr", - "avoues.fr", - "cci.fr", - "chambagri.fr", - "chirurgiens-dentistes.fr", - "experts-comptables.fr", - "geometre-expert.fr", - "gouv.fr", - "greta.fr", - "huissier-justice.fr", - "medecin.fr", - "notaires.fr", - "pharmacien.fr", - "port.fr", - "veterinaire.fr", - "ga", - "gb", - "gd", - "ge", - "com.ge", - "edu.ge", - "gov.ge", - "org.ge", - "mil.ge", - "net.ge", - "pvt.ge", - "gf", - "gg", - "co.gg", - "net.gg", - "org.gg", - "gh", - "com.gh", - "edu.gh", - "gov.gh", - "org.gh", - "mil.gh", - "gi", - "com.gi", - "ltd.gi", - "gov.gi", - "mod.gi", - "edu.gi", - "org.gi", - "gl", - "co.gl", - "com.gl", - "edu.gl", - "net.gl", - "org.gl", - "gm", - "gn", - "ac.gn", - "com.gn", - "edu.gn", - "gov.gn", - "org.gn", - "net.gn", - "gov", - "gp", - "com.gp", - "net.gp", - "mobi.gp", - "edu.gp", - "org.gp", - "asso.gp", - "gq", - "gr", - "com.gr", - "edu.gr", - "net.gr", - "org.gr", - "gov.gr", - "gs", - "gt", - "com.gt", - "edu.gt", - "gob.gt", - "ind.gt", - "mil.gt", - "net.gt", - "org.gt", - "*.gu", - "gw", - "gy", - "co.gy", - "com.gy", - "edu.gy", - "gov.gy", - "net.gy", - "org.gy", - "hk", - "com.hk", - "edu.hk", - "gov.hk", - "idv.hk", - "net.hk", - "org.hk", - "xn--55qx5d.hk", - "xn--wcvs22d.hk", - "xn--lcvr32d.hk", - "xn--mxtq1m.hk", - "xn--gmqw5a.hk", - "xn--ciqpn.hk", - 
"xn--gmq050i.hk", - "xn--zf0avx.hk", - "xn--io0a7i.hk", - "xn--mk0axi.hk", - "xn--od0alg.hk", - "xn--od0aq3b.hk", - "xn--tn0ag.hk", - "xn--uc0atv.hk", - "xn--uc0ay4a.hk", - "hm", - "hn", - "com.hn", - "edu.hn", - "org.hn", - "net.hn", - "mil.hn", - "gob.hn", - "hr", - "iz.hr", - "from.hr", - "name.hr", - "com.hr", - "ht", - "com.ht", - "shop.ht", - "firm.ht", - "info.ht", - "adult.ht", - "net.ht", - "pro.ht", - "org.ht", - "med.ht", - "art.ht", - "coop.ht", - "pol.ht", - "asso.ht", - "edu.ht", - "rel.ht", - "gouv.ht", - "perso.ht", - "hu", - "co.hu", - "info.hu", - "org.hu", - "priv.hu", - "sport.hu", - "tm.hu", - "2000.hu", - "agrar.hu", - "bolt.hu", - "casino.hu", - "city.hu", - "erotica.hu", - "erotika.hu", - "film.hu", - "forum.hu", - "games.hu", - "hotel.hu", - "ingatlan.hu", - "jogasz.hu", - "konyvelo.hu", - "lakas.hu", - "media.hu", - "news.hu", - "reklam.hu", - "sex.hu", - "shop.hu", - "suli.hu", - "szex.hu", - "tozsde.hu", - "utazas.hu", - "video.hu", - "id", - "ac.id", - "biz.id", - "co.id", - "desa.id", - "go.id", - "mil.id", - "my.id", - "net.id", - "or.id", - "sch.id", - "web.id", - "ie", - "gov.ie", - "il", - "ac.il", - "co.il", - "gov.il", - "idf.il", - "k12.il", - "muni.il", - "net.il", - "org.il", - "im", - "ac.im", - "co.im", - "com.im", - "ltd.co.im", - "net.im", - "org.im", - "plc.co.im", - "tt.im", - "tv.im", - "in", - "co.in", - "firm.in", - "net.in", - "org.in", - "gen.in", - "ind.in", - "nic.in", - "ac.in", - "edu.in", - "res.in", - "gov.in", - "mil.in", - "info", - "int", - "eu.int", - "io", - "com.io", - "iq", - "gov.iq", - "edu.iq", - "mil.iq", - "com.iq", - "org.iq", - "net.iq", - "ir", - "ac.ir", - "co.ir", - "gov.ir", - "id.ir", - "net.ir", - "org.ir", - "sch.ir", - "xn--mgba3a4f16a.ir", - "xn--mgba3a4fra.ir", - "is", - "net.is", - "com.is", - "edu.is", - "gov.is", - "org.is", - "int.is", - "it", - "gov.it", - "edu.it", - "abr.it", - "abruzzo.it", - "aosta-valley.it", - "aostavalley.it", - "bas.it", - "basilicata.it", - "cal.it", - "calabria.it", - "cam.it", - "campania.it", - "emilia-romagna.it", - "emiliaromagna.it", - "emr.it", - "friuli-v-giulia.it", - "friuli-ve-giulia.it", - "friuli-vegiulia.it", - "friuli-venezia-giulia.it", - "friuli-veneziagiulia.it", - "friuli-vgiulia.it", - "friuliv-giulia.it", - "friulive-giulia.it", - "friulivegiulia.it", - "friulivenezia-giulia.it", - "friuliveneziagiulia.it", - "friulivgiulia.it", - "fvg.it", - "laz.it", - "lazio.it", - "lig.it", - "liguria.it", - "lom.it", - "lombardia.it", - "lombardy.it", - "lucania.it", - "mar.it", - "marche.it", - "mol.it", - "molise.it", - "piedmont.it", - "piemonte.it", - "pmn.it", - "pug.it", - "puglia.it", - "sar.it", - "sardegna.it", - "sardinia.it", - "sic.it", - "sicilia.it", - "sicily.it", - "taa.it", - "tos.it", - "toscana.it", - "trentino-a-adige.it", - "trentino-aadige.it", - "trentino-alto-adige.it", - "trentino-altoadige.it", - "trentino-s-tirol.it", - "trentino-stirol.it", - "trentino-sud-tirol.it", - "trentino-sudtirol.it", - "trentino-sued-tirol.it", - "trentino-suedtirol.it", - "trentinoa-adige.it", - "trentinoaadige.it", - "trentinoalto-adige.it", - "trentinoaltoadige.it", - "trentinos-tirol.it", - "trentinostirol.it", - "trentinosud-tirol.it", - "trentinosudtirol.it", - "trentinosued-tirol.it", - "trentinosuedtirol.it", - "tuscany.it", - "umb.it", - "umbria.it", - "val-d-aosta.it", - "val-daosta.it", - "vald-aosta.it", - "valdaosta.it", - "valle-aosta.it", - "valle-d-aosta.it", - "valle-daosta.it", - "valleaosta.it", - "valled-aosta.it", - "valledaosta.it", - 
"vallee-aoste.it", - "valleeaoste.it", - "vao.it", - "vda.it", - "ven.it", - "veneto.it", - "ag.it", - "agrigento.it", - "al.it", - "alessandria.it", - "alto-adige.it", - "altoadige.it", - "an.it", - "ancona.it", - "andria-barletta-trani.it", - "andria-trani-barletta.it", - "andriabarlettatrani.it", - "andriatranibarletta.it", - "ao.it", - "aosta.it", - "aoste.it", - "ap.it", - "aq.it", - "aquila.it", - "ar.it", - "arezzo.it", - "ascoli-piceno.it", - "ascolipiceno.it", - "asti.it", - "at.it", - "av.it", - "avellino.it", - "ba.it", - "balsan.it", - "bari.it", - "barletta-trani-andria.it", - "barlettatraniandria.it", - "belluno.it", - "benevento.it", - "bergamo.it", - "bg.it", - "bi.it", - "biella.it", - "bl.it", - "bn.it", - "bo.it", - "bologna.it", - "bolzano.it", - "bozen.it", - "br.it", - "brescia.it", - "brindisi.it", - "bs.it", - "bt.it", - "bz.it", - "ca.it", - "cagliari.it", - "caltanissetta.it", - "campidano-medio.it", - "campidanomedio.it", - "campobasso.it", - "carbonia-iglesias.it", - "carboniaiglesias.it", - "carrara-massa.it", - "carraramassa.it", - "caserta.it", - "catania.it", - "catanzaro.it", - "cb.it", - "ce.it", - "cesena-forli.it", - "cesenaforli.it", - "ch.it", - "chieti.it", - "ci.it", - "cl.it", - "cn.it", - "co.it", - "como.it", - "cosenza.it", - "cr.it", - "cremona.it", - "crotone.it", - "cs.it", - "ct.it", - "cuneo.it", - "cz.it", - "dell-ogliastra.it", - "dellogliastra.it", - "en.it", - "enna.it", - "fc.it", - "fe.it", - "fermo.it", - "ferrara.it", - "fg.it", - "fi.it", - "firenze.it", - "florence.it", - "fm.it", - "foggia.it", - "forli-cesena.it", - "forlicesena.it", - "fr.it", - "frosinone.it", - "ge.it", - "genoa.it", - "genova.it", - "go.it", - "gorizia.it", - "gr.it", - "grosseto.it", - "iglesias-carbonia.it", - "iglesiascarbonia.it", - "im.it", - "imperia.it", - "is.it", - "isernia.it", - "kr.it", - "la-spezia.it", - "laquila.it", - "laspezia.it", - "latina.it", - "lc.it", - "le.it", - "lecce.it", - "lecco.it", - "li.it", - "livorno.it", - "lo.it", - "lodi.it", - "lt.it", - "lu.it", - "lucca.it", - "macerata.it", - "mantova.it", - "massa-carrara.it", - "massacarrara.it", - "matera.it", - "mb.it", - "mc.it", - "me.it", - "medio-campidano.it", - "mediocampidano.it", - "messina.it", - "mi.it", - "milan.it", - "milano.it", - "mn.it", - "mo.it", - "modena.it", - "monza-brianza.it", - "monza-e-della-brianza.it", - "monza.it", - "monzabrianza.it", - "monzaebrianza.it", - "monzaedellabrianza.it", - "ms.it", - "mt.it", - "na.it", - "naples.it", - "napoli.it", - "no.it", - "novara.it", - "nu.it", - "nuoro.it", - "og.it", - "ogliastra.it", - "olbia-tempio.it", - "olbiatempio.it", - "or.it", - "oristano.it", - "ot.it", - "pa.it", - "padova.it", - "padua.it", - "palermo.it", - "parma.it", - "pavia.it", - "pc.it", - "pd.it", - "pe.it", - "perugia.it", - "pesaro-urbino.it", - "pesarourbino.it", - "pescara.it", - "pg.it", - "pi.it", - "piacenza.it", - "pisa.it", - "pistoia.it", - "pn.it", - "po.it", - "pordenone.it", - "potenza.it", - "pr.it", - "prato.it", - "pt.it", - "pu.it", - "pv.it", - "pz.it", - "ra.it", - "ragusa.it", - "ravenna.it", - "rc.it", - "re.it", - "reggio-calabria.it", - "reggio-emilia.it", - "reggiocalabria.it", - "reggioemilia.it", - "rg.it", - "ri.it", - "rieti.it", - "rimini.it", - "rm.it", - "rn.it", - "ro.it", - "roma.it", - "rome.it", - "rovigo.it", - "sa.it", - "salerno.it", - "sassari.it", - "savona.it", - "si.it", - "siena.it", - "siracusa.it", - "so.it", - "sondrio.it", - "sp.it", - "sr.it", - "ss.it", - "suedtirol.it", - "sv.it", - "ta.it", - 
"taranto.it", - "te.it", - "tempio-olbia.it", - "tempioolbia.it", - "teramo.it", - "terni.it", - "tn.it", - "to.it", - "torino.it", - "tp.it", - "tr.it", - "trani-andria-barletta.it", - "trani-barletta-andria.it", - "traniandriabarletta.it", - "tranibarlettaandria.it", - "trapani.it", - "trentino.it", - "trento.it", - "treviso.it", - "trieste.it", - "ts.it", - "turin.it", - "tv.it", - "ud.it", - "udine.it", - "urbino-pesaro.it", - "urbinopesaro.it", - "va.it", - "varese.it", - "vb.it", - "vc.it", - "ve.it", - "venezia.it", - "venice.it", - "verbania.it", - "vercelli.it", - "verona.it", - "vi.it", - "vibo-valentia.it", - "vibovalentia.it", - "vicenza.it", - "viterbo.it", - "vr.it", - "vs.it", - "vt.it", - "vv.it", - "je", - "co.je", - "net.je", - "org.je", - "*.jm", - "jo", - "com.jo", - "org.jo", - "net.jo", - "edu.jo", - "sch.jo", - "gov.jo", - "mil.jo", - "name.jo", - "jobs", - "jp", - "ac.jp", - "ad.jp", - "co.jp", - "ed.jp", - "go.jp", - "gr.jp", - "lg.jp", - "ne.jp", - "or.jp", - "aichi.jp", - "akita.jp", - "aomori.jp", - "chiba.jp", - "ehime.jp", - "fukui.jp", - "fukuoka.jp", - "fukushima.jp", - "gifu.jp", - "gunma.jp", - "hiroshima.jp", - "hokkaido.jp", - "hyogo.jp", - "ibaraki.jp", - "ishikawa.jp", - "iwate.jp", - "kagawa.jp", - "kagoshima.jp", - "kanagawa.jp", - "kochi.jp", - "kumamoto.jp", - "kyoto.jp", - "mie.jp", - "miyagi.jp", - "miyazaki.jp", - "nagano.jp", - "nagasaki.jp", - "nara.jp", - "niigata.jp", - "oita.jp", - "okayama.jp", - "okinawa.jp", - "osaka.jp", - "saga.jp", - "saitama.jp", - "shiga.jp", - "shimane.jp", - "shizuoka.jp", - "tochigi.jp", - "tokushima.jp", - "tokyo.jp", - "tottori.jp", - "toyama.jp", - "wakayama.jp", - "yamagata.jp", - "yamaguchi.jp", - "yamanashi.jp", - "xn--4pvxs.jp", - "xn--vgu402c.jp", - "xn--c3s14m.jp", - "xn--f6qx53a.jp", - "xn--8pvr4u.jp", - "xn--uist22h.jp", - "xn--djrs72d6uy.jp", - "xn--mkru45i.jp", - "xn--0trq7p7nn.jp", - "xn--8ltr62k.jp", - "xn--2m4a15e.jp", - "xn--efvn9s.jp", - "xn--32vp30h.jp", - "xn--4it797k.jp", - "xn--1lqs71d.jp", - "xn--5rtp49c.jp", - "xn--5js045d.jp", - "xn--ehqz56n.jp", - "xn--1lqs03n.jp", - "xn--qqqt11m.jp", - "xn--kbrq7o.jp", - "xn--pssu33l.jp", - "xn--ntsq17g.jp", - "xn--uisz3g.jp", - "xn--6btw5a.jp", - "xn--1ctwo.jp", - "xn--6orx2r.jp", - "xn--rht61e.jp", - "xn--rht27z.jp", - "xn--djty4k.jp", - "xn--nit225k.jp", - "xn--rht3d.jp", - "xn--klty5x.jp", - "xn--kltx9a.jp", - "xn--kltp7d.jp", - "xn--uuwu58a.jp", - "xn--zbx025d.jp", - "xn--ntso0iqx3a.jp", - "xn--elqq16h.jp", - "xn--4it168d.jp", - "xn--klt787d.jp", - "xn--rny31h.jp", - "xn--7t0a264c.jp", - "xn--5rtq34k.jp", - "xn--k7yn95e.jp", - "xn--tor131o.jp", - "xn--d5qv7z876c.jp", - "*.kawasaki.jp", - "*.kitakyushu.jp", - "*.kobe.jp", - "*.nagoya.jp", - "*.sapporo.jp", - "*.sendai.jp", - "*.yokohama.jp", - "!city.kawasaki.jp", - "!city.kitakyushu.jp", - "!city.kobe.jp", - "!city.nagoya.jp", - "!city.sapporo.jp", - "!city.sendai.jp", - "!city.yokohama.jp", - "aisai.aichi.jp", - "ama.aichi.jp", - "anjo.aichi.jp", - "asuke.aichi.jp", - "chiryu.aichi.jp", - "chita.aichi.jp", - "fuso.aichi.jp", - "gamagori.aichi.jp", - "handa.aichi.jp", - "hazu.aichi.jp", - "hekinan.aichi.jp", - "higashiura.aichi.jp", - "ichinomiya.aichi.jp", - "inazawa.aichi.jp", - "inuyama.aichi.jp", - "isshiki.aichi.jp", - "iwakura.aichi.jp", - "kanie.aichi.jp", - "kariya.aichi.jp", - "kasugai.aichi.jp", - "kira.aichi.jp", - "kiyosu.aichi.jp", - "komaki.aichi.jp", - "konan.aichi.jp", - "kota.aichi.jp", - "mihama.aichi.jp", - "miyoshi.aichi.jp", - "nishio.aichi.jp", - "nisshin.aichi.jp", - 
"obu.aichi.jp", - "oguchi.aichi.jp", - "oharu.aichi.jp", - "okazaki.aichi.jp", - "owariasahi.aichi.jp", - "seto.aichi.jp", - "shikatsu.aichi.jp", - "shinshiro.aichi.jp", - "shitara.aichi.jp", - "tahara.aichi.jp", - "takahama.aichi.jp", - "tobishima.aichi.jp", - "toei.aichi.jp", - "togo.aichi.jp", - "tokai.aichi.jp", - "tokoname.aichi.jp", - "toyoake.aichi.jp", - "toyohashi.aichi.jp", - "toyokawa.aichi.jp", - "toyone.aichi.jp", - "toyota.aichi.jp", - "tsushima.aichi.jp", - "yatomi.aichi.jp", - "akita.akita.jp", - "daisen.akita.jp", - "fujisato.akita.jp", - "gojome.akita.jp", - "hachirogata.akita.jp", - "happou.akita.jp", - "higashinaruse.akita.jp", - "honjo.akita.jp", - "honjyo.akita.jp", - "ikawa.akita.jp", - "kamikoani.akita.jp", - "kamioka.akita.jp", - "katagami.akita.jp", - "kazuno.akita.jp", - "kitaakita.akita.jp", - "kosaka.akita.jp", - "kyowa.akita.jp", - "misato.akita.jp", - "mitane.akita.jp", - "moriyoshi.akita.jp", - "nikaho.akita.jp", - "noshiro.akita.jp", - "odate.akita.jp", - "oga.akita.jp", - "ogata.akita.jp", - "semboku.akita.jp", - "yokote.akita.jp", - "yurihonjo.akita.jp", - "aomori.aomori.jp", - "gonohe.aomori.jp", - "hachinohe.aomori.jp", - "hashikami.aomori.jp", - "hiranai.aomori.jp", - "hirosaki.aomori.jp", - "itayanagi.aomori.jp", - "kuroishi.aomori.jp", - "misawa.aomori.jp", - "mutsu.aomori.jp", - "nakadomari.aomori.jp", - "noheji.aomori.jp", - "oirase.aomori.jp", - "owani.aomori.jp", - "rokunohe.aomori.jp", - "sannohe.aomori.jp", - "shichinohe.aomori.jp", - "shingo.aomori.jp", - "takko.aomori.jp", - "towada.aomori.jp", - "tsugaru.aomori.jp", - "tsuruta.aomori.jp", - "abiko.chiba.jp", - "asahi.chiba.jp", - "chonan.chiba.jp", - "chosei.chiba.jp", - "choshi.chiba.jp", - "chuo.chiba.jp", - "funabashi.chiba.jp", - "futtsu.chiba.jp", - "hanamigawa.chiba.jp", - "ichihara.chiba.jp", - "ichikawa.chiba.jp", - "ichinomiya.chiba.jp", - "inzai.chiba.jp", - "isumi.chiba.jp", - "kamagaya.chiba.jp", - "kamogawa.chiba.jp", - "kashiwa.chiba.jp", - "katori.chiba.jp", - "katsuura.chiba.jp", - "kimitsu.chiba.jp", - "kisarazu.chiba.jp", - "kozaki.chiba.jp", - "kujukuri.chiba.jp", - "kyonan.chiba.jp", - "matsudo.chiba.jp", - "midori.chiba.jp", - "mihama.chiba.jp", - "minamiboso.chiba.jp", - "mobara.chiba.jp", - "mutsuzawa.chiba.jp", - "nagara.chiba.jp", - "nagareyama.chiba.jp", - "narashino.chiba.jp", - "narita.chiba.jp", - "noda.chiba.jp", - "oamishirasato.chiba.jp", - "omigawa.chiba.jp", - "onjuku.chiba.jp", - "otaki.chiba.jp", - "sakae.chiba.jp", - "sakura.chiba.jp", - "shimofusa.chiba.jp", - "shirako.chiba.jp", - "shiroi.chiba.jp", - "shisui.chiba.jp", - "sodegaura.chiba.jp", - "sosa.chiba.jp", - "tako.chiba.jp", - "tateyama.chiba.jp", - "togane.chiba.jp", - "tohnosho.chiba.jp", - "tomisato.chiba.jp", - "urayasu.chiba.jp", - "yachimata.chiba.jp", - "yachiyo.chiba.jp", - "yokaichiba.chiba.jp", - "yokoshibahikari.chiba.jp", - "yotsukaido.chiba.jp", - "ainan.ehime.jp", - "honai.ehime.jp", - "ikata.ehime.jp", - "imabari.ehime.jp", - "iyo.ehime.jp", - "kamijima.ehime.jp", - "kihoku.ehime.jp", - "kumakogen.ehime.jp", - "masaki.ehime.jp", - "matsuno.ehime.jp", - "matsuyama.ehime.jp", - "namikata.ehime.jp", - "niihama.ehime.jp", - "ozu.ehime.jp", - "saijo.ehime.jp", - "seiyo.ehime.jp", - "shikokuchuo.ehime.jp", - "tobe.ehime.jp", - "toon.ehime.jp", - "uchiko.ehime.jp", - "uwajima.ehime.jp", - "yawatahama.ehime.jp", - "echizen.fukui.jp", - "eiheiji.fukui.jp", - "fukui.fukui.jp", - "ikeda.fukui.jp", - "katsuyama.fukui.jp", - "mihama.fukui.jp", - "minamiechizen.fukui.jp", - "obama.fukui.jp", - 
"ohi.fukui.jp", - "ono.fukui.jp", - "sabae.fukui.jp", - "sakai.fukui.jp", - "takahama.fukui.jp", - "tsuruga.fukui.jp", - "wakasa.fukui.jp", - "ashiya.fukuoka.jp", - "buzen.fukuoka.jp", - "chikugo.fukuoka.jp", - "chikuho.fukuoka.jp", - "chikujo.fukuoka.jp", - "chikushino.fukuoka.jp", - "chikuzen.fukuoka.jp", - "chuo.fukuoka.jp", - "dazaifu.fukuoka.jp", - "fukuchi.fukuoka.jp", - "hakata.fukuoka.jp", - "higashi.fukuoka.jp", - "hirokawa.fukuoka.jp", - "hisayama.fukuoka.jp", - "iizuka.fukuoka.jp", - "inatsuki.fukuoka.jp", - "kaho.fukuoka.jp", - "kasuga.fukuoka.jp", - "kasuya.fukuoka.jp", - "kawara.fukuoka.jp", - "keisen.fukuoka.jp", - "koga.fukuoka.jp", - "kurate.fukuoka.jp", - "kurogi.fukuoka.jp", - "kurume.fukuoka.jp", - "minami.fukuoka.jp", - "miyako.fukuoka.jp", - "miyama.fukuoka.jp", - "miyawaka.fukuoka.jp", - "mizumaki.fukuoka.jp", - "munakata.fukuoka.jp", - "nakagawa.fukuoka.jp", - "nakama.fukuoka.jp", - "nishi.fukuoka.jp", - "nogata.fukuoka.jp", - "ogori.fukuoka.jp", - "okagaki.fukuoka.jp", - "okawa.fukuoka.jp", - "oki.fukuoka.jp", - "omuta.fukuoka.jp", - "onga.fukuoka.jp", - "onojo.fukuoka.jp", - "oto.fukuoka.jp", - "saigawa.fukuoka.jp", - "sasaguri.fukuoka.jp", - "shingu.fukuoka.jp", - "shinyoshitomi.fukuoka.jp", - "shonai.fukuoka.jp", - "soeda.fukuoka.jp", - "sue.fukuoka.jp", - "tachiarai.fukuoka.jp", - "tagawa.fukuoka.jp", - "takata.fukuoka.jp", - "toho.fukuoka.jp", - "toyotsu.fukuoka.jp", - "tsuiki.fukuoka.jp", - "ukiha.fukuoka.jp", - "umi.fukuoka.jp", - "usui.fukuoka.jp", - "yamada.fukuoka.jp", - "yame.fukuoka.jp", - "yanagawa.fukuoka.jp", - "yukuhashi.fukuoka.jp", - "aizubange.fukushima.jp", - "aizumisato.fukushima.jp", - "aizuwakamatsu.fukushima.jp", - "asakawa.fukushima.jp", - "bandai.fukushima.jp", - "date.fukushima.jp", - "fukushima.fukushima.jp", - "furudono.fukushima.jp", - "futaba.fukushima.jp", - "hanawa.fukushima.jp", - "higashi.fukushima.jp", - "hirata.fukushima.jp", - "hirono.fukushima.jp", - "iitate.fukushima.jp", - "inawashiro.fukushima.jp", - "ishikawa.fukushima.jp", - "iwaki.fukushima.jp", - "izumizaki.fukushima.jp", - "kagamiishi.fukushima.jp", - "kaneyama.fukushima.jp", - "kawamata.fukushima.jp", - "kitakata.fukushima.jp", - "kitashiobara.fukushima.jp", - "koori.fukushima.jp", - "koriyama.fukushima.jp", - "kunimi.fukushima.jp", - "miharu.fukushima.jp", - "mishima.fukushima.jp", - "namie.fukushima.jp", - "nango.fukushima.jp", - "nishiaizu.fukushima.jp", - "nishigo.fukushima.jp", - "okuma.fukushima.jp", - "omotego.fukushima.jp", - "ono.fukushima.jp", - "otama.fukushima.jp", - "samegawa.fukushima.jp", - "shimogo.fukushima.jp", - "shirakawa.fukushima.jp", - "showa.fukushima.jp", - "soma.fukushima.jp", - "sukagawa.fukushima.jp", - "taishin.fukushima.jp", - "tamakawa.fukushima.jp", - "tanagura.fukushima.jp", - "tenei.fukushima.jp", - "yabuki.fukushima.jp", - "yamato.fukushima.jp", - "yamatsuri.fukushima.jp", - "yanaizu.fukushima.jp", - "yugawa.fukushima.jp", - "anpachi.gifu.jp", - "ena.gifu.jp", - "gifu.gifu.jp", - "ginan.gifu.jp", - "godo.gifu.jp", - "gujo.gifu.jp", - "hashima.gifu.jp", - "hichiso.gifu.jp", - "hida.gifu.jp", - "higashishirakawa.gifu.jp", - "ibigawa.gifu.jp", - "ikeda.gifu.jp", - "kakamigahara.gifu.jp", - "kani.gifu.jp", - "kasahara.gifu.jp", - "kasamatsu.gifu.jp", - "kawaue.gifu.jp", - "kitagata.gifu.jp", - "mino.gifu.jp", - "minokamo.gifu.jp", - "mitake.gifu.jp", - "mizunami.gifu.jp", - "motosu.gifu.jp", - "nakatsugawa.gifu.jp", - "ogaki.gifu.jp", - "sakahogi.gifu.jp", - "seki.gifu.jp", - "sekigahara.gifu.jp", - "shirakawa.gifu.jp", - 
"tajimi.gifu.jp", - "takayama.gifu.jp", - "tarui.gifu.jp", - "toki.gifu.jp", - "tomika.gifu.jp", - "wanouchi.gifu.jp", - "yamagata.gifu.jp", - "yaotsu.gifu.jp", - "yoro.gifu.jp", - "annaka.gunma.jp", - "chiyoda.gunma.jp", - "fujioka.gunma.jp", - "higashiagatsuma.gunma.jp", - "isesaki.gunma.jp", - "itakura.gunma.jp", - "kanna.gunma.jp", - "kanra.gunma.jp", - "katashina.gunma.jp", - "kawaba.gunma.jp", - "kiryu.gunma.jp", - "kusatsu.gunma.jp", - "maebashi.gunma.jp", - "meiwa.gunma.jp", - "midori.gunma.jp", - "minakami.gunma.jp", - "naganohara.gunma.jp", - "nakanojo.gunma.jp", - "nanmoku.gunma.jp", - "numata.gunma.jp", - "oizumi.gunma.jp", - "ora.gunma.jp", - "ota.gunma.jp", - "shibukawa.gunma.jp", - "shimonita.gunma.jp", - "shinto.gunma.jp", - "showa.gunma.jp", - "takasaki.gunma.jp", - "takayama.gunma.jp", - "tamamura.gunma.jp", - "tatebayashi.gunma.jp", - "tomioka.gunma.jp", - "tsukiyono.gunma.jp", - "tsumagoi.gunma.jp", - "ueno.gunma.jp", - "yoshioka.gunma.jp", - "asaminami.hiroshima.jp", - "daiwa.hiroshima.jp", - "etajima.hiroshima.jp", - "fuchu.hiroshima.jp", - "fukuyama.hiroshima.jp", - "hatsukaichi.hiroshima.jp", - "higashihiroshima.hiroshima.jp", - "hongo.hiroshima.jp", - "jinsekikogen.hiroshima.jp", - "kaita.hiroshima.jp", - "kui.hiroshima.jp", - "kumano.hiroshima.jp", - "kure.hiroshima.jp", - "mihara.hiroshima.jp", - "miyoshi.hiroshima.jp", - "naka.hiroshima.jp", - "onomichi.hiroshima.jp", - "osakikamijima.hiroshima.jp", - "otake.hiroshima.jp", - "saka.hiroshima.jp", - "sera.hiroshima.jp", - "seranishi.hiroshima.jp", - "shinichi.hiroshima.jp", - "shobara.hiroshima.jp", - "takehara.hiroshima.jp", - "abashiri.hokkaido.jp", - "abira.hokkaido.jp", - "aibetsu.hokkaido.jp", - "akabira.hokkaido.jp", - "akkeshi.hokkaido.jp", - "asahikawa.hokkaido.jp", - "ashibetsu.hokkaido.jp", - "ashoro.hokkaido.jp", - "assabu.hokkaido.jp", - "atsuma.hokkaido.jp", - "bibai.hokkaido.jp", - "biei.hokkaido.jp", - "bifuka.hokkaido.jp", - "bihoro.hokkaido.jp", - "biratori.hokkaido.jp", - "chippubetsu.hokkaido.jp", - "chitose.hokkaido.jp", - "date.hokkaido.jp", - "ebetsu.hokkaido.jp", - "embetsu.hokkaido.jp", - "eniwa.hokkaido.jp", - "erimo.hokkaido.jp", - "esan.hokkaido.jp", - "esashi.hokkaido.jp", - "fukagawa.hokkaido.jp", - "fukushima.hokkaido.jp", - "furano.hokkaido.jp", - "furubira.hokkaido.jp", - "haboro.hokkaido.jp", - "hakodate.hokkaido.jp", - "hamatonbetsu.hokkaido.jp", - "hidaka.hokkaido.jp", - "higashikagura.hokkaido.jp", - "higashikawa.hokkaido.jp", - "hiroo.hokkaido.jp", - "hokuryu.hokkaido.jp", - "hokuto.hokkaido.jp", - "honbetsu.hokkaido.jp", - "horokanai.hokkaido.jp", - "horonobe.hokkaido.jp", - "ikeda.hokkaido.jp", - "imakane.hokkaido.jp", - "ishikari.hokkaido.jp", - "iwamizawa.hokkaido.jp", - "iwanai.hokkaido.jp", - "kamifurano.hokkaido.jp", - "kamikawa.hokkaido.jp", - "kamishihoro.hokkaido.jp", - "kamisunagawa.hokkaido.jp", - "kamoenai.hokkaido.jp", - "kayabe.hokkaido.jp", - "kembuchi.hokkaido.jp", - "kikonai.hokkaido.jp", - "kimobetsu.hokkaido.jp", - "kitahiroshima.hokkaido.jp", - "kitami.hokkaido.jp", - "kiyosato.hokkaido.jp", - "koshimizu.hokkaido.jp", - "kunneppu.hokkaido.jp", - "kuriyama.hokkaido.jp", - "kuromatsunai.hokkaido.jp", - "kushiro.hokkaido.jp", - "kutchan.hokkaido.jp", - "kyowa.hokkaido.jp", - "mashike.hokkaido.jp", - "matsumae.hokkaido.jp", - "mikasa.hokkaido.jp", - "minamifurano.hokkaido.jp", - "mombetsu.hokkaido.jp", - "moseushi.hokkaido.jp", - "mukawa.hokkaido.jp", - "muroran.hokkaido.jp", - "naie.hokkaido.jp", - "nakagawa.hokkaido.jp", - "nakasatsunai.hokkaido.jp", - 
"nakatombetsu.hokkaido.jp", - "nanae.hokkaido.jp", - "nanporo.hokkaido.jp", - "nayoro.hokkaido.jp", - "nemuro.hokkaido.jp", - "niikappu.hokkaido.jp", - "niki.hokkaido.jp", - "nishiokoppe.hokkaido.jp", - "noboribetsu.hokkaido.jp", - "numata.hokkaido.jp", - "obihiro.hokkaido.jp", - "obira.hokkaido.jp", - "oketo.hokkaido.jp", - "okoppe.hokkaido.jp", - "otaru.hokkaido.jp", - "otobe.hokkaido.jp", - "otofuke.hokkaido.jp", - "otoineppu.hokkaido.jp", - "oumu.hokkaido.jp", - "ozora.hokkaido.jp", - "pippu.hokkaido.jp", - "rankoshi.hokkaido.jp", - "rebun.hokkaido.jp", - "rikubetsu.hokkaido.jp", - "rishiri.hokkaido.jp", - "rishirifuji.hokkaido.jp", - "saroma.hokkaido.jp", - "sarufutsu.hokkaido.jp", - "shakotan.hokkaido.jp", - "shari.hokkaido.jp", - "shibecha.hokkaido.jp", - "shibetsu.hokkaido.jp", - "shikabe.hokkaido.jp", - "shikaoi.hokkaido.jp", - "shimamaki.hokkaido.jp", - "shimizu.hokkaido.jp", - "shimokawa.hokkaido.jp", - "shinshinotsu.hokkaido.jp", - "shintoku.hokkaido.jp", - "shiranuka.hokkaido.jp", - "shiraoi.hokkaido.jp", - "shiriuchi.hokkaido.jp", - "sobetsu.hokkaido.jp", - "sunagawa.hokkaido.jp", - "taiki.hokkaido.jp", - "takasu.hokkaido.jp", - "takikawa.hokkaido.jp", - "takinoue.hokkaido.jp", - "teshikaga.hokkaido.jp", - "tobetsu.hokkaido.jp", - "tohma.hokkaido.jp", - "tomakomai.hokkaido.jp", - "tomari.hokkaido.jp", - "toya.hokkaido.jp", - "toyako.hokkaido.jp", - "toyotomi.hokkaido.jp", - "toyoura.hokkaido.jp", - "tsubetsu.hokkaido.jp", - "tsukigata.hokkaido.jp", - "urakawa.hokkaido.jp", - "urausu.hokkaido.jp", - "uryu.hokkaido.jp", - "utashinai.hokkaido.jp", - "wakkanai.hokkaido.jp", - "wassamu.hokkaido.jp", - "yakumo.hokkaido.jp", - "yoichi.hokkaido.jp", - "aioi.hyogo.jp", - "akashi.hyogo.jp", - "ako.hyogo.jp", - "amagasaki.hyogo.jp", - "aogaki.hyogo.jp", - "asago.hyogo.jp", - "ashiya.hyogo.jp", - "awaji.hyogo.jp", - "fukusaki.hyogo.jp", - "goshiki.hyogo.jp", - "harima.hyogo.jp", - "himeji.hyogo.jp", - "ichikawa.hyogo.jp", - "inagawa.hyogo.jp", - "itami.hyogo.jp", - "kakogawa.hyogo.jp", - "kamigori.hyogo.jp", - "kamikawa.hyogo.jp", - "kasai.hyogo.jp", - "kasuga.hyogo.jp", - "kawanishi.hyogo.jp", - "miki.hyogo.jp", - "minamiawaji.hyogo.jp", - "nishinomiya.hyogo.jp", - "nishiwaki.hyogo.jp", - "ono.hyogo.jp", - "sanda.hyogo.jp", - "sannan.hyogo.jp", - "sasayama.hyogo.jp", - "sayo.hyogo.jp", - "shingu.hyogo.jp", - "shinonsen.hyogo.jp", - "shiso.hyogo.jp", - "sumoto.hyogo.jp", - "taishi.hyogo.jp", - "taka.hyogo.jp", - "takarazuka.hyogo.jp", - "takasago.hyogo.jp", - "takino.hyogo.jp", - "tamba.hyogo.jp", - "tatsuno.hyogo.jp", - "toyooka.hyogo.jp", - "yabu.hyogo.jp", - "yashiro.hyogo.jp", - "yoka.hyogo.jp", - "yokawa.hyogo.jp", - "ami.ibaraki.jp", - "asahi.ibaraki.jp", - "bando.ibaraki.jp", - "chikusei.ibaraki.jp", - "daigo.ibaraki.jp", - "fujishiro.ibaraki.jp", - "hitachi.ibaraki.jp", - "hitachinaka.ibaraki.jp", - "hitachiomiya.ibaraki.jp", - "hitachiota.ibaraki.jp", - "ibaraki.ibaraki.jp", - "ina.ibaraki.jp", - "inashiki.ibaraki.jp", - "itako.ibaraki.jp", - "iwama.ibaraki.jp", - "joso.ibaraki.jp", - "kamisu.ibaraki.jp", - "kasama.ibaraki.jp", - "kashima.ibaraki.jp", - "kasumigaura.ibaraki.jp", - "koga.ibaraki.jp", - "miho.ibaraki.jp", - "mito.ibaraki.jp", - "moriya.ibaraki.jp", - "naka.ibaraki.jp", - "namegata.ibaraki.jp", - "oarai.ibaraki.jp", - "ogawa.ibaraki.jp", - "omitama.ibaraki.jp", - "ryugasaki.ibaraki.jp", - "sakai.ibaraki.jp", - "sakuragawa.ibaraki.jp", - "shimodate.ibaraki.jp", - "shimotsuma.ibaraki.jp", - "shirosato.ibaraki.jp", - "sowa.ibaraki.jp", - "suifu.ibaraki.jp", - 
"takahagi.ibaraki.jp", - "tamatsukuri.ibaraki.jp", - "tokai.ibaraki.jp", - "tomobe.ibaraki.jp", - "tone.ibaraki.jp", - "toride.ibaraki.jp", - "tsuchiura.ibaraki.jp", - "tsukuba.ibaraki.jp", - "uchihara.ibaraki.jp", - "ushiku.ibaraki.jp", - "yachiyo.ibaraki.jp", - "yamagata.ibaraki.jp", - "yawara.ibaraki.jp", - "yuki.ibaraki.jp", - "anamizu.ishikawa.jp", - "hakui.ishikawa.jp", - "hakusan.ishikawa.jp", - "kaga.ishikawa.jp", - "kahoku.ishikawa.jp", - "kanazawa.ishikawa.jp", - "kawakita.ishikawa.jp", - "komatsu.ishikawa.jp", - "nakanoto.ishikawa.jp", - "nanao.ishikawa.jp", - "nomi.ishikawa.jp", - "nonoichi.ishikawa.jp", - "noto.ishikawa.jp", - "shika.ishikawa.jp", - "suzu.ishikawa.jp", - "tsubata.ishikawa.jp", - "tsurugi.ishikawa.jp", - "uchinada.ishikawa.jp", - "wajima.ishikawa.jp", - "fudai.iwate.jp", - "fujisawa.iwate.jp", - "hanamaki.iwate.jp", - "hiraizumi.iwate.jp", - "hirono.iwate.jp", - "ichinohe.iwate.jp", - "ichinoseki.iwate.jp", - "iwaizumi.iwate.jp", - "iwate.iwate.jp", - "joboji.iwate.jp", - "kamaishi.iwate.jp", - "kanegasaki.iwate.jp", - "karumai.iwate.jp", - "kawai.iwate.jp", - "kitakami.iwate.jp", - "kuji.iwate.jp", - "kunohe.iwate.jp", - "kuzumaki.iwate.jp", - "miyako.iwate.jp", - "mizusawa.iwate.jp", - "morioka.iwate.jp", - "ninohe.iwate.jp", - "noda.iwate.jp", - "ofunato.iwate.jp", - "oshu.iwate.jp", - "otsuchi.iwate.jp", - "rikuzentakata.iwate.jp", - "shiwa.iwate.jp", - "shizukuishi.iwate.jp", - "sumita.iwate.jp", - "tanohata.iwate.jp", - "tono.iwate.jp", - "yahaba.iwate.jp", - "yamada.iwate.jp", - "ayagawa.kagawa.jp", - "higashikagawa.kagawa.jp", - "kanonji.kagawa.jp", - "kotohira.kagawa.jp", - "manno.kagawa.jp", - "marugame.kagawa.jp", - "mitoyo.kagawa.jp", - "naoshima.kagawa.jp", - "sanuki.kagawa.jp", - "tadotsu.kagawa.jp", - "takamatsu.kagawa.jp", - "tonosho.kagawa.jp", - "uchinomi.kagawa.jp", - "utazu.kagawa.jp", - "zentsuji.kagawa.jp", - "akune.kagoshima.jp", - "amami.kagoshima.jp", - "hioki.kagoshima.jp", - "isa.kagoshima.jp", - "isen.kagoshima.jp", - "izumi.kagoshima.jp", - "kagoshima.kagoshima.jp", - "kanoya.kagoshima.jp", - "kawanabe.kagoshima.jp", - "kinko.kagoshima.jp", - "kouyama.kagoshima.jp", - "makurazaki.kagoshima.jp", - "matsumoto.kagoshima.jp", - "minamitane.kagoshima.jp", - "nakatane.kagoshima.jp", - "nishinoomote.kagoshima.jp", - "satsumasendai.kagoshima.jp", - "soo.kagoshima.jp", - "tarumizu.kagoshima.jp", - "yusui.kagoshima.jp", - "aikawa.kanagawa.jp", - "atsugi.kanagawa.jp", - "ayase.kanagawa.jp", - "chigasaki.kanagawa.jp", - "ebina.kanagawa.jp", - "fujisawa.kanagawa.jp", - "hadano.kanagawa.jp", - "hakone.kanagawa.jp", - "hiratsuka.kanagawa.jp", - "isehara.kanagawa.jp", - "kaisei.kanagawa.jp", - "kamakura.kanagawa.jp", - "kiyokawa.kanagawa.jp", - "matsuda.kanagawa.jp", - "minamiashigara.kanagawa.jp", - "miura.kanagawa.jp", - "nakai.kanagawa.jp", - "ninomiya.kanagawa.jp", - "odawara.kanagawa.jp", - "oi.kanagawa.jp", - "oiso.kanagawa.jp", - "sagamihara.kanagawa.jp", - "samukawa.kanagawa.jp", - "tsukui.kanagawa.jp", - "yamakita.kanagawa.jp", - "yamato.kanagawa.jp", - "yokosuka.kanagawa.jp", - "yugawara.kanagawa.jp", - "zama.kanagawa.jp", - "zushi.kanagawa.jp", - "aki.kochi.jp", - "geisei.kochi.jp", - "hidaka.kochi.jp", - "higashitsuno.kochi.jp", - "ino.kochi.jp", - "kagami.kochi.jp", - "kami.kochi.jp", - "kitagawa.kochi.jp", - "kochi.kochi.jp", - "mihara.kochi.jp", - "motoyama.kochi.jp", - "muroto.kochi.jp", - "nahari.kochi.jp", - "nakamura.kochi.jp", - "nankoku.kochi.jp", - "nishitosa.kochi.jp", - "niyodogawa.kochi.jp", - "ochi.kochi.jp", - 
"okawa.kochi.jp", - "otoyo.kochi.jp", - "otsuki.kochi.jp", - "sakawa.kochi.jp", - "sukumo.kochi.jp", - "susaki.kochi.jp", - "tosa.kochi.jp", - "tosashimizu.kochi.jp", - "toyo.kochi.jp", - "tsuno.kochi.jp", - "umaji.kochi.jp", - "yasuda.kochi.jp", - "yusuhara.kochi.jp", - "amakusa.kumamoto.jp", - "arao.kumamoto.jp", - "aso.kumamoto.jp", - "choyo.kumamoto.jp", - "gyokuto.kumamoto.jp", - "kamiamakusa.kumamoto.jp", - "kikuchi.kumamoto.jp", - "kumamoto.kumamoto.jp", - "mashiki.kumamoto.jp", - "mifune.kumamoto.jp", - "minamata.kumamoto.jp", - "minamioguni.kumamoto.jp", - "nagasu.kumamoto.jp", - "nishihara.kumamoto.jp", - "oguni.kumamoto.jp", - "ozu.kumamoto.jp", - "sumoto.kumamoto.jp", - "takamori.kumamoto.jp", - "uki.kumamoto.jp", - "uto.kumamoto.jp", - "yamaga.kumamoto.jp", - "yamato.kumamoto.jp", - "yatsushiro.kumamoto.jp", - "ayabe.kyoto.jp", - "fukuchiyama.kyoto.jp", - "higashiyama.kyoto.jp", - "ide.kyoto.jp", - "ine.kyoto.jp", - "joyo.kyoto.jp", - "kameoka.kyoto.jp", - "kamo.kyoto.jp", - "kita.kyoto.jp", - "kizu.kyoto.jp", - "kumiyama.kyoto.jp", - "kyotamba.kyoto.jp", - "kyotanabe.kyoto.jp", - "kyotango.kyoto.jp", - "maizuru.kyoto.jp", - "minami.kyoto.jp", - "minamiyamashiro.kyoto.jp", - "miyazu.kyoto.jp", - "muko.kyoto.jp", - "nagaokakyo.kyoto.jp", - "nakagyo.kyoto.jp", - "nantan.kyoto.jp", - "oyamazaki.kyoto.jp", - "sakyo.kyoto.jp", - "seika.kyoto.jp", - "tanabe.kyoto.jp", - "uji.kyoto.jp", - "ujitawara.kyoto.jp", - "wazuka.kyoto.jp", - "yamashina.kyoto.jp", - "yawata.kyoto.jp", - "asahi.mie.jp", - "inabe.mie.jp", - "ise.mie.jp", - "kameyama.mie.jp", - "kawagoe.mie.jp", - "kiho.mie.jp", - "kisosaki.mie.jp", - "kiwa.mie.jp", - "komono.mie.jp", - "kumano.mie.jp", - "kuwana.mie.jp", - "matsusaka.mie.jp", - "meiwa.mie.jp", - "mihama.mie.jp", - "minamiise.mie.jp", - "misugi.mie.jp", - "miyama.mie.jp", - "nabari.mie.jp", - "shima.mie.jp", - "suzuka.mie.jp", - "tado.mie.jp", - "taiki.mie.jp", - "taki.mie.jp", - "tamaki.mie.jp", - "toba.mie.jp", - "tsu.mie.jp", - "udono.mie.jp", - "ureshino.mie.jp", - "watarai.mie.jp", - "yokkaichi.mie.jp", - "furukawa.miyagi.jp", - "higashimatsushima.miyagi.jp", - "ishinomaki.miyagi.jp", - "iwanuma.miyagi.jp", - "kakuda.miyagi.jp", - "kami.miyagi.jp", - "kawasaki.miyagi.jp", - "marumori.miyagi.jp", - "matsushima.miyagi.jp", - "minamisanriku.miyagi.jp", - "misato.miyagi.jp", - "murata.miyagi.jp", - "natori.miyagi.jp", - "ogawara.miyagi.jp", - "ohira.miyagi.jp", - "onagawa.miyagi.jp", - "osaki.miyagi.jp", - "rifu.miyagi.jp", - "semine.miyagi.jp", - "shibata.miyagi.jp", - "shichikashuku.miyagi.jp", - "shikama.miyagi.jp", - "shiogama.miyagi.jp", - "shiroishi.miyagi.jp", - "tagajo.miyagi.jp", - "taiwa.miyagi.jp", - "tome.miyagi.jp", - "tomiya.miyagi.jp", - "wakuya.miyagi.jp", - "watari.miyagi.jp", - "yamamoto.miyagi.jp", - "zao.miyagi.jp", - "aya.miyazaki.jp", - "ebino.miyazaki.jp", - "gokase.miyazaki.jp", - "hyuga.miyazaki.jp", - "kadogawa.miyazaki.jp", - "kawaminami.miyazaki.jp", - "kijo.miyazaki.jp", - "kitagawa.miyazaki.jp", - "kitakata.miyazaki.jp", - "kitaura.miyazaki.jp", - "kobayashi.miyazaki.jp", - "kunitomi.miyazaki.jp", - "kushima.miyazaki.jp", - "mimata.miyazaki.jp", - "miyakonojo.miyazaki.jp", - "miyazaki.miyazaki.jp", - "morotsuka.miyazaki.jp", - "nichinan.miyazaki.jp", - "nishimera.miyazaki.jp", - "nobeoka.miyazaki.jp", - "saito.miyazaki.jp", - "shiiba.miyazaki.jp", - "shintomi.miyazaki.jp", - "takaharu.miyazaki.jp", - "takanabe.miyazaki.jp", - "takazaki.miyazaki.jp", - "tsuno.miyazaki.jp", - "achi.nagano.jp", - "agematsu.nagano.jp", - 
"anan.nagano.jp", - "aoki.nagano.jp", - "asahi.nagano.jp", - "azumino.nagano.jp", - "chikuhoku.nagano.jp", - "chikuma.nagano.jp", - "chino.nagano.jp", - "fujimi.nagano.jp", - "hakuba.nagano.jp", - "hara.nagano.jp", - "hiraya.nagano.jp", - "iida.nagano.jp", - "iijima.nagano.jp", - "iiyama.nagano.jp", - "iizuna.nagano.jp", - "ikeda.nagano.jp", - "ikusaka.nagano.jp", - "ina.nagano.jp", - "karuizawa.nagano.jp", - "kawakami.nagano.jp", - "kiso.nagano.jp", - "kisofukushima.nagano.jp", - "kitaaiki.nagano.jp", - "komagane.nagano.jp", - "komoro.nagano.jp", - "matsukawa.nagano.jp", - "matsumoto.nagano.jp", - "miasa.nagano.jp", - "minamiaiki.nagano.jp", - "minamimaki.nagano.jp", - "minamiminowa.nagano.jp", - "minowa.nagano.jp", - "miyada.nagano.jp", - "miyota.nagano.jp", - "mochizuki.nagano.jp", - "nagano.nagano.jp", - "nagawa.nagano.jp", - "nagiso.nagano.jp", - "nakagawa.nagano.jp", - "nakano.nagano.jp", - "nozawaonsen.nagano.jp", - "obuse.nagano.jp", - "ogawa.nagano.jp", - "okaya.nagano.jp", - "omachi.nagano.jp", - "omi.nagano.jp", - "ookuwa.nagano.jp", - "ooshika.nagano.jp", - "otaki.nagano.jp", - "otari.nagano.jp", - "sakae.nagano.jp", - "sakaki.nagano.jp", - "saku.nagano.jp", - "sakuho.nagano.jp", - "shimosuwa.nagano.jp", - "shinanomachi.nagano.jp", - "shiojiri.nagano.jp", - "suwa.nagano.jp", - "suzaka.nagano.jp", - "takagi.nagano.jp", - "takamori.nagano.jp", - "takayama.nagano.jp", - "tateshina.nagano.jp", - "tatsuno.nagano.jp", - "togakushi.nagano.jp", - "togura.nagano.jp", - "tomi.nagano.jp", - "ueda.nagano.jp", - "wada.nagano.jp", - "yamagata.nagano.jp", - "yamanouchi.nagano.jp", - "yasaka.nagano.jp", - "yasuoka.nagano.jp", - "chijiwa.nagasaki.jp", - "futsu.nagasaki.jp", - "goto.nagasaki.jp", - "hasami.nagasaki.jp", - "hirado.nagasaki.jp", - "iki.nagasaki.jp", - "isahaya.nagasaki.jp", - "kawatana.nagasaki.jp", - "kuchinotsu.nagasaki.jp", - "matsuura.nagasaki.jp", - "nagasaki.nagasaki.jp", - "obama.nagasaki.jp", - "omura.nagasaki.jp", - "oseto.nagasaki.jp", - "saikai.nagasaki.jp", - "sasebo.nagasaki.jp", - "seihi.nagasaki.jp", - "shimabara.nagasaki.jp", - "shinkamigoto.nagasaki.jp", - "togitsu.nagasaki.jp", - "tsushima.nagasaki.jp", - "unzen.nagasaki.jp", - "ando.nara.jp", - "gose.nara.jp", - "heguri.nara.jp", - "higashiyoshino.nara.jp", - "ikaruga.nara.jp", - "ikoma.nara.jp", - "kamikitayama.nara.jp", - "kanmaki.nara.jp", - "kashiba.nara.jp", - "kashihara.nara.jp", - "katsuragi.nara.jp", - "kawai.nara.jp", - "kawakami.nara.jp", - "kawanishi.nara.jp", - "koryo.nara.jp", - "kurotaki.nara.jp", - "mitsue.nara.jp", - "miyake.nara.jp", - "nara.nara.jp", - "nosegawa.nara.jp", - "oji.nara.jp", - "ouda.nara.jp", - "oyodo.nara.jp", - "sakurai.nara.jp", - "sango.nara.jp", - "shimoichi.nara.jp", - "shimokitayama.nara.jp", - "shinjo.nara.jp", - "soni.nara.jp", - "takatori.nara.jp", - "tawaramoto.nara.jp", - "tenkawa.nara.jp", - "tenri.nara.jp", - "uda.nara.jp", - "yamatokoriyama.nara.jp", - "yamatotakada.nara.jp", - "yamazoe.nara.jp", - "yoshino.nara.jp", - "aga.niigata.jp", - "agano.niigata.jp", - "gosen.niigata.jp", - "itoigawa.niigata.jp", - "izumozaki.niigata.jp", - "joetsu.niigata.jp", - "kamo.niigata.jp", - "kariwa.niigata.jp", - "kashiwazaki.niigata.jp", - "minamiuonuma.niigata.jp", - "mitsuke.niigata.jp", - "muika.niigata.jp", - "murakami.niigata.jp", - "myoko.niigata.jp", - "nagaoka.niigata.jp", - "niigata.niigata.jp", - "ojiya.niigata.jp", - "omi.niigata.jp", - "sado.niigata.jp", - "sanjo.niigata.jp", - "seiro.niigata.jp", - "seirou.niigata.jp", - "sekikawa.niigata.jp", - "shibata.niigata.jp", 
- "tagami.niigata.jp", - "tainai.niigata.jp", - "tochio.niigata.jp", - "tokamachi.niigata.jp", - "tsubame.niigata.jp", - "tsunan.niigata.jp", - "uonuma.niigata.jp", - "yahiko.niigata.jp", - "yoita.niigata.jp", - "yuzawa.niigata.jp", - "beppu.oita.jp", - "bungoono.oita.jp", - "bungotakada.oita.jp", - "hasama.oita.jp", - "hiji.oita.jp", - "himeshima.oita.jp", - "hita.oita.jp", - "kamitsue.oita.jp", - "kokonoe.oita.jp", - "kuju.oita.jp", - "kunisaki.oita.jp", - "kusu.oita.jp", - "oita.oita.jp", - "saiki.oita.jp", - "taketa.oita.jp", - "tsukumi.oita.jp", - "usa.oita.jp", - "usuki.oita.jp", - "yufu.oita.jp", - "akaiwa.okayama.jp", - "asakuchi.okayama.jp", - "bizen.okayama.jp", - "hayashima.okayama.jp", - "ibara.okayama.jp", - "kagamino.okayama.jp", - "kasaoka.okayama.jp", - "kibichuo.okayama.jp", - "kumenan.okayama.jp", - "kurashiki.okayama.jp", - "maniwa.okayama.jp", - "misaki.okayama.jp", - "nagi.okayama.jp", - "niimi.okayama.jp", - "nishiawakura.okayama.jp", - "okayama.okayama.jp", - "satosho.okayama.jp", - "setouchi.okayama.jp", - "shinjo.okayama.jp", - "shoo.okayama.jp", - "soja.okayama.jp", - "takahashi.okayama.jp", - "tamano.okayama.jp", - "tsuyama.okayama.jp", - "wake.okayama.jp", - "yakage.okayama.jp", - "aguni.okinawa.jp", - "ginowan.okinawa.jp", - "ginoza.okinawa.jp", - "gushikami.okinawa.jp", - "haebaru.okinawa.jp", - "higashi.okinawa.jp", - "hirara.okinawa.jp", - "iheya.okinawa.jp", - "ishigaki.okinawa.jp", - "ishikawa.okinawa.jp", - "itoman.okinawa.jp", - "izena.okinawa.jp", - "kadena.okinawa.jp", - "kin.okinawa.jp", - "kitadaito.okinawa.jp", - "kitanakagusuku.okinawa.jp", - "kumejima.okinawa.jp", - "kunigami.okinawa.jp", - "minamidaito.okinawa.jp", - "motobu.okinawa.jp", - "nago.okinawa.jp", - "naha.okinawa.jp", - "nakagusuku.okinawa.jp", - "nakijin.okinawa.jp", - "nanjo.okinawa.jp", - "nishihara.okinawa.jp", - "ogimi.okinawa.jp", - "okinawa.okinawa.jp", - "onna.okinawa.jp", - "shimoji.okinawa.jp", - "taketomi.okinawa.jp", - "tarama.okinawa.jp", - "tokashiki.okinawa.jp", - "tomigusuku.okinawa.jp", - "tonaki.okinawa.jp", - "urasoe.okinawa.jp", - "uruma.okinawa.jp", - "yaese.okinawa.jp", - "yomitan.okinawa.jp", - "yonabaru.okinawa.jp", - "yonaguni.okinawa.jp", - "zamami.okinawa.jp", - "abeno.osaka.jp", - "chihayaakasaka.osaka.jp", - "chuo.osaka.jp", - "daito.osaka.jp", - "fujiidera.osaka.jp", - "habikino.osaka.jp", - "hannan.osaka.jp", - "higashiosaka.osaka.jp", - "higashisumiyoshi.osaka.jp", - "higashiyodogawa.osaka.jp", - "hirakata.osaka.jp", - "ibaraki.osaka.jp", - "ikeda.osaka.jp", - "izumi.osaka.jp", - "izumiotsu.osaka.jp", - "izumisano.osaka.jp", - "kadoma.osaka.jp", - "kaizuka.osaka.jp", - "kanan.osaka.jp", - "kashiwara.osaka.jp", - "katano.osaka.jp", - "kawachinagano.osaka.jp", - "kishiwada.osaka.jp", - "kita.osaka.jp", - "kumatori.osaka.jp", - "matsubara.osaka.jp", - "minato.osaka.jp", - "minoh.osaka.jp", - "misaki.osaka.jp", - "moriguchi.osaka.jp", - "neyagawa.osaka.jp", - "nishi.osaka.jp", - "nose.osaka.jp", - "osakasayama.osaka.jp", - "sakai.osaka.jp", - "sayama.osaka.jp", - "sennan.osaka.jp", - "settsu.osaka.jp", - "shijonawate.osaka.jp", - "shimamoto.osaka.jp", - "suita.osaka.jp", - "tadaoka.osaka.jp", - "taishi.osaka.jp", - "tajiri.osaka.jp", - "takaishi.osaka.jp", - "takatsuki.osaka.jp", - "tondabayashi.osaka.jp", - "toyonaka.osaka.jp", - "toyono.osaka.jp", - "yao.osaka.jp", - "ariake.saga.jp", - "arita.saga.jp", - "fukudomi.saga.jp", - "genkai.saga.jp", - "hamatama.saga.jp", - "hizen.saga.jp", - "imari.saga.jp", - "kamimine.saga.jp", - "kanzaki.saga.jp", - 
"karatsu.saga.jp", - "kashima.saga.jp", - "kitagata.saga.jp", - "kitahata.saga.jp", - "kiyama.saga.jp", - "kouhoku.saga.jp", - "kyuragi.saga.jp", - "nishiarita.saga.jp", - "ogi.saga.jp", - "omachi.saga.jp", - "ouchi.saga.jp", - "saga.saga.jp", - "shiroishi.saga.jp", - "taku.saga.jp", - "tara.saga.jp", - "tosu.saga.jp", - "yoshinogari.saga.jp", - "arakawa.saitama.jp", - "asaka.saitama.jp", - "chichibu.saitama.jp", - "fujimi.saitama.jp", - "fujimino.saitama.jp", - "fukaya.saitama.jp", - "hanno.saitama.jp", - "hanyu.saitama.jp", - "hasuda.saitama.jp", - "hatogaya.saitama.jp", - "hatoyama.saitama.jp", - "hidaka.saitama.jp", - "higashichichibu.saitama.jp", - "higashimatsuyama.saitama.jp", - "honjo.saitama.jp", - "ina.saitama.jp", - "iruma.saitama.jp", - "iwatsuki.saitama.jp", - "kamiizumi.saitama.jp", - "kamikawa.saitama.jp", - "kamisato.saitama.jp", - "kasukabe.saitama.jp", - "kawagoe.saitama.jp", - "kawaguchi.saitama.jp", - "kawajima.saitama.jp", - "kazo.saitama.jp", - "kitamoto.saitama.jp", - "koshigaya.saitama.jp", - "kounosu.saitama.jp", - "kuki.saitama.jp", - "kumagaya.saitama.jp", - "matsubushi.saitama.jp", - "minano.saitama.jp", - "misato.saitama.jp", - "miyashiro.saitama.jp", - "miyoshi.saitama.jp", - "moroyama.saitama.jp", - "nagatoro.saitama.jp", - "namegawa.saitama.jp", - "niiza.saitama.jp", - "ogano.saitama.jp", - "ogawa.saitama.jp", - "ogose.saitama.jp", - "okegawa.saitama.jp", - "omiya.saitama.jp", - "otaki.saitama.jp", - "ranzan.saitama.jp", - "ryokami.saitama.jp", - "saitama.saitama.jp", - "sakado.saitama.jp", - "satte.saitama.jp", - "sayama.saitama.jp", - "shiki.saitama.jp", - "shiraoka.saitama.jp", - "soka.saitama.jp", - "sugito.saitama.jp", - "toda.saitama.jp", - "tokigawa.saitama.jp", - "tokorozawa.saitama.jp", - "tsurugashima.saitama.jp", - "urawa.saitama.jp", - "warabi.saitama.jp", - "yashio.saitama.jp", - "yokoze.saitama.jp", - "yono.saitama.jp", - "yorii.saitama.jp", - "yoshida.saitama.jp", - "yoshikawa.saitama.jp", - "yoshimi.saitama.jp", - "aisho.shiga.jp", - "gamo.shiga.jp", - "higashiomi.shiga.jp", - "hikone.shiga.jp", - "koka.shiga.jp", - "konan.shiga.jp", - "kosei.shiga.jp", - "koto.shiga.jp", - "kusatsu.shiga.jp", - "maibara.shiga.jp", - "moriyama.shiga.jp", - "nagahama.shiga.jp", - "nishiazai.shiga.jp", - "notogawa.shiga.jp", - "omihachiman.shiga.jp", - "otsu.shiga.jp", - "ritto.shiga.jp", - "ryuoh.shiga.jp", - "takashima.shiga.jp", - "takatsuki.shiga.jp", - "torahime.shiga.jp", - "toyosato.shiga.jp", - "yasu.shiga.jp", - "akagi.shimane.jp", - "ama.shimane.jp", - "gotsu.shimane.jp", - "hamada.shimane.jp", - "higashiizumo.shimane.jp", - "hikawa.shimane.jp", - "hikimi.shimane.jp", - "izumo.shimane.jp", - "kakinoki.shimane.jp", - "masuda.shimane.jp", - "matsue.shimane.jp", - "misato.shimane.jp", - "nishinoshima.shimane.jp", - "ohda.shimane.jp", - "okinoshima.shimane.jp", - "okuizumo.shimane.jp", - "shimane.shimane.jp", - "tamayu.shimane.jp", - "tsuwano.shimane.jp", - "unnan.shimane.jp", - "yakumo.shimane.jp", - "yasugi.shimane.jp", - "yatsuka.shimane.jp", - "arai.shizuoka.jp", - "atami.shizuoka.jp", - "fuji.shizuoka.jp", - "fujieda.shizuoka.jp", - "fujikawa.shizuoka.jp", - "fujinomiya.shizuoka.jp", - "fukuroi.shizuoka.jp", - "gotemba.shizuoka.jp", - "haibara.shizuoka.jp", - "hamamatsu.shizuoka.jp", - "higashiizu.shizuoka.jp", - "ito.shizuoka.jp", - "iwata.shizuoka.jp", - "izu.shizuoka.jp", - "izunokuni.shizuoka.jp", - "kakegawa.shizuoka.jp", - "kannami.shizuoka.jp", - "kawanehon.shizuoka.jp", - "kawazu.shizuoka.jp", - "kikugawa.shizuoka.jp", - "kosai.shizuoka.jp", 
- "makinohara.shizuoka.jp", - "matsuzaki.shizuoka.jp", - "minamiizu.shizuoka.jp", - "mishima.shizuoka.jp", - "morimachi.shizuoka.jp", - "nishiizu.shizuoka.jp", - "numazu.shizuoka.jp", - "omaezaki.shizuoka.jp", - "shimada.shizuoka.jp", - "shimizu.shizuoka.jp", - "shimoda.shizuoka.jp", - "shizuoka.shizuoka.jp", - "susono.shizuoka.jp", - "yaizu.shizuoka.jp", - "yoshida.shizuoka.jp", - "ashikaga.tochigi.jp", - "bato.tochigi.jp", - "haga.tochigi.jp", - "ichikai.tochigi.jp", - "iwafune.tochigi.jp", - "kaminokawa.tochigi.jp", - "kanuma.tochigi.jp", - "karasuyama.tochigi.jp", - "kuroiso.tochigi.jp", - "mashiko.tochigi.jp", - "mibu.tochigi.jp", - "moka.tochigi.jp", - "motegi.tochigi.jp", - "nasu.tochigi.jp", - "nasushiobara.tochigi.jp", - "nikko.tochigi.jp", - "nishikata.tochigi.jp", - "nogi.tochigi.jp", - "ohira.tochigi.jp", - "ohtawara.tochigi.jp", - "oyama.tochigi.jp", - "sakura.tochigi.jp", - "sano.tochigi.jp", - "shimotsuke.tochigi.jp", - "shioya.tochigi.jp", - "takanezawa.tochigi.jp", - "tochigi.tochigi.jp", - "tsuga.tochigi.jp", - "ujiie.tochigi.jp", - "utsunomiya.tochigi.jp", - "yaita.tochigi.jp", - "aizumi.tokushima.jp", - "anan.tokushima.jp", - "ichiba.tokushima.jp", - "itano.tokushima.jp", - "kainan.tokushima.jp", - "komatsushima.tokushima.jp", - "matsushige.tokushima.jp", - "mima.tokushima.jp", - "minami.tokushima.jp", - "miyoshi.tokushima.jp", - "mugi.tokushima.jp", - "nakagawa.tokushima.jp", - "naruto.tokushima.jp", - "sanagochi.tokushima.jp", - "shishikui.tokushima.jp", - "tokushima.tokushima.jp", - "wajiki.tokushima.jp", - "adachi.tokyo.jp", - "akiruno.tokyo.jp", - "akishima.tokyo.jp", - "aogashima.tokyo.jp", - "arakawa.tokyo.jp", - "bunkyo.tokyo.jp", - "chiyoda.tokyo.jp", - "chofu.tokyo.jp", - "chuo.tokyo.jp", - "edogawa.tokyo.jp", - "fuchu.tokyo.jp", - "fussa.tokyo.jp", - "hachijo.tokyo.jp", - "hachioji.tokyo.jp", - "hamura.tokyo.jp", - "higashikurume.tokyo.jp", - "higashimurayama.tokyo.jp", - "higashiyamato.tokyo.jp", - "hino.tokyo.jp", - "hinode.tokyo.jp", - "hinohara.tokyo.jp", - "inagi.tokyo.jp", - "itabashi.tokyo.jp", - "katsushika.tokyo.jp", - "kita.tokyo.jp", - "kiyose.tokyo.jp", - "kodaira.tokyo.jp", - "koganei.tokyo.jp", - "kokubunji.tokyo.jp", - "komae.tokyo.jp", - "koto.tokyo.jp", - "kouzushima.tokyo.jp", - "kunitachi.tokyo.jp", - "machida.tokyo.jp", - "meguro.tokyo.jp", - "minato.tokyo.jp", - "mitaka.tokyo.jp", - "mizuho.tokyo.jp", - "musashimurayama.tokyo.jp", - "musashino.tokyo.jp", - "nakano.tokyo.jp", - "nerima.tokyo.jp", - "ogasawara.tokyo.jp", - "okutama.tokyo.jp", - "ome.tokyo.jp", - "oshima.tokyo.jp", - "ota.tokyo.jp", - "setagaya.tokyo.jp", - "shibuya.tokyo.jp", - "shinagawa.tokyo.jp", - "shinjuku.tokyo.jp", - "suginami.tokyo.jp", - "sumida.tokyo.jp", - "tachikawa.tokyo.jp", - "taito.tokyo.jp", - "tama.tokyo.jp", - "toshima.tokyo.jp", - "chizu.tottori.jp", - "hino.tottori.jp", - "kawahara.tottori.jp", - "koge.tottori.jp", - "kotoura.tottori.jp", - "misasa.tottori.jp", - "nanbu.tottori.jp", - "nichinan.tottori.jp", - "sakaiminato.tottori.jp", - "tottori.tottori.jp", - "wakasa.tottori.jp", - "yazu.tottori.jp", - "yonago.tottori.jp", - "asahi.toyama.jp", - "fuchu.toyama.jp", - "fukumitsu.toyama.jp", - "funahashi.toyama.jp", - "himi.toyama.jp", - "imizu.toyama.jp", - "inami.toyama.jp", - "johana.toyama.jp", - "kamiichi.toyama.jp", - "kurobe.toyama.jp", - "nakaniikawa.toyama.jp", - "namerikawa.toyama.jp", - "nanto.toyama.jp", - "nyuzen.toyama.jp", - "oyabe.toyama.jp", - "taira.toyama.jp", - "takaoka.toyama.jp", - "tateyama.toyama.jp", - "toga.toyama.jp", - 
"tonami.toyama.jp", - "toyama.toyama.jp", - "unazuki.toyama.jp", - "uozu.toyama.jp", - "yamada.toyama.jp", - "arida.wakayama.jp", - "aridagawa.wakayama.jp", - "gobo.wakayama.jp", - "hashimoto.wakayama.jp", - "hidaka.wakayama.jp", - "hirogawa.wakayama.jp", - "inami.wakayama.jp", - "iwade.wakayama.jp", - "kainan.wakayama.jp", - "kamitonda.wakayama.jp", - "katsuragi.wakayama.jp", - "kimino.wakayama.jp", - "kinokawa.wakayama.jp", - "kitayama.wakayama.jp", - "koya.wakayama.jp", - "koza.wakayama.jp", - "kozagawa.wakayama.jp", - "kudoyama.wakayama.jp", - "kushimoto.wakayama.jp", - "mihama.wakayama.jp", - "misato.wakayama.jp", - "nachikatsuura.wakayama.jp", - "shingu.wakayama.jp", - "shirahama.wakayama.jp", - "taiji.wakayama.jp", - "tanabe.wakayama.jp", - "wakayama.wakayama.jp", - "yuasa.wakayama.jp", - "yura.wakayama.jp", - "asahi.yamagata.jp", - "funagata.yamagata.jp", - "higashine.yamagata.jp", - "iide.yamagata.jp", - "kahoku.yamagata.jp", - "kaminoyama.yamagata.jp", - "kaneyama.yamagata.jp", - "kawanishi.yamagata.jp", - "mamurogawa.yamagata.jp", - "mikawa.yamagata.jp", - "murayama.yamagata.jp", - "nagai.yamagata.jp", - "nakayama.yamagata.jp", - "nanyo.yamagata.jp", - "nishikawa.yamagata.jp", - "obanazawa.yamagata.jp", - "oe.yamagata.jp", - "oguni.yamagata.jp", - "ohkura.yamagata.jp", - "oishida.yamagata.jp", - "sagae.yamagata.jp", - "sakata.yamagata.jp", - "sakegawa.yamagata.jp", - "shinjo.yamagata.jp", - "shirataka.yamagata.jp", - "shonai.yamagata.jp", - "takahata.yamagata.jp", - "tendo.yamagata.jp", - "tozawa.yamagata.jp", - "tsuruoka.yamagata.jp", - "yamagata.yamagata.jp", - "yamanobe.yamagata.jp", - "yonezawa.yamagata.jp", - "yuza.yamagata.jp", - "abu.yamaguchi.jp", - "hagi.yamaguchi.jp", - "hikari.yamaguchi.jp", - "hofu.yamaguchi.jp", - "iwakuni.yamaguchi.jp", - "kudamatsu.yamaguchi.jp", - "mitou.yamaguchi.jp", - "nagato.yamaguchi.jp", - "oshima.yamaguchi.jp", - "shimonoseki.yamaguchi.jp", - "shunan.yamaguchi.jp", - "tabuse.yamaguchi.jp", - "tokuyama.yamaguchi.jp", - "toyota.yamaguchi.jp", - "ube.yamaguchi.jp", - "yuu.yamaguchi.jp", - "chuo.yamanashi.jp", - "doshi.yamanashi.jp", - "fuefuki.yamanashi.jp", - "fujikawa.yamanashi.jp", - "fujikawaguchiko.yamanashi.jp", - "fujiyoshida.yamanashi.jp", - "hayakawa.yamanashi.jp", - "hokuto.yamanashi.jp", - "ichikawamisato.yamanashi.jp", - "kai.yamanashi.jp", - "kofu.yamanashi.jp", - "koshu.yamanashi.jp", - "kosuge.yamanashi.jp", - "minami-alps.yamanashi.jp", - "minobu.yamanashi.jp", - "nakamichi.yamanashi.jp", - "nanbu.yamanashi.jp", - "narusawa.yamanashi.jp", - "nirasaki.yamanashi.jp", - "nishikatsura.yamanashi.jp", - "oshino.yamanashi.jp", - "otsuki.yamanashi.jp", - "showa.yamanashi.jp", - "tabayama.yamanashi.jp", - "tsuru.yamanashi.jp", - "uenohara.yamanashi.jp", - "yamanakako.yamanashi.jp", - "yamanashi.yamanashi.jp", - "*.ke", - "kg", - "org.kg", - "net.kg", - "com.kg", - "edu.kg", - "gov.kg", - "mil.kg", - "*.kh", - "ki", - "edu.ki", - "biz.ki", - "net.ki", - "org.ki", - "gov.ki", - "info.ki", - "com.ki", - "km", - "org.km", - "nom.km", - "gov.km", - "prd.km", - "tm.km", - "edu.km", - "mil.km", - "ass.km", - "com.km", - "coop.km", - "asso.km", - "presse.km", - "medecin.km", - "notaires.km", - "pharmaciens.km", - "veterinaire.km", - "gouv.km", - "kn", - "net.kn", - "org.kn", - "edu.kn", - "gov.kn", - "kp", - "com.kp", - "edu.kp", - "gov.kp", - "org.kp", - "rep.kp", - "tra.kp", - "kr", - "ac.kr", - "co.kr", - "es.kr", - "go.kr", - "hs.kr", - "kg.kr", - "mil.kr", - "ms.kr", - "ne.kr", - "or.kr", - "pe.kr", - "re.kr", - "sc.kr", - "busan.kr", - 
"chungbuk.kr", - "chungnam.kr", - "daegu.kr", - "daejeon.kr", - "gangwon.kr", - "gwangju.kr", - "gyeongbuk.kr", - "gyeonggi.kr", - "gyeongnam.kr", - "incheon.kr", - "jeju.kr", - "jeonbuk.kr", - "jeonnam.kr", - "seoul.kr", - "ulsan.kr", - "*.kw", - "ky", - "edu.ky", - "gov.ky", - "com.ky", - "org.ky", - "net.ky", - "kz", - "org.kz", - "edu.kz", - "net.kz", - "gov.kz", - "mil.kz", - "com.kz", - "la", - "int.la", - "net.la", - "info.la", - "edu.la", - "gov.la", - "per.la", - "com.la", - "org.la", - "lb", - "com.lb", - "edu.lb", - "gov.lb", - "net.lb", - "org.lb", - "lc", - "com.lc", - "net.lc", - "co.lc", - "org.lc", - "edu.lc", - "gov.lc", - "li", - "lk", - "gov.lk", - "sch.lk", - "net.lk", - "int.lk", - "com.lk", - "org.lk", - "edu.lk", - "ngo.lk", - "soc.lk", - "web.lk", - "ltd.lk", - "assn.lk", - "grp.lk", - "hotel.lk", - "ac.lk", - "lr", - "com.lr", - "edu.lr", - "gov.lr", - "org.lr", - "net.lr", - "ls", - "co.ls", - "org.ls", - "lt", - "gov.lt", - "lu", - "lv", - "com.lv", - "edu.lv", - "gov.lv", - "org.lv", - "mil.lv", - "id.lv", - "net.lv", - "asn.lv", - "conf.lv", - "ly", - "com.ly", - "net.ly", - "gov.ly", - "plc.ly", - "edu.ly", - "sch.ly", - "med.ly", - "org.ly", - "id.ly", - "ma", - "co.ma", - "net.ma", - "gov.ma", - "org.ma", - "ac.ma", - "press.ma", - "mc", - "tm.mc", - "asso.mc", - "md", - "me", - "co.me", - "net.me", - "org.me", - "edu.me", - "ac.me", - "gov.me", - "its.me", - "priv.me", - "mg", - "org.mg", - "nom.mg", - "gov.mg", - "prd.mg", - "tm.mg", - "edu.mg", - "mil.mg", - "com.mg", - "co.mg", - "mh", - "mil", - "mk", - "com.mk", - "org.mk", - "net.mk", - "edu.mk", - "gov.mk", - "inf.mk", - "name.mk", - "ml", - "com.ml", - "edu.ml", - "gouv.ml", - "gov.ml", - "net.ml", - "org.ml", - "presse.ml", - "*.mm", - "mn", - "gov.mn", - "edu.mn", - "org.mn", - "mo", - "com.mo", - "net.mo", - "org.mo", - "edu.mo", - "gov.mo", - "mobi", - "mp", - "mq", - "mr", - "gov.mr", - "ms", - "com.ms", - "edu.ms", - "gov.ms", - "net.ms", - "org.ms", - "mt", - "com.mt", - "edu.mt", - "net.mt", - "org.mt", - "mu", - "com.mu", - "net.mu", - "org.mu", - "gov.mu", - "ac.mu", - "co.mu", - "or.mu", - "museum", - "academy.museum", - "agriculture.museum", - "air.museum", - "airguard.museum", - "alabama.museum", - "alaska.museum", - "amber.museum", - "ambulance.museum", - "american.museum", - "americana.museum", - "americanantiques.museum", - "americanart.museum", - "amsterdam.museum", - "and.museum", - "annefrank.museum", - "anthro.museum", - "anthropology.museum", - "antiques.museum", - "aquarium.museum", - "arboretum.museum", - "archaeological.museum", - "archaeology.museum", - "architecture.museum", - "art.museum", - "artanddesign.museum", - "artcenter.museum", - "artdeco.museum", - "arteducation.museum", - "artgallery.museum", - "arts.museum", - "artsandcrafts.museum", - "asmatart.museum", - "assassination.museum", - "assisi.museum", - "association.museum", - "astronomy.museum", - "atlanta.museum", - "austin.museum", - "australia.museum", - "automotive.museum", - "aviation.museum", - "axis.museum", - "badajoz.museum", - "baghdad.museum", - "bahn.museum", - "bale.museum", - "baltimore.museum", - "barcelona.museum", - "baseball.museum", - "basel.museum", - "baths.museum", - "bauern.museum", - "beauxarts.museum", - "beeldengeluid.museum", - "bellevue.museum", - "bergbau.museum", - "berkeley.museum", - "berlin.museum", - "bern.museum", - "bible.museum", - "bilbao.museum", - "bill.museum", - "birdart.museum", - "birthplace.museum", - "bonn.museum", - "boston.museum", - "botanical.museum", - 
"botanicalgarden.museum", - "botanicgarden.museum", - "botany.museum", - "brandywinevalley.museum", - "brasil.museum", - "bristol.museum", - "british.museum", - "britishcolumbia.museum", - "broadcast.museum", - "brunel.museum", - "brussel.museum", - "brussels.museum", - "bruxelles.museum", - "building.museum", - "burghof.museum", - "bus.museum", - "bushey.museum", - "cadaques.museum", - "california.museum", - "cambridge.museum", - "can.museum", - "canada.museum", - "capebreton.museum", - "carrier.museum", - "cartoonart.museum", - "casadelamoneda.museum", - "castle.museum", - "castres.museum", - "celtic.museum", - "center.museum", - "chattanooga.museum", - "cheltenham.museum", - "chesapeakebay.museum", - "chicago.museum", - "children.museum", - "childrens.museum", - "childrensgarden.museum", - "chiropractic.museum", - "chocolate.museum", - "christiansburg.museum", - "cincinnati.museum", - "cinema.museum", - "circus.museum", - "civilisation.museum", - "civilization.museum", - "civilwar.museum", - "clinton.museum", - "clock.museum", - "coal.museum", - "coastaldefence.museum", - "cody.museum", - "coldwar.museum", - "collection.museum", - "colonialwilliamsburg.museum", - "coloradoplateau.museum", - "columbia.museum", - "columbus.museum", - "communication.museum", - "communications.museum", - "community.museum", - "computer.museum", - "computerhistory.museum", - "xn--comunicaes-v6a2o.museum", - "contemporary.museum", - "contemporaryart.museum", - "convent.museum", - "copenhagen.museum", - "corporation.museum", - "xn--correios-e-telecomunicaes-ghc29a.museum", - "corvette.museum", - "costume.museum", - "countryestate.museum", - "county.museum", - "crafts.museum", - "cranbrook.museum", - "creation.museum", - "cultural.museum", - "culturalcenter.museum", - "culture.museum", - "cyber.museum", - "cymru.museum", - "dali.museum", - "dallas.museum", - "database.museum", - "ddr.museum", - "decorativearts.museum", - "delaware.museum", - "delmenhorst.museum", - "denmark.museum", - "depot.museum", - "design.museum", - "detroit.museum", - "dinosaur.museum", - "discovery.museum", - "dolls.museum", - "donostia.museum", - "durham.museum", - "eastafrica.museum", - "eastcoast.museum", - "education.museum", - "educational.museum", - "egyptian.museum", - "eisenbahn.museum", - "elburg.museum", - "elvendrell.museum", - "embroidery.museum", - "encyclopedic.museum", - "england.museum", - "entomology.museum", - "environment.museum", - "environmentalconservation.museum", - "epilepsy.museum", - "essex.museum", - "estate.museum", - "ethnology.museum", - "exeter.museum", - "exhibition.museum", - "family.museum", - "farm.museum", - "farmequipment.museum", - "farmers.museum", - "farmstead.museum", - "field.museum", - "figueres.museum", - "filatelia.museum", - "film.museum", - "fineart.museum", - "finearts.museum", - "finland.museum", - "flanders.museum", - "florida.museum", - "force.museum", - "fortmissoula.museum", - "fortworth.museum", - "foundation.museum", - "francaise.museum", - "frankfurt.museum", - "franziskaner.museum", - "freemasonry.museum", - "freiburg.museum", - "fribourg.museum", - "frog.museum", - "fundacio.museum", - "furniture.museum", - "gallery.museum", - "garden.museum", - "gateway.museum", - "geelvinck.museum", - "gemological.museum", - "geology.museum", - "georgia.museum", - "giessen.museum", - "glas.museum", - "glass.museum", - "gorge.museum", - "grandrapids.museum", - "graz.museum", - "guernsey.museum", - "halloffame.museum", - "hamburg.museum", - "handson.museum", - "harvestcelebration.museum", - 
"hawaii.museum", - "health.museum", - "heimatunduhren.museum", - "hellas.museum", - "helsinki.museum", - "hembygdsforbund.museum", - "heritage.museum", - "histoire.museum", - "historical.museum", - "historicalsociety.museum", - "historichouses.museum", - "historisch.museum", - "historisches.museum", - "history.museum", - "historyofscience.museum", - "horology.museum", - "house.museum", - "humanities.museum", - "illustration.museum", - "imageandsound.museum", - "indian.museum", - "indiana.museum", - "indianapolis.museum", - "indianmarket.museum", - "intelligence.museum", - "interactive.museum", - "iraq.museum", - "iron.museum", - "isleofman.museum", - "jamison.museum", - "jefferson.museum", - "jerusalem.museum", - "jewelry.museum", - "jewish.museum", - "jewishart.museum", - "jfk.museum", - "journalism.museum", - "judaica.museum", - "judygarland.museum", - "juedisches.museum", - "juif.museum", - "karate.museum", - "karikatur.museum", - "kids.museum", - "koebenhavn.museum", - "koeln.museum", - "kunst.museum", - "kunstsammlung.museum", - "kunstunddesign.museum", - "labor.museum", - "labour.museum", - "lajolla.museum", - "lancashire.museum", - "landes.museum", - "lans.museum", - "xn--lns-qla.museum", - "larsson.museum", - "lewismiller.museum", - "lincoln.museum", - "linz.museum", - "living.museum", - "livinghistory.museum", - "localhistory.museum", - "london.museum", - "losangeles.museum", - "louvre.museum", - "loyalist.museum", - "lucerne.museum", - "luxembourg.museum", - "luzern.museum", - "mad.museum", - "madrid.museum", - "mallorca.museum", - "manchester.museum", - "mansion.museum", - "mansions.museum", - "manx.museum", - "marburg.museum", - "maritime.museum", - "maritimo.museum", - "maryland.museum", - "marylhurst.museum", - "media.museum", - "medical.museum", - "medizinhistorisches.museum", - "meeres.museum", - "memorial.museum", - "mesaverde.museum", - "michigan.museum", - "midatlantic.museum", - "military.museum", - "mill.museum", - "miners.museum", - "mining.museum", - "minnesota.museum", - "missile.museum", - "missoula.museum", - "modern.museum", - "moma.museum", - "money.museum", - "monmouth.museum", - "monticello.museum", - "montreal.museum", - "moscow.museum", - "motorcycle.museum", - "muenchen.museum", - "muenster.museum", - "mulhouse.museum", - "muncie.museum", - "museet.museum", - "museumcenter.museum", - "museumvereniging.museum", - "music.museum", - "national.museum", - "nationalfirearms.museum", - "nationalheritage.museum", - "nativeamerican.museum", - "naturalhistory.museum", - "naturalhistorymuseum.museum", - "naturalsciences.museum", - "nature.museum", - "naturhistorisches.museum", - "natuurwetenschappen.museum", - "naumburg.museum", - "naval.museum", - "nebraska.museum", - "neues.museum", - "newhampshire.museum", - "newjersey.museum", - "newmexico.museum", - "newport.museum", - "newspaper.museum", - "newyork.museum", - "niepce.museum", - "norfolk.museum", - "north.museum", - "nrw.museum", - "nuernberg.museum", - "nuremberg.museum", - "nyc.museum", - "nyny.museum", - "oceanographic.museum", - "oceanographique.museum", - "omaha.museum", - "online.museum", - "ontario.museum", - "openair.museum", - "oregon.museum", - "oregontrail.museum", - "otago.museum", - "oxford.museum", - "pacific.museum", - "paderborn.museum", - "palace.museum", - "paleo.museum", - "palmsprings.museum", - "panama.museum", - "paris.museum", - "pasadena.museum", - "pharmacy.museum", - "philadelphia.museum", - "philadelphiaarea.museum", - "philately.museum", - "phoenix.museum", - "photography.museum", - 
"pilots.museum", - "pittsburgh.museum", - "planetarium.museum", - "plantation.museum", - "plants.museum", - "plaza.museum", - "portal.museum", - "portland.museum", - "portlligat.museum", - "posts-and-telecommunications.museum", - "preservation.museum", - "presidio.museum", - "press.museum", - "project.museum", - "public.museum", - "pubol.museum", - "quebec.museum", - "railroad.museum", - "railway.museum", - "research.museum", - "resistance.museum", - "riodejaneiro.museum", - "rochester.museum", - "rockart.museum", - "roma.museum", - "russia.museum", - "saintlouis.museum", - "salem.museum", - "salvadordali.museum", - "salzburg.museum", - "sandiego.museum", - "sanfrancisco.museum", - "santabarbara.museum", - "santacruz.museum", - "santafe.museum", - "saskatchewan.museum", - "satx.museum", - "savannahga.museum", - "schlesisches.museum", - "schoenbrunn.museum", - "schokoladen.museum", - "school.museum", - "schweiz.museum", - "science.museum", - "scienceandhistory.museum", - "scienceandindustry.museum", - "sciencecenter.museum", - "sciencecenters.museum", - "science-fiction.museum", - "sciencehistory.museum", - "sciences.museum", - "sciencesnaturelles.museum", - "scotland.museum", - "seaport.museum", - "settlement.museum", - "settlers.museum", - "shell.museum", - "sherbrooke.museum", - "sibenik.museum", - "silk.museum", - "ski.museum", - "skole.museum", - "society.museum", - "sologne.museum", - "soundandvision.museum", - "southcarolina.museum", - "southwest.museum", - "space.museum", - "spy.museum", - "square.museum", - "stadt.museum", - "stalbans.museum", - "starnberg.museum", - "state.museum", - "stateofdelaware.museum", - "station.museum", - "steam.museum", - "steiermark.museum", - "stjohn.museum", - "stockholm.museum", - "stpetersburg.museum", - "stuttgart.museum", - "suisse.museum", - "surgeonshall.museum", - "surrey.museum", - "svizzera.museum", - "sweden.museum", - "sydney.museum", - "tank.museum", - "tcm.museum", - "technology.museum", - "telekommunikation.museum", - "television.museum", - "texas.museum", - "textile.museum", - "theater.museum", - "time.museum", - "timekeeping.museum", - "topology.museum", - "torino.museum", - "touch.museum", - "town.museum", - "transport.museum", - "tree.museum", - "trolley.museum", - "trust.museum", - "trustee.museum", - "uhren.museum", - "ulm.museum", - "undersea.museum", - "university.museum", - "usa.museum", - "usantiques.museum", - "usarts.museum", - "uscountryestate.museum", - "usculture.museum", - "usdecorativearts.museum", - "usgarden.museum", - "ushistory.museum", - "ushuaia.museum", - "uslivinghistory.museum", - "utah.museum", - "uvic.museum", - "valley.museum", - "vantaa.museum", - "versailles.museum", - "viking.museum", - "village.museum", - "virginia.museum", - "virtual.museum", - "virtuel.museum", - "vlaanderen.museum", - "volkenkunde.museum", - "wales.museum", - "wallonie.museum", - "war.museum", - "washingtondc.museum", - "watchandclock.museum", - "watch-and-clock.museum", - "western.museum", - "westfalen.museum", - "whaling.museum", - "wildlife.museum", - "williamsburg.museum", - "windmill.museum", - "workshop.museum", - "york.museum", - "yorkshire.museum", - "yosemite.museum", - "youth.museum", - "zoological.museum", - "zoology.museum", - "xn--9dbhblg6di.museum", - "xn--h1aegh.museum", - "mv", - "aero.mv", - "biz.mv", - "com.mv", - "coop.mv", - "edu.mv", - "gov.mv", - "info.mv", - "int.mv", - "mil.mv", - "museum.mv", - "name.mv", - "net.mv", - "org.mv", - "pro.mv", - "mw", - "ac.mw", - "biz.mw", - "co.mw", - "com.mw", - "coop.mw", - 
"edu.mw", - "gov.mw", - "int.mw", - "museum.mw", - "net.mw", - "org.mw", - "mx", - "com.mx", - "org.mx", - "gob.mx", - "edu.mx", - "net.mx", - "my", - "com.my", - "net.my", - "org.my", - "gov.my", - "edu.my", - "mil.my", - "name.my", - "mz", - "ac.mz", - "adv.mz", - "co.mz", - "edu.mz", - "gov.mz", - "mil.mz", - "net.mz", - "org.mz", - "na", - "info.na", - "pro.na", - "name.na", - "school.na", - "or.na", - "dr.na", - "us.na", - "mx.na", - "ca.na", - "in.na", - "cc.na", - "tv.na", - "ws.na", - "mobi.na", - "co.na", - "com.na", - "org.na", - "name", - "nc", - "asso.nc", - "nom.nc", - "ne", - "net", - "nf", - "com.nf", - "net.nf", - "per.nf", - "rec.nf", - "web.nf", - "arts.nf", - "firm.nf", - "info.nf", - "other.nf", - "store.nf", - "ng", - "com.ng", - "edu.ng", - "gov.ng", - "i.ng", - "mil.ng", - "mobi.ng", - "name.ng", - "net.ng", - "org.ng", - "sch.ng", - "ni", - "ac.ni", - "biz.ni", - "co.ni", - "com.ni", - "edu.ni", - "gob.ni", - "in.ni", - "info.ni", - "int.ni", - "mil.ni", - "net.ni", - "nom.ni", - "org.ni", - "web.ni", - "nl", - "bv.nl", - "no", - "fhs.no", - "vgs.no", - "fylkesbibl.no", - "folkebibl.no", - "museum.no", - "idrett.no", - "priv.no", - "mil.no", - "stat.no", - "dep.no", - "kommune.no", - "herad.no", - "aa.no", - "ah.no", - "bu.no", - "fm.no", - "hl.no", - "hm.no", - "jan-mayen.no", - "mr.no", - "nl.no", - "nt.no", - "of.no", - "ol.no", - "oslo.no", - "rl.no", - "sf.no", - "st.no", - "svalbard.no", - "tm.no", - "tr.no", - "va.no", - "vf.no", - "gs.aa.no", - "gs.ah.no", - "gs.bu.no", - "gs.fm.no", - "gs.hl.no", - "gs.hm.no", - "gs.jan-mayen.no", - "gs.mr.no", - "gs.nl.no", - "gs.nt.no", - "gs.of.no", - "gs.ol.no", - "gs.oslo.no", - "gs.rl.no", - "gs.sf.no", - "gs.st.no", - "gs.svalbard.no", - "gs.tm.no", - "gs.tr.no", - "gs.va.no", - "gs.vf.no", - "akrehamn.no", - "xn--krehamn-dxa.no", - "algard.no", - "xn--lgrd-poac.no", - "arna.no", - "brumunddal.no", - "bryne.no", - "bronnoysund.no", - "xn--brnnysund-m8ac.no", - "drobak.no", - "xn--drbak-wua.no", - "egersund.no", - "fetsund.no", - "floro.no", - "xn--flor-jra.no", - "fredrikstad.no", - "hokksund.no", - "honefoss.no", - "xn--hnefoss-q1a.no", - "jessheim.no", - "jorpeland.no", - "xn--jrpeland-54a.no", - "kirkenes.no", - "kopervik.no", - "krokstadelva.no", - "langevag.no", - "xn--langevg-jxa.no", - "leirvik.no", - "mjondalen.no", - "xn--mjndalen-64a.no", - "mo-i-rana.no", - "mosjoen.no", - "xn--mosjen-eya.no", - "nesoddtangen.no", - "orkanger.no", - "osoyro.no", - "xn--osyro-wua.no", - "raholt.no", - "xn--rholt-mra.no", - "sandnessjoen.no", - "xn--sandnessjen-ogb.no", - "skedsmokorset.no", - "slattum.no", - "spjelkavik.no", - "stathelle.no", - "stavern.no", - "stjordalshalsen.no", - "xn--stjrdalshalsen-sqb.no", - "tananger.no", - "tranby.no", - "vossevangen.no", - "afjord.no", - "xn--fjord-lra.no", - "agdenes.no", - "al.no", - "xn--l-1fa.no", - "alesund.no", - "xn--lesund-hua.no", - "alstahaug.no", - "alta.no", - "xn--lt-liac.no", - "alaheadju.no", - "xn--laheadju-7ya.no", - "alvdal.no", - "amli.no", - "xn--mli-tla.no", - "amot.no", - "xn--mot-tla.no", - "andebu.no", - "andoy.no", - "xn--andy-ira.no", - "andasuolo.no", - "ardal.no", - "xn--rdal-poa.no", - "aremark.no", - "arendal.no", - "xn--s-1fa.no", - "aseral.no", - "xn--seral-lra.no", - "asker.no", - "askim.no", - "askvoll.no", - "askoy.no", - "xn--asky-ira.no", - "asnes.no", - "xn--snes-poa.no", - "audnedaln.no", - "aukra.no", - "aure.no", - "aurland.no", - "aurskog-holand.no", - "xn--aurskog-hland-jnb.no", - "austevoll.no", - "austrheim.no", - "averoy.no", - 
"xn--avery-yua.no", - "balestrand.no", - "ballangen.no", - "balat.no", - "xn--blt-elab.no", - "balsfjord.no", - "bahccavuotna.no", - "xn--bhccavuotna-k7a.no", - "bamble.no", - "bardu.no", - "beardu.no", - "beiarn.no", - "bajddar.no", - "xn--bjddar-pta.no", - "baidar.no", - "xn--bidr-5nac.no", - "berg.no", - "bergen.no", - "berlevag.no", - "xn--berlevg-jxa.no", - "bearalvahki.no", - "xn--bearalvhki-y4a.no", - "bindal.no", - "birkenes.no", - "bjarkoy.no", - "xn--bjarky-fya.no", - "bjerkreim.no", - "bjugn.no", - "bodo.no", - "xn--bod-2na.no", - "badaddja.no", - "xn--bdddj-mrabd.no", - "budejju.no", - "bokn.no", - "bremanger.no", - "bronnoy.no", - "xn--brnny-wuac.no", - "bygland.no", - "bykle.no", - "barum.no", - "xn--brum-voa.no", - "bo.telemark.no", - "xn--b-5ga.telemark.no", - "bo.nordland.no", - "xn--b-5ga.nordland.no", - "bievat.no", - "xn--bievt-0qa.no", - "bomlo.no", - "xn--bmlo-gra.no", - "batsfjord.no", - "xn--btsfjord-9za.no", - "bahcavuotna.no", - "xn--bhcavuotna-s4a.no", - "dovre.no", - "drammen.no", - "drangedal.no", - "dyroy.no", - "xn--dyry-ira.no", - "donna.no", - "xn--dnna-gra.no", - "eid.no", - "eidfjord.no", - "eidsberg.no", - "eidskog.no", - "eidsvoll.no", - "eigersund.no", - "elverum.no", - "enebakk.no", - "engerdal.no", - "etne.no", - "etnedal.no", - "evenes.no", - "evenassi.no", - "xn--eveni-0qa01ga.no", - "evje-og-hornnes.no", - "farsund.no", - "fauske.no", - "fuossko.no", - "fuoisku.no", - "fedje.no", - "fet.no", - "finnoy.no", - "xn--finny-yua.no", - "fitjar.no", - "fjaler.no", - "fjell.no", - "flakstad.no", - "flatanger.no", - "flekkefjord.no", - "flesberg.no", - "flora.no", - "fla.no", - "xn--fl-zia.no", - "folldal.no", - "forsand.no", - "fosnes.no", - "frei.no", - "frogn.no", - "froland.no", - "frosta.no", - "frana.no", - "xn--frna-woa.no", - "froya.no", - "xn--frya-hra.no", - "fusa.no", - "fyresdal.no", - "forde.no", - "xn--frde-gra.no", - "gamvik.no", - "gangaviika.no", - "xn--ggaviika-8ya47h.no", - "gaular.no", - "gausdal.no", - "gildeskal.no", - "xn--gildeskl-g0a.no", - "giske.no", - "gjemnes.no", - "gjerdrum.no", - "gjerstad.no", - "gjesdal.no", - "gjovik.no", - "xn--gjvik-wua.no", - "gloppen.no", - "gol.no", - "gran.no", - "grane.no", - "granvin.no", - "gratangen.no", - "grimstad.no", - "grong.no", - "kraanghke.no", - "xn--kranghke-b0a.no", - "grue.no", - "gulen.no", - "hadsel.no", - "halden.no", - "halsa.no", - "hamar.no", - "hamaroy.no", - "habmer.no", - "xn--hbmer-xqa.no", - "hapmir.no", - "xn--hpmir-xqa.no", - "hammerfest.no", - "hammarfeasta.no", - "xn--hmmrfeasta-s4ac.no", - "haram.no", - "hareid.no", - "harstad.no", - "hasvik.no", - "aknoluokta.no", - "xn--koluokta-7ya57h.no", - "hattfjelldal.no", - "aarborte.no", - "haugesund.no", - "hemne.no", - "hemnes.no", - "hemsedal.no", - "heroy.more-og-romsdal.no", - "xn--hery-ira.xn--mre-og-romsdal-qqb.no", - "heroy.nordland.no", - "xn--hery-ira.nordland.no", - "hitra.no", - "hjartdal.no", - "hjelmeland.no", - "hobol.no", - "xn--hobl-ira.no", - "hof.no", - "hol.no", - "hole.no", - "holmestrand.no", - "holtalen.no", - "xn--holtlen-hxa.no", - "hornindal.no", - "horten.no", - "hurdal.no", - "hurum.no", - "hvaler.no", - "hyllestad.no", - "hagebostad.no", - "xn--hgebostad-g3a.no", - "hoyanger.no", - "xn--hyanger-q1a.no", - "hoylandet.no", - "xn--hylandet-54a.no", - "ha.no", - "xn--h-2fa.no", - "ibestad.no", - "inderoy.no", - "xn--indery-fya.no", - "iveland.no", - "jevnaker.no", - "jondal.no", - "jolster.no", - "xn--jlster-bya.no", - "karasjok.no", - "karasjohka.no", - "xn--krjohka-hwab49j.no", - "karlsoy.no", - 
"galsa.no", - "xn--gls-elac.no", - "karmoy.no", - "xn--karmy-yua.no", - "kautokeino.no", - "guovdageaidnu.no", - "klepp.no", - "klabu.no", - "xn--klbu-woa.no", - "kongsberg.no", - "kongsvinger.no", - "kragero.no", - "xn--krager-gya.no", - "kristiansand.no", - "kristiansund.no", - "krodsherad.no", - "xn--krdsherad-m8a.no", - "kvalsund.no", - "rahkkeravju.no", - "xn--rhkkervju-01af.no", - "kvam.no", - "kvinesdal.no", - "kvinnherad.no", - "kviteseid.no", - "kvitsoy.no", - "xn--kvitsy-fya.no", - "kvafjord.no", - "xn--kvfjord-nxa.no", - "giehtavuoatna.no", - "kvanangen.no", - "xn--kvnangen-k0a.no", - "navuotna.no", - "xn--nvuotna-hwa.no", - "kafjord.no", - "xn--kfjord-iua.no", - "gaivuotna.no", - "xn--givuotna-8ya.no", - "larvik.no", - "lavangen.no", - "lavagis.no", - "loabat.no", - "xn--loabt-0qa.no", - "lebesby.no", - "davvesiida.no", - "leikanger.no", - "leirfjord.no", - "leka.no", - "leksvik.no", - "lenvik.no", - "leangaviika.no", - "xn--leagaviika-52b.no", - "lesja.no", - "levanger.no", - "lier.no", - "lierne.no", - "lillehammer.no", - "lillesand.no", - "lindesnes.no", - "lindas.no", - "xn--linds-pra.no", - "lom.no", - "loppa.no", - "lahppi.no", - "xn--lhppi-xqa.no", - "lund.no", - "lunner.no", - "luroy.no", - "xn--lury-ira.no", - "luster.no", - "lyngdal.no", - "lyngen.no", - "ivgu.no", - "lardal.no", - "lerdal.no", - "xn--lrdal-sra.no", - "lodingen.no", - "xn--ldingen-q1a.no", - "lorenskog.no", - "xn--lrenskog-54a.no", - "loten.no", - "xn--lten-gra.no", - "malvik.no", - "masoy.no", - "xn--msy-ula0h.no", - "muosat.no", - "xn--muost-0qa.no", - "mandal.no", - "marker.no", - "marnardal.no", - "masfjorden.no", - "meland.no", - "meldal.no", - "melhus.no", - "meloy.no", - "xn--mely-ira.no", - "meraker.no", - "xn--merker-kua.no", - "moareke.no", - "xn--moreke-jua.no", - "midsund.no", - "midtre-gauldal.no", - "modalen.no", - "modum.no", - "molde.no", - "moskenes.no", - "moss.no", - "mosvik.no", - "malselv.no", - "xn--mlselv-iua.no", - "malatvuopmi.no", - "xn--mlatvuopmi-s4a.no", - "namdalseid.no", - "aejrie.no", - "namsos.no", - "namsskogan.no", - "naamesjevuemie.no", - "xn--nmesjevuemie-tcba.no", - "laakesvuemie.no", - "nannestad.no", - "narvik.no", - "narviika.no", - "naustdal.no", - "nedre-eiker.no", - "nes.akershus.no", - "nes.buskerud.no", - "nesna.no", - "nesodden.no", - "nesseby.no", - "unjarga.no", - "xn--unjrga-rta.no", - "nesset.no", - "nissedal.no", - "nittedal.no", - "nord-aurdal.no", - "nord-fron.no", - "nord-odal.no", - "norddal.no", - "nordkapp.no", - "davvenjarga.no", - "xn--davvenjrga-y4a.no", - "nordre-land.no", - "nordreisa.no", - "raisa.no", - "xn--risa-5na.no", - "nore-og-uvdal.no", - "notodden.no", - "naroy.no", - "xn--nry-yla5g.no", - "notteroy.no", - "xn--nttery-byae.no", - "odda.no", - "oksnes.no", - "xn--ksnes-uua.no", - "oppdal.no", - "oppegard.no", - "xn--oppegrd-ixa.no", - "orkdal.no", - "orland.no", - "xn--rland-uua.no", - "orskog.no", - "xn--rskog-uua.no", - "orsta.no", - "xn--rsta-fra.no", - "os.hedmark.no", - "os.hordaland.no", - "osen.no", - "osteroy.no", - "xn--ostery-fya.no", - "ostre-toten.no", - "xn--stre-toten-zcb.no", - "overhalla.no", - "ovre-eiker.no", - "xn--vre-eiker-k8a.no", - "oyer.no", - "xn--yer-zna.no", - "oygarden.no", - "xn--ygarden-p1a.no", - "oystre-slidre.no", - "xn--ystre-slidre-ujb.no", - "porsanger.no", - "porsangu.no", - "xn--porsgu-sta26f.no", - "porsgrunn.no", - "radoy.no", - "xn--rady-ira.no", - "rakkestad.no", - "rana.no", - "ruovat.no", - "randaberg.no", - "rauma.no", - "rendalen.no", - "rennebu.no", - "rennesoy.no", - 
"xn--rennesy-v1a.no", - "rindal.no", - "ringebu.no", - "ringerike.no", - "ringsaker.no", - "rissa.no", - "risor.no", - "xn--risr-ira.no", - "roan.no", - "rollag.no", - "rygge.no", - "ralingen.no", - "xn--rlingen-mxa.no", - "rodoy.no", - "xn--rdy-0nab.no", - "romskog.no", - "xn--rmskog-bya.no", - "roros.no", - "xn--rros-gra.no", - "rost.no", - "xn--rst-0na.no", - "royken.no", - "xn--ryken-vua.no", - "royrvik.no", - "xn--ryrvik-bya.no", - "rade.no", - "xn--rde-ula.no", - "salangen.no", - "siellak.no", - "saltdal.no", - "salat.no", - "xn--slt-elab.no", - "xn--slat-5na.no", - "samnanger.no", - "sande.more-og-romsdal.no", - "sande.xn--mre-og-romsdal-qqb.no", - "sande.vestfold.no", - "sandefjord.no", - "sandnes.no", - "sandoy.no", - "xn--sandy-yua.no", - "sarpsborg.no", - "sauda.no", - "sauherad.no", - "sel.no", - "selbu.no", - "selje.no", - "seljord.no", - "sigdal.no", - "siljan.no", - "sirdal.no", - "skaun.no", - "skedsmo.no", - "ski.no", - "skien.no", - "skiptvet.no", - "skjervoy.no", - "xn--skjervy-v1a.no", - "skierva.no", - "xn--skierv-uta.no", - "skjak.no", - "xn--skjk-soa.no", - "skodje.no", - "skanland.no", - "xn--sknland-fxa.no", - "skanit.no", - "xn--sknit-yqa.no", - "smola.no", - "xn--smla-hra.no", - "snillfjord.no", - "snasa.no", - "xn--snsa-roa.no", - "snoasa.no", - "snaase.no", - "xn--snase-nra.no", - "sogndal.no", - "sokndal.no", - "sola.no", - "solund.no", - "songdalen.no", - "sortland.no", - "spydeberg.no", - "stange.no", - "stavanger.no", - "steigen.no", - "steinkjer.no", - "stjordal.no", - "xn--stjrdal-s1a.no", - "stokke.no", - "stor-elvdal.no", - "stord.no", - "stordal.no", - "storfjord.no", - "omasvuotna.no", - "strand.no", - "stranda.no", - "stryn.no", - "sula.no", - "suldal.no", - "sund.no", - "sunndal.no", - "surnadal.no", - "sveio.no", - "svelvik.no", - "sykkylven.no", - "sogne.no", - "xn--sgne-gra.no", - "somna.no", - "xn--smna-gra.no", - "sondre-land.no", - "xn--sndre-land-0cb.no", - "sor-aurdal.no", - "xn--sr-aurdal-l8a.no", - "sor-fron.no", - "xn--sr-fron-q1a.no", - "sor-odal.no", - "xn--sr-odal-q1a.no", - "sor-varanger.no", - "xn--sr-varanger-ggb.no", - "matta-varjjat.no", - "xn--mtta-vrjjat-k7af.no", - "sorfold.no", - "xn--srfold-bya.no", - "sorreisa.no", - "xn--srreisa-q1a.no", - "sorum.no", - "xn--srum-gra.no", - "tana.no", - "deatnu.no", - "time.no", - "tingvoll.no", - "tinn.no", - "tjeldsund.no", - "dielddanuorri.no", - "tjome.no", - "xn--tjme-hra.no", - "tokke.no", - "tolga.no", - "torsken.no", - "tranoy.no", - "xn--trany-yua.no", - "tromso.no", - "xn--troms-zua.no", - "tromsa.no", - "romsa.no", - "trondheim.no", - "troandin.no", - "trysil.no", - "trana.no", - "xn--trna-woa.no", - "trogstad.no", - "xn--trgstad-r1a.no", - "tvedestrand.no", - "tydal.no", - "tynset.no", - "tysfjord.no", - "divtasvuodna.no", - "divttasvuotna.no", - "tysnes.no", - "tysvar.no", - "xn--tysvr-vra.no", - "tonsberg.no", - "xn--tnsberg-q1a.no", - "ullensaker.no", - "ullensvang.no", - "ulvik.no", - "utsira.no", - "vadso.no", - "xn--vads-jra.no", - "cahcesuolo.no", - "xn--hcesuolo-7ya35b.no", - "vaksdal.no", - "valle.no", - "vang.no", - "vanylven.no", - "vardo.no", - "xn--vard-jra.no", - "varggat.no", - "xn--vrggt-xqad.no", - "vefsn.no", - "vaapste.no", - "vega.no", - "vegarshei.no", - "xn--vegrshei-c0a.no", - "vennesla.no", - "verdal.no", - "verran.no", - "vestby.no", - "vestnes.no", - "vestre-slidre.no", - "vestre-toten.no", - "vestvagoy.no", - "xn--vestvgy-ixa6o.no", - "vevelstad.no", - "vik.no", - "vikna.no", - "vindafjord.no", - "volda.no", - "voss.no", - "varoy.no", - 
"xn--vry-yla5g.no", - "vagan.no", - "xn--vgan-qoa.no", - "voagat.no", - "vagsoy.no", - "xn--vgsy-qoa0j.no", - "vaga.no", - "xn--vg-yiab.no", - "valer.ostfold.no", - "xn--vler-qoa.xn--stfold-9xa.no", - "valer.hedmark.no", - "xn--vler-qoa.hedmark.no", - "*.np", - "nr", - "biz.nr", - "info.nr", - "gov.nr", - "edu.nr", - "org.nr", - "net.nr", - "com.nr", - "nu", - "nz", - "ac.nz", - "co.nz", - "cri.nz", - "geek.nz", - "gen.nz", - "govt.nz", - "health.nz", - "iwi.nz", - "kiwi.nz", - "maori.nz", - "mil.nz", - "xn--mori-qsa.nz", - "net.nz", - "org.nz", - "parliament.nz", - "school.nz", - "om", - "co.om", - "com.om", - "edu.om", - "gov.om", - "med.om", - "museum.om", - "net.om", - "org.om", - "pro.om", - "onion", - "org", - "pa", - "ac.pa", - "gob.pa", - "com.pa", - "org.pa", - "sld.pa", - "edu.pa", - "net.pa", - "ing.pa", - "abo.pa", - "med.pa", - "nom.pa", - "pe", - "edu.pe", - "gob.pe", - "nom.pe", - "mil.pe", - "org.pe", - "com.pe", - "net.pe", - "pf", - "com.pf", - "org.pf", - "edu.pf", - "*.pg", - "ph", - "com.ph", - "net.ph", - "org.ph", - "gov.ph", - "edu.ph", - "ngo.ph", - "mil.ph", - "i.ph", - "pk", - "com.pk", - "net.pk", - "edu.pk", - "org.pk", - "fam.pk", - "biz.pk", - "web.pk", - "gov.pk", - "gob.pk", - "gok.pk", - "gon.pk", - "gop.pk", - "gos.pk", - "info.pk", - "pl", - "com.pl", - "net.pl", - "org.pl", - "aid.pl", - "agro.pl", - "atm.pl", - "auto.pl", - "biz.pl", - "edu.pl", - "gmina.pl", - "gsm.pl", - "info.pl", - "mail.pl", - "miasta.pl", - "media.pl", - "mil.pl", - "nieruchomosci.pl", - "nom.pl", - "pc.pl", - "powiat.pl", - "priv.pl", - "realestate.pl", - "rel.pl", - "sex.pl", - "shop.pl", - "sklep.pl", - "sos.pl", - "szkola.pl", - "targi.pl", - "tm.pl", - "tourism.pl", - "travel.pl", - "turystyka.pl", - "gov.pl", - "ap.gov.pl", - "ic.gov.pl", - "is.gov.pl", - "us.gov.pl", - "kmpsp.gov.pl", - "kppsp.gov.pl", - "kwpsp.gov.pl", - "psp.gov.pl", - "wskr.gov.pl", - "kwp.gov.pl", - "mw.gov.pl", - "ug.gov.pl", - "um.gov.pl", - "umig.gov.pl", - "ugim.gov.pl", - "upow.gov.pl", - "uw.gov.pl", - "starostwo.gov.pl", - "pa.gov.pl", - "po.gov.pl", - "psse.gov.pl", - "pup.gov.pl", - "rzgw.gov.pl", - "sa.gov.pl", - "so.gov.pl", - "sr.gov.pl", - "wsa.gov.pl", - "sko.gov.pl", - "uzs.gov.pl", - "wiih.gov.pl", - "winb.gov.pl", - "pinb.gov.pl", - "wios.gov.pl", - "witd.gov.pl", - "wzmiuw.gov.pl", - "piw.gov.pl", - "wiw.gov.pl", - "griw.gov.pl", - "wif.gov.pl", - "oum.gov.pl", - "sdn.gov.pl", - "zp.gov.pl", - "uppo.gov.pl", - "mup.gov.pl", - "wuoz.gov.pl", - "konsulat.gov.pl", - "oirm.gov.pl", - "augustow.pl", - "babia-gora.pl", - "bedzin.pl", - "beskidy.pl", - "bialowieza.pl", - "bialystok.pl", - "bielawa.pl", - "bieszczady.pl", - "boleslawiec.pl", - "bydgoszcz.pl", - "bytom.pl", - "cieszyn.pl", - "czeladz.pl", - "czest.pl", - "dlugoleka.pl", - "elblag.pl", - "elk.pl", - "glogow.pl", - "gniezno.pl", - "gorlice.pl", - "grajewo.pl", - "ilawa.pl", - "jaworzno.pl", - "jelenia-gora.pl", - "jgora.pl", - "kalisz.pl", - "kazimierz-dolny.pl", - "karpacz.pl", - "kartuzy.pl", - "kaszuby.pl", - "katowice.pl", - "kepno.pl", - "ketrzyn.pl", - "klodzko.pl", - "kobierzyce.pl", - "kolobrzeg.pl", - "konin.pl", - "konskowola.pl", - "kutno.pl", - "lapy.pl", - "lebork.pl", - "legnica.pl", - "lezajsk.pl", - "limanowa.pl", - "lomza.pl", - "lowicz.pl", - "lubin.pl", - "lukow.pl", - "malbork.pl", - "malopolska.pl", - "mazowsze.pl", - "mazury.pl", - "mielec.pl", - "mielno.pl", - "mragowo.pl", - "naklo.pl", - "nowaruda.pl", - "nysa.pl", - "olawa.pl", - "olecko.pl", - "olkusz.pl", - "olsztyn.pl", - "opoczno.pl", - "opole.pl", 
- "ostroda.pl", - "ostroleka.pl", - "ostrowiec.pl", - "ostrowwlkp.pl", - "pila.pl", - "pisz.pl", - "podhale.pl", - "podlasie.pl", - "polkowice.pl", - "pomorze.pl", - "pomorskie.pl", - "prochowice.pl", - "pruszkow.pl", - "przeworsk.pl", - "pulawy.pl", - "radom.pl", - "rawa-maz.pl", - "rybnik.pl", - "rzeszow.pl", - "sanok.pl", - "sejny.pl", - "slask.pl", - "slupsk.pl", - "sosnowiec.pl", - "stalowa-wola.pl", - "skoczow.pl", - "starachowice.pl", - "stargard.pl", - "suwalki.pl", - "swidnica.pl", - "swiebodzin.pl", - "swinoujscie.pl", - "szczecin.pl", - "szczytno.pl", - "tarnobrzeg.pl", - "tgory.pl", - "turek.pl", - "tychy.pl", - "ustka.pl", - "walbrzych.pl", - "warmia.pl", - "warszawa.pl", - "waw.pl", - "wegrow.pl", - "wielun.pl", - "wlocl.pl", - "wloclawek.pl", - "wodzislaw.pl", - "wolomin.pl", - "wroclaw.pl", - "zachpomor.pl", - "zagan.pl", - "zarow.pl", - "zgora.pl", - "zgorzelec.pl", - "pm", - "pn", - "gov.pn", - "co.pn", - "org.pn", - "edu.pn", - "net.pn", - "post", - "pr", - "com.pr", - "net.pr", - "org.pr", - "gov.pr", - "edu.pr", - "isla.pr", - "pro.pr", - "biz.pr", - "info.pr", - "name.pr", - "est.pr", - "prof.pr", - "ac.pr", - "pro", - "aaa.pro", - "aca.pro", - "acct.pro", - "avocat.pro", - "bar.pro", - "cpa.pro", - "eng.pro", - "jur.pro", - "law.pro", - "med.pro", - "recht.pro", - "ps", - "edu.ps", - "gov.ps", - "sec.ps", - "plo.ps", - "com.ps", - "org.ps", - "net.ps", - "pt", - "net.pt", - "gov.pt", - "org.pt", - "edu.pt", - "int.pt", - "publ.pt", - "com.pt", - "nome.pt", - "pw", - "co.pw", - "ne.pw", - "or.pw", - "ed.pw", - "go.pw", - "belau.pw", - "py", - "com.py", - "coop.py", - "edu.py", - "gov.py", - "mil.py", - "net.py", - "org.py", - "qa", - "com.qa", - "edu.qa", - "gov.qa", - "mil.qa", - "name.qa", - "net.qa", - "org.qa", - "sch.qa", - "re", - "asso.re", - "com.re", - "nom.re", - "ro", - "arts.ro", - "com.ro", - "firm.ro", - "info.ro", - "nom.ro", - "nt.ro", - "org.ro", - "rec.ro", - "store.ro", - "tm.ro", - "www.ro", - "rs", - "ac.rs", - "co.rs", - "edu.rs", - "gov.rs", - "in.rs", - "org.rs", - "ru", - "ac.ru", - "edu.ru", - "gov.ru", - "int.ru", - "mil.ru", - "test.ru", - "rw", - "gov.rw", - "net.rw", - "edu.rw", - "ac.rw", - "com.rw", - "co.rw", - "int.rw", - "mil.rw", - "gouv.rw", - "sa", - "com.sa", - "net.sa", - "org.sa", - "gov.sa", - "med.sa", - "pub.sa", - "edu.sa", - "sch.sa", - "sb", - "com.sb", - "edu.sb", - "gov.sb", - "net.sb", - "org.sb", - "sc", - "com.sc", - "gov.sc", - "net.sc", - "org.sc", - "edu.sc", - "sd", - "com.sd", - "net.sd", - "org.sd", - "edu.sd", - "med.sd", - "tv.sd", - "gov.sd", - "info.sd", - "se", - "a.se", - "ac.se", - "b.se", - "bd.se", - "brand.se", - "c.se", - "d.se", - "e.se", - "f.se", - "fh.se", - "fhsk.se", - "fhv.se", - "g.se", - "h.se", - "i.se", - "k.se", - "komforb.se", - "kommunalforbund.se", - "komvux.se", - "l.se", - "lanbib.se", - "m.se", - "n.se", - "naturbruksgymn.se", - "o.se", - "org.se", - "p.se", - "parti.se", - "pp.se", - "press.se", - "r.se", - "s.se", - "t.se", - "tm.se", - "u.se", - "w.se", - "x.se", - "y.se", - "z.se", - "sg", - "com.sg", - "net.sg", - "org.sg", - "gov.sg", - "edu.sg", - "per.sg", - "sh", - "com.sh", - "net.sh", - "gov.sh", - "org.sh", - "mil.sh", - "si", - "sj", - "sk", - "sl", - "com.sl", - "net.sl", - "edu.sl", - "gov.sl", - "org.sl", - "sm", - "sn", - "art.sn", - "com.sn", - "edu.sn", - "gouv.sn", - "org.sn", - "perso.sn", - "univ.sn", - "so", - "com.so", - "net.so", - "org.so", - "sr", - "st", - "co.st", - "com.st", - "consulado.st", - "edu.st", - "embaixada.st", - "gov.st", - "mil.st", - 
"net.st", - "org.st", - "principe.st", - "saotome.st", - "store.st", - "su", - "sv", - "com.sv", - "edu.sv", - "gob.sv", - "org.sv", - "red.sv", - "sx", - "gov.sx", - "sy", - "edu.sy", - "gov.sy", - "net.sy", - "mil.sy", - "com.sy", - "org.sy", - "sz", - "co.sz", - "ac.sz", - "org.sz", - "tc", - "td", - "tel", - "tf", - "tg", - "th", - "ac.th", - "co.th", - "go.th", - "in.th", - "mi.th", - "net.th", - "or.th", - "tj", - "ac.tj", - "biz.tj", - "co.tj", - "com.tj", - "edu.tj", - "go.tj", - "gov.tj", - "int.tj", - "mil.tj", - "name.tj", - "net.tj", - "nic.tj", - "org.tj", - "test.tj", - "web.tj", - "tk", - "tl", - "gov.tl", - "tm", - "com.tm", - "co.tm", - "org.tm", - "net.tm", - "nom.tm", - "gov.tm", - "mil.tm", - "edu.tm", - "tn", - "com.tn", - "ens.tn", - "fin.tn", - "gov.tn", - "ind.tn", - "intl.tn", - "nat.tn", - "net.tn", - "org.tn", - "info.tn", - "perso.tn", - "tourism.tn", - "edunet.tn", - "rnrt.tn", - "rns.tn", - "rnu.tn", - "mincom.tn", - "agrinet.tn", - "defense.tn", - "turen.tn", - "to", - "com.to", - "gov.to", - "net.to", - "org.to", - "edu.to", - "mil.to", - "tr", - "com.tr", - "info.tr", - "biz.tr", - "net.tr", - "org.tr", - "web.tr", - "gen.tr", - "tv.tr", - "av.tr", - "dr.tr", - "bbs.tr", - "name.tr", - "tel.tr", - "gov.tr", - "bel.tr", - "pol.tr", - "mil.tr", - "k12.tr", - "edu.tr", - "kep.tr", - "nc.tr", - "gov.nc.tr", - "travel", - "tt", - "co.tt", - "com.tt", - "org.tt", - "net.tt", - "biz.tt", - "info.tt", - "pro.tt", - "int.tt", - "coop.tt", - "jobs.tt", - "mobi.tt", - "travel.tt", - "museum.tt", - "aero.tt", - "name.tt", - "gov.tt", - "edu.tt", - "tv", - "tw", - "edu.tw", - "gov.tw", - "mil.tw", - "com.tw", - "net.tw", - "org.tw", - "idv.tw", - "game.tw", - "ebiz.tw", - "club.tw", - "xn--zf0ao64a.tw", - "xn--uc0atv.tw", - "xn--czrw28b.tw", - "tz", - "ac.tz", - "co.tz", - "go.tz", - "hotel.tz", - "info.tz", - "me.tz", - "mil.tz", - "mobi.tz", - "ne.tz", - "or.tz", - "sc.tz", - "tv.tz", - "ua", - "com.ua", - "edu.ua", - "gov.ua", - "in.ua", - "net.ua", - "org.ua", - "cherkassy.ua", - "cherkasy.ua", - "chernigov.ua", - "chernihiv.ua", - "chernivtsi.ua", - "chernovtsy.ua", - "ck.ua", - "cn.ua", - "cr.ua", - "crimea.ua", - "cv.ua", - "dn.ua", - "dnepropetrovsk.ua", - "dnipropetrovsk.ua", - "dominic.ua", - "donetsk.ua", - "dp.ua", - "if.ua", - "ivano-frankivsk.ua", - "kh.ua", - "kharkiv.ua", - "kharkov.ua", - "kherson.ua", - "khmelnitskiy.ua", - "khmelnytskyi.ua", - "kiev.ua", - "kirovograd.ua", - "km.ua", - "kr.ua", - "krym.ua", - "ks.ua", - "kv.ua", - "kyiv.ua", - "lg.ua", - "lt.ua", - "lugansk.ua", - "lutsk.ua", - "lv.ua", - "lviv.ua", - "mk.ua", - "mykolaiv.ua", - "nikolaev.ua", - "od.ua", - "odesa.ua", - "odessa.ua", - "pl.ua", - "poltava.ua", - "rivne.ua", - "rovno.ua", - "rv.ua", - "sb.ua", - "sebastopol.ua", - "sevastopol.ua", - "sm.ua", - "sumy.ua", - "te.ua", - "ternopil.ua", - "uz.ua", - "uzhgorod.ua", - "vinnica.ua", - "vinnytsia.ua", - "vn.ua", - "volyn.ua", - "yalta.ua", - "zaporizhzhe.ua", - "zaporizhzhia.ua", - "zhitomir.ua", - "zhytomyr.ua", - "zp.ua", - "zt.ua", - "ug", - "co.ug", - "or.ug", - "ac.ug", - "sc.ug", - "go.ug", - "ne.ug", - "com.ug", - "org.ug", - "uk", - "ac.uk", - "co.uk", - "gov.uk", - "ltd.uk", - "me.uk", - "net.uk", - "nhs.uk", - "org.uk", - "plc.uk", - "police.uk", - "*.sch.uk", - "us", - "dni.us", - "fed.us", - "isa.us", - "kids.us", - "nsn.us", - "ak.us", - "al.us", - "ar.us", - "as.us", - "az.us", - "ca.us", - "co.us", - "ct.us", - "dc.us", - "de.us", - "fl.us", - "ga.us", - "gu.us", - "hi.us", - "ia.us", - "id.us", - "il.us", - 
"in.us", - "ks.us", - "ky.us", - "la.us", - "ma.us", - "md.us", - "me.us", - "mi.us", - "mn.us", - "mo.us", - "ms.us", - "mt.us", - "nc.us", - "nd.us", - "ne.us", - "nh.us", - "nj.us", - "nm.us", - "nv.us", - "ny.us", - "oh.us", - "ok.us", - "or.us", - "pa.us", - "pr.us", - "ri.us", - "sc.us", - "sd.us", - "tn.us", - "tx.us", - "ut.us", - "vi.us", - "vt.us", - "va.us", - "wa.us", - "wi.us", - "wv.us", - "wy.us", - "k12.ak.us", - "k12.al.us", - "k12.ar.us", - "k12.as.us", - "k12.az.us", - "k12.ca.us", - "k12.co.us", - "k12.ct.us", - "k12.dc.us", - "k12.de.us", - "k12.fl.us", - "k12.ga.us", - "k12.gu.us", - "k12.ia.us", - "k12.id.us", - "k12.il.us", - "k12.in.us", - "k12.ks.us", - "k12.ky.us", - "k12.la.us", - "k12.ma.us", - "k12.md.us", - "k12.me.us", - "k12.mi.us", - "k12.mn.us", - "k12.mo.us", - "k12.ms.us", - "k12.mt.us", - "k12.nc.us", - "k12.ne.us", - "k12.nh.us", - "k12.nj.us", - "k12.nm.us", - "k12.nv.us", - "k12.ny.us", - "k12.oh.us", - "k12.ok.us", - "k12.or.us", - "k12.pa.us", - "k12.pr.us", - "k12.ri.us", - "k12.sc.us", - "k12.tn.us", - "k12.tx.us", - "k12.ut.us", - "k12.vi.us", - "k12.vt.us", - "k12.va.us", - "k12.wa.us", - "k12.wi.us", - "k12.wy.us", - "cc.ak.us", - "cc.al.us", - "cc.ar.us", - "cc.as.us", - "cc.az.us", - "cc.ca.us", - "cc.co.us", - "cc.ct.us", - "cc.dc.us", - "cc.de.us", - "cc.fl.us", - "cc.ga.us", - "cc.gu.us", - "cc.hi.us", - "cc.ia.us", - "cc.id.us", - "cc.il.us", - "cc.in.us", - "cc.ks.us", - "cc.ky.us", - "cc.la.us", - "cc.ma.us", - "cc.md.us", - "cc.me.us", - "cc.mi.us", - "cc.mn.us", - "cc.mo.us", - "cc.ms.us", - "cc.mt.us", - "cc.nc.us", - "cc.nd.us", - "cc.ne.us", - "cc.nh.us", - "cc.nj.us", - "cc.nm.us", - "cc.nv.us", - "cc.ny.us", - "cc.oh.us", - "cc.ok.us", - "cc.or.us", - "cc.pa.us", - "cc.pr.us", - "cc.ri.us", - "cc.sc.us", - "cc.sd.us", - "cc.tn.us", - "cc.tx.us", - "cc.ut.us", - "cc.vi.us", - "cc.vt.us", - "cc.va.us", - "cc.wa.us", - "cc.wi.us", - "cc.wv.us", - "cc.wy.us", - "lib.ak.us", - "lib.al.us", - "lib.ar.us", - "lib.as.us", - "lib.az.us", - "lib.ca.us", - "lib.co.us", - "lib.ct.us", - "lib.dc.us", - "lib.fl.us", - "lib.ga.us", - "lib.gu.us", - "lib.hi.us", - "lib.ia.us", - "lib.id.us", - "lib.il.us", - "lib.in.us", - "lib.ks.us", - "lib.ky.us", - "lib.la.us", - "lib.ma.us", - "lib.md.us", - "lib.me.us", - "lib.mi.us", - "lib.mn.us", - "lib.mo.us", - "lib.ms.us", - "lib.mt.us", - "lib.nc.us", - "lib.nd.us", - "lib.ne.us", - "lib.nh.us", - "lib.nj.us", - "lib.nm.us", - "lib.nv.us", - "lib.ny.us", - "lib.oh.us", - "lib.ok.us", - "lib.or.us", - "lib.pa.us", - "lib.pr.us", - "lib.ri.us", - "lib.sc.us", - "lib.sd.us", - "lib.tn.us", - "lib.tx.us", - "lib.ut.us", - "lib.vi.us", - "lib.vt.us", - "lib.va.us", - "lib.wa.us", - "lib.wi.us", - "lib.wy.us", - "pvt.k12.ma.us", - "chtr.k12.ma.us", - "paroch.k12.ma.us", - "uy", - "com.uy", - "edu.uy", - "gub.uy", - "mil.uy", - "net.uy", - "org.uy", - "uz", - "co.uz", - "com.uz", - "net.uz", - "org.uz", - "va", - "vc", - "com.vc", - "net.vc", - "org.vc", - "gov.vc", - "mil.vc", - "edu.vc", - "ve", - "arts.ve", - "co.ve", - "com.ve", - "e12.ve", - "edu.ve", - "firm.ve", - "gob.ve", - "gov.ve", - "info.ve", - "int.ve", - "mil.ve", - "net.ve", - "org.ve", - "rec.ve", - "store.ve", - "tec.ve", - "web.ve", - "vg", - "vi", - "co.vi", - "com.vi", - "k12.vi", - "net.vi", - "org.vi", - "vn", - "com.vn", - "net.vn", - "org.vn", - "edu.vn", - "gov.vn", - "int.vn", - "ac.vn", - "biz.vn", - "info.vn", - "name.vn", - "pro.vn", - "health.vn", - "vu", - "com.vu", - "edu.vu", - "net.vu", - "org.vu", - "wf", - "ws", - 
"com.ws", - "net.ws", - "org.ws", - "gov.ws", - "edu.ws", - "yt", - "xn--mgbaam7a8h", - "xn--y9a3aq", - "xn--54b7fta0cc", - "xn--90ais", - "xn--fiqs8s", - "xn--fiqz9s", - "xn--lgbbat1ad8j", - "xn--wgbh1c", - "xn--e1a4c", - "xn--node", - "xn--qxam", - "xn--j6w193g", - "xn--h2brj9c", - "xn--mgbbh1a71e", - "xn--fpcrj9c3d", - "xn--gecrj9c", - "xn--s9brj9c", - "xn--45brj9c", - "xn--xkc2dl3a5ee0h", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - "xn--mgbtx2b", - "xn--mgbayh7gpa", - "xn--3e0b707e", - "xn--80ao21a", - "xn--fzc2c9e2c", - "xn--xkc2al3hye2a", - "xn--mgbc0a9azcg", - "xn--d1alf", - "xn--l1acc", - "xn--mix891f", - "xn--mix082f", - "xn--mgbx4cd0ab", - "xn--mgb9awbf", - "xn--mgbai9azgqp6j", - "xn--mgbai9a5eva00b", - "xn--ygbi2ammx", - "xn--90a3ac", - "xn--o1ac.xn--90a3ac", - "xn--c1avg.xn--90a3ac", - "xn--90azh.xn--90a3ac", - "xn--d1at.xn--90a3ac", - "xn--o1ach.xn--90a3ac", - "xn--80au.xn--90a3ac", - "xn--p1ai", - "xn--wgbl6a", - "xn--mgberp4a5d4ar", - "xn--mgberp4a5d4a87g", - "xn--mgbqly7c0a67fbc", - "xn--mgbqly7cvafr", - "xn--mgbpl2fh", - "xn--yfro4i67o", - "xn--clchc0ea0b2g2a9gcd", - "xn--ogbpf8fl", - "xn--mgbtf8fl", - "xn--o3cw4h", - "xn--12c1fe0br.xn--o3cw4h", - "xn--12co0c3b4eva.xn--o3cw4h", - "xn--h3cuzk1di.xn--o3cw4h", - "xn--o3cyx2a.xn--o3cw4h", - "xn--m3ch0j3a.xn--o3cw4h", - "xn--12cfi8ixb8l.xn--o3cw4h", - "xn--pgbs0dh", - "xn--kpry57d", - "xn--kprw13d", - "xn--nnx388a", - "xn--j1amh", - "xn--mgb2ddes", - "xxx", - "*.ye", - "ac.za", - "agric.za", - "alt.za", - "co.za", - "edu.za", - "gov.za", - "grondar.za", - "law.za", - "mil.za", - "net.za", - "ngo.za", - "nis.za", - "nom.za", - "org.za", - "school.za", - "tm.za", - "web.za", - "zm", - "ac.zm", - "biz.zm", - "co.zm", - "com.zm", - "edu.zm", - "gov.zm", - "info.zm", - "mil.zm", - "net.zm", - "org.zm", - "sch.zm", - "zw", - "ac.zw", - "co.zw", - "gov.zw", - "mil.zw", - "org.zw", - "aaa", - "aarp", - "abarth", - "abb", - "abbott", - "abbvie", - "abc", - "able", - "abogado", - "abudhabi", - "academy", - "accenture", - "accountant", - "accountants", - "aco", - "active", - "actor", - "adac", - "ads", - "adult", - "aeg", - "aetna", - "afamilycompany", - "afl", - "africa", - "agakhan", - "agency", - "aig", - "aigo", - "airbus", - "airforce", - "airtel", - "akdn", - "alfaromeo", - "alibaba", - "alipay", - "allfinanz", - "allstate", - "ally", - "alsace", - "alstom", - "americanexpress", - "americanfamily", - "amex", - "amfam", - "amica", - "amsterdam", - "analytics", - "android", - "anquan", - "anz", - "aol", - "apartments", - "app", - "apple", - "aquarelle", - "arab", - "aramco", - "archi", - "army", - "art", - "arte", - "asda", - "associates", - "athleta", - "attorney", - "auction", - "audi", - "audible", - "audio", - "auspost", - "author", - "auto", - "autos", - "avianca", - "aws", - "axa", - "azure", - "baby", - "baidu", - "banamex", - "bananarepublic", - "band", - "bank", - "bar", - "barcelona", - "barclaycard", - "barclays", - "barefoot", - "bargains", - "baseball", - "basketball", - "bauhaus", - "bayern", - "bbc", - "bbt", - "bbva", - "bcg", - "bcn", - "beats", - "beauty", - "beer", - "bentley", - "berlin", - "best", - "bestbuy", - "bet", - "bharti", - "bible", - "bid", - "bike", - "bing", - "bingo", - "bio", - "black", - "blackfriday", - "blanco", - "blockbuster", - "blog", - "bloomberg", - "blue", - "bms", - "bmw", - "bnl", - "bnpparibas", - "boats", - "boehringer", - "bofa", - "bom", - "bond", - "boo", - "book", - "booking", - "boots", - "bosch", - "bostik", - "boston", - "bot", - "boutique", - "box", - "bradesco", - "bridgestone", 
- "broadway", - "broker", - "brother", - "brussels", - "budapest", - "bugatti", - "build", - "builders", - "business", - "buy", - "buzz", - "bzh", - "cab", - "cafe", - "cal", - "call", - "calvinklein", - "cam", - "camera", - "camp", - "cancerresearch", - "canon", - "capetown", - "capital", - "capitalone", - "car", - "caravan", - "cards", - "care", - "career", - "careers", - "cars", - "cartier", - "casa", - "case", - "caseih", - "cash", - "casino", - "catering", - "catholic", - "cba", - "cbn", - "cbre", - "cbs", - "ceb", - "center", - "ceo", - "cern", - "cfa", - "cfd", - "chanel", - "channel", - "chase", - "chat", - "cheap", - "chintai", - "chloe", - "christmas", - "chrome", - "chrysler", - "church", - "cipriani", - "circle", - "cisco", - "citadel", - "citi", - "citic", - "city", - "cityeats", - "claims", - "cleaning", - "click", - "clinic", - "clinique", - "clothing", - "cloud", - "club", - "clubmed", - "coach", - "codes", - "coffee", - "college", - "cologne", - "comcast", - "commbank", - "community", - "company", - "compare", - "computer", - "comsec", - "condos", - "construction", - "consulting", - "contact", - "contractors", - "cooking", - "cookingchannel", - "cool", - "corsica", - "country", - "coupon", - "coupons", - "courses", - "credit", - "creditcard", - "creditunion", - "cricket", - "crown", - "crs", - "cruise", - "cruises", - "csc", - "cuisinella", - "cymru", - "cyou", - "dabur", - "dad", - "dance", - "data", - "date", - "dating", - "datsun", - "day", - "dclk", - "dds", - "deal", - "dealer", - "deals", - "degree", - "delivery", - "dell", - "deloitte", - "delta", - "democrat", - "dental", - "dentist", - "desi", - "design", - "dev", - "dhl", - "diamonds", - "diet", - "digital", - "direct", - "directory", - "discount", - "discover", - "dish", - "diy", - "dnp", - "docs", - "doctor", - "dodge", - "dog", - "doha", - "domains", - "dot", - "download", - "drive", - "dtv", - "dubai", - "duck", - "dunlop", - "duns", - "dupont", - "durban", - "dvag", - "dvr", - "earth", - "eat", - "eco", - "edeka", - "education", - "email", - "emerck", - "energy", - "engineer", - "engineering", - "enterprises", - "epost", - "epson", - "equipment", - "ericsson", - "erni", - "esq", - "estate", - "esurance", - "etisalat", - "eurovision", - "eus", - "events", - "everbank", - "exchange", - "expert", - "exposed", - "express", - "extraspace", - "fage", - "fail", - "fairwinds", - "faith", - "family", - "fan", - "fans", - "farm", - "farmers", - "fashion", - "fast", - "fedex", - "feedback", - "ferrari", - "ferrero", - "fiat", - "fidelity", - "fido", - "film", - "final", - "finance", - "financial", - "fire", - "firestone", - "firmdale", - "fish", - "fishing", - "fit", - "fitness", - "flickr", - "flights", - "flir", - "florist", - "flowers", - "fly", - "foo", - "food", - "foodnetwork", - "football", - "ford", - "forex", - "forsale", - "forum", - "foundation", - "fox", - "free", - "fresenius", - "frl", - "frogans", - "frontdoor", - "frontier", - "ftr", - "fujitsu", - "fujixerox", - "fun", - "fund", - "furniture", - "futbol", - "fyi", - "gal", - "gallery", - "gallo", - "gallup", - "game", - "games", - "gap", - "garden", - "gbiz", - "gdn", - "gea", - "gent", - "genting", - "george", - "ggee", - "gift", - "gifts", - "gives", - "giving", - "glade", - "glass", - "gle", - "global", - "globo", - "gmail", - "gmbh", - "gmo", - "gmx", - "godaddy", - "gold", - "goldpoint", - "golf", - "goo", - "goodhands", - "goodyear", - "goog", - "google", - "gop", - "got", - "grainger", - "graphics", - "gratis", - "green", - "gripe", - "grocery", 
- "group", - "guardian", - "gucci", - "guge", - "guide", - "guitars", - "guru", - "hair", - "hamburg", - "hangout", - "haus", - "hbo", - "hdfc", - "hdfcbank", - "health", - "healthcare", - "help", - "helsinki", - "here", - "hermes", - "hgtv", - "hiphop", - "hisamitsu", - "hitachi", - "hiv", - "hkt", - "hockey", - "holdings", - "holiday", - "homedepot", - "homegoods", - "homes", - "homesense", - "honda", - "honeywell", - "horse", - "hospital", - "host", - "hosting", - "hot", - "hoteles", - "hotels", - "hotmail", - "house", - "how", - "hsbc", - "htc", - "hughes", - "hyatt", - "hyundai", - "ibm", - "icbc", - "ice", - "icu", - "ieee", - "ifm", - "ikano", - "imamat", - "imdb", - "immo", - "immobilien", - "industries", - "infiniti", - "ing", - "ink", - "institute", - "insurance", - "insure", - "intel", - "international", - "intuit", - "investments", - "ipiranga", - "irish", - "iselect", - "ismaili", - "ist", - "istanbul", - "itau", - "itv", - "iveco", - "iwc", - "jaguar", - "java", - "jcb", - "jcp", - "jeep", - "jetzt", - "jewelry", - "jio", - "jlc", - "jll", - "jmp", - "jnj", - "joburg", - "jot", - "joy", - "jpmorgan", - "jprs", - "juegos", - "juniper", - "kaufen", - "kddi", - "kerryhotels", - "kerrylogistics", - "kerryproperties", - "kfh", - "kia", - "kim", - "kinder", - "kindle", - "kitchen", - "kiwi", - "koeln", - "komatsu", - "kosher", - "kpmg", - "kpn", - "krd", - "kred", - "kuokgroup", - "kyoto", - "lacaixa", - "ladbrokes", - "lamborghini", - "lamer", - "lancaster", - "lancia", - "lancome", - "land", - "landrover", - "lanxess", - "lasalle", - "lat", - "latino", - "latrobe", - "law", - "lawyer", - "lds", - "lease", - "leclerc", - "lefrak", - "legal", - "lego", - "lexus", - "lgbt", - "liaison", - "lidl", - "life", - "lifeinsurance", - "lifestyle", - "lighting", - "like", - "lilly", - "limited", - "limo", - "lincoln", - "linde", - "link", - "lipsy", - "live", - "living", - "lixil", - "loan", - "loans", - "locker", - "locus", - "loft", - "lol", - "london", - "lotte", - "lotto", - "love", - "lpl", - "lplfinancial", - "ltd", - "ltda", - "lundbeck", - "lupin", - "luxe", - "luxury", - "macys", - "madrid", - "maif", - "maison", - "makeup", - "man", - "management", - "mango", - "map", - "market", - "marketing", - "markets", - "marriott", - "marshalls", - "maserati", - "mattel", - "mba", - "mcd", - "mcdonalds", - "mckinsey", - "med", - "media", - "meet", - "melbourne", - "meme", - "memorial", - "men", - "menu", - "meo", - "merckmsd", - "metlife", - "miami", - "microsoft", - "mini", - "mint", - "mit", - "mitsubishi", - "mlb", - "mls", - "mma", - "mobile", - "mobily", - "moda", - "moe", - "moi", - "mom", - "monash", - "money", - "monster", - "montblanc", - "mopar", - "mormon", - "mortgage", - "moscow", - "moto", - "motorcycles", - "mov", - "movie", - "movistar", - "msd", - "mtn", - "mtpc", - "mtr", - "mutual", - "nab", - "nadex", - "nagoya", - "nationwide", - "natura", - "navy", - "nba", - "nec", - "netbank", - "netflix", - "network", - "neustar", - "new", - "newholland", - "news", - "next", - "nextdirect", - "nexus", - "nfl", - "ngo", - "nhk", - "nico", - "nike", - "nikon", - "ninja", - "nissan", - "nissay", - "nokia", - "northwesternmutual", - "norton", - "now", - "nowruz", - "nowtv", - "nra", - "nrw", - "ntt", - "nyc", - "obi", - "observer", - "off", - "office", - "okinawa", - "olayan", - "olayangroup", - "oldnavy", - "ollo", - "omega", - "one", - "ong", - "onl", - "online", - "onyourside", - "ooo", - "open", - "oracle", - "orange", - "organic", - "origins", - "osaka", - "otsuka", - "ott", - "ovh", 
- "page", - "pamperedchef", - "panasonic", - "panerai", - "paris", - "pars", - "partners", - "parts", - "party", - "passagens", - "pay", - "pccw", - "pet", - "pfizer", - "pharmacy", - "phd", - "philips", - "phone", - "photo", - "photography", - "photos", - "physio", - "piaget", - "pics", - "pictet", - "pictures", - "pid", - "pin", - "ping", - "pink", - "pioneer", - "pizza", - "place", - "play", - "playstation", - "plumbing", - "plus", - "pnc", - "pohl", - "poker", - "politie", - "porn", - "pramerica", - "praxi", - "press", - "prime", - "prod", - "productions", - "prof", - "progressive", - "promo", - "properties", - "property", - "protection", - "pru", - "prudential", - "pub", - "pwc", - "qpon", - "quebec", - "quest", - "qvc", - "racing", - "radio", - "raid", - "read", - "realestate", - "realtor", - "realty", - "recipes", - "red", - "redstone", - "redumbrella", - "rehab", - "reise", - "reisen", - "reit", - "reliance", - "ren", - "rent", - "rentals", - "repair", - "report", - "republican", - "rest", - "restaurant", - "review", - "reviews", - "rexroth", - "rich", - "richardli", - "ricoh", - "rightathome", - "ril", - "rio", - "rip", - "rmit", - "rocher", - "rocks", - "rodeo", - "rogers", - "room", - "rsvp", - "rugby", - "ruhr", - "run", - "rwe", - "ryukyu", - "saarland", - "safe", - "safety", - "sakura", - "sale", - "salon", - "samsclub", - "samsung", - "sandvik", - "sandvikcoromant", - "sanofi", - "sap", - "sapo", - "sarl", - "sas", - "save", - "saxo", - "sbi", - "sbs", - "sca", - "scb", - "schaeffler", - "schmidt", - "scholarships", - "school", - "schule", - "schwarz", - "science", - "scjohnson", - "scor", - "scot", - "search", - "seat", - "secure", - "security", - "seek", - "select", - "sener", - "services", - "ses", - "seven", - "sew", - "sex", - "sexy", - "sfr", - "shangrila", - "sharp", - "shaw", - "shell", - "shia", - "shiksha", - "shoes", - "shop", - "shopping", - "shouji", - "show", - "showtime", - "shriram", - "silk", - "sina", - "singles", - "site", - "ski", - "skin", - "sky", - "skype", - "sling", - "smart", - "smile", - "sncf", - "soccer", - "social", - "softbank", - "software", - "sohu", - "solar", - "solutions", - "song", - "sony", - "soy", - "space", - "spiegel", - "spot", - "spreadbetting", - "srl", - "srt", - "stada", - "staples", - "star", - "starhub", - "statebank", - "statefarm", - "statoil", - "stc", - "stcgroup", - "stockholm", - "storage", - "store", - "stream", - "studio", - "study", - "style", - "sucks", - "supplies", - "supply", - "support", - "surf", - "surgery", - "suzuki", - "swatch", - "swiftcover", - "swiss", - "sydney", - "symantec", - "systems", - "tab", - "taipei", - "talk", - "taobao", - "target", - "tatamotors", - "tatar", - "tattoo", - "tax", - "taxi", - "tci", - "tdk", - "team", - "tech", - "technology", - "telecity", - "telefonica", - "temasek", - "tennis", - "teva", - "thd", - "theater", - "theatre", - "tiaa", - "tickets", - "tienda", - "tiffany", - "tips", - "tires", - "tirol", - "tjmaxx", - "tjx", - "tkmaxx", - "tmall", - "today", - "tokyo", - "tools", - "top", - "toray", - "toshiba", - "total", - "tours", - "town", - "toyota", - "toys", - "trade", - "trading", - "training", - "travelchannel", - "travelers", - "travelersinsurance", - "trust", - "trv", - "tube", - "tui", - "tunes", - "tushu", - "tvs", - "ubank", - "ubs", - "uconnect", - "unicom", - "university", - "uno", - "uol", - "ups", - "vacations", - "vana", - "vanguard", - "vegas", - "ventures", - "verisign", - "versicherung", - "vet", - "viajes", - "video", - "vig", - "viking", - "villas", - 
"vin", - "vip", - "virgin", - "visa", - "vision", - "vista", - "vistaprint", - "viva", - "vivo", - "vlaanderen", - "vodka", - "volkswagen", - "volvo", - "vote", - "voting", - "voto", - "voyage", - "vuelos", - "wales", - "walmart", - "walter", - "wang", - "wanggou", - "warman", - "watch", - "watches", - "weather", - "weatherchannel", - "webcam", - "weber", - "website", - "wed", - "wedding", - "weibo", - "weir", - "whoswho", - "wien", - "wiki", - "williamhill", - "win", - "windows", - "wine", - "winners", - "wme", - "wolterskluwer", - "woodside", - "work", - "works", - "world", - "wow", - "wtc", - "wtf", - "xbox", - "xerox", - "xfinity", - "xihuan", - "xin", - "xn--11b4c3d", - "xn--1ck2e1b", - "xn--1qqw23a", - "xn--30rr7y", - "xn--3bst00m", - "xn--3ds443g", - "xn--3oq18vl8pn36a", - "xn--3pxu8k", - "xn--42c2d9a", - "xn--45q11c", - "xn--4gbrim", - "xn--55qw42g", - "xn--55qx5d", - "xn--5su34j936bgsg", - "xn--5tzm5g", - "xn--6frz82g", - "xn--6qq986b3xl", - "xn--80adxhks", - "xn--80aqecdr1a", - "xn--80asehdb", - "xn--80aswg", - "xn--8y0a063a", - "xn--9dbq2a", - "xn--9et52u", - "xn--9krt00a", - "xn--b4w605ferd", - "xn--bck1b9a5dre4c", - "xn--c1avg", - "xn--c2br7g", - "xn--cck2b3b", - "xn--cg4bki", - "xn--czr694b", - "xn--czrs0t", - "xn--czru2d", - "xn--d1acj3b", - "xn--eckvdtc9d", - "xn--efvy88h", - "xn--estv75g", - "xn--fct429k", - "xn--fhbei", - "xn--fiq228c5hs", - "xn--fiq64b", - "xn--fjq720a", - "xn--flw351e", - "xn--fzys8d69uvgm", - "xn--g2xx48c", - "xn--gckr3f0f", - "xn--gk3at1e", - "xn--hxt814e", - "xn--i1b6b1a6a2e", - "xn--imr513n", - "xn--io0a7i", - "xn--j1aef", - "xn--jlq61u9w7b", - "xn--jvr189m", - "xn--kcrx77d1x4a", - "xn--kpu716f", - "xn--kput3i", - "xn--mgba3a3ejt", - "xn--mgba7c0bbn0a", - "xn--mgbaakc7dvf", - "xn--mgbab2bd", - "xn--mgbb9fbpob", - "xn--mgbca7dzdo", - "xn--mgbi4ecexp", - "xn--mgbt3dhd", - "xn--mk1bu44c", - "xn--mxtq1m", - "xn--ngbc5azd", - "xn--ngbe9e0a", - "xn--ngbrx", - "xn--nqv7f", - "xn--nqv7fs00ema", - "xn--nyqy26a", - "xn--p1acf", - "xn--pbt977c", - "xn--pssy2u", - "xn--q9jyb4c", - "xn--qcka1pmc", - "xn--rhqv96g", - "xn--rovu88b", - "xn--ses554g", - "xn--t60b56a", - "xn--tckwe", - "xn--tiq49xqyj", - "xn--unup4y", - "xn--vermgensberater-ctb", - "xn--vermgensberatung-pwb", - "xn--vhquv", - "xn--vuq861b", - "xn--w4r85el8fhu5dnra", - "xn--w4rs40l", - "xn--xhq521b", - "xn--zfr164b", - "xperia", - "xyz", - "yachts", - "yahoo", - "yamaxun", - "yandex", - "yodobashi", - "yoga", - "yokohama", - "you", - "youtube", - "yun", - "zappos", - "zara", - "zero", - "zip", - "zippo", - "zone", - "zuerich", - "cc.ua", - "inf.ua", - "ltd.ua", - "beep.pl", - "*.compute.estate", - "*.alces.network", - "*.alwaysdata.net", - "cloudfront.net", - "*.compute.amazonaws.com", - "*.compute-1.amazonaws.com", - "*.compute.amazonaws.com.cn", - "us-east-1.amazonaws.com", - "elasticbeanstalk.cn-north-1.amazonaws.com.cn", - "*.elasticbeanstalk.com", - "*.elb.amazonaws.com", - "*.elb.amazonaws.com.cn", - "s3.amazonaws.com", - "s3-ap-northeast-1.amazonaws.com", - "s3-ap-northeast-2.amazonaws.com", - "s3-ap-south-1.amazonaws.com", - "s3-ap-southeast-1.amazonaws.com", - "s3-ap-southeast-2.amazonaws.com", - "s3-ca-central-1.amazonaws.com", - "s3-eu-central-1.amazonaws.com", - "s3-eu-west-1.amazonaws.com", - "s3-eu-west-2.amazonaws.com", - "s3-external-1.amazonaws.com", - "s3-fips-us-gov-west-1.amazonaws.com", - "s3-sa-east-1.amazonaws.com", - "s3-us-gov-west-1.amazonaws.com", - "s3-us-east-2.amazonaws.com", - "s3-us-west-1.amazonaws.com", - "s3-us-west-2.amazonaws.com", - 
"s3.ap-northeast-2.amazonaws.com", - "s3.ap-south-1.amazonaws.com", - "s3.cn-north-1.amazonaws.com.cn", - "s3.ca-central-1.amazonaws.com", - "s3.eu-central-1.amazonaws.com", - "s3.eu-west-2.amazonaws.com", - "s3.us-east-2.amazonaws.com", - "s3.dualstack.ap-northeast-1.amazonaws.com", - "s3.dualstack.ap-northeast-2.amazonaws.com", - "s3.dualstack.ap-south-1.amazonaws.com", - "s3.dualstack.ap-southeast-1.amazonaws.com", - "s3.dualstack.ap-southeast-2.amazonaws.com", - "s3.dualstack.ca-central-1.amazonaws.com", - "s3.dualstack.eu-central-1.amazonaws.com", - "s3.dualstack.eu-west-1.amazonaws.com", - "s3.dualstack.eu-west-2.amazonaws.com", - "s3.dualstack.sa-east-1.amazonaws.com", - "s3.dualstack.us-east-1.amazonaws.com", - "s3.dualstack.us-east-2.amazonaws.com", - "s3-website-us-east-1.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website.ap-northeast-2.amazonaws.com", - "s3-website.ap-south-1.amazonaws.com", - "s3-website.ca-central-1.amazonaws.com", - "s3-website.eu-central-1.amazonaws.com", - "s3-website.eu-west-2.amazonaws.com", - "s3-website.us-east-2.amazonaws.com", - "t3l3p0rt.net", - "tele.amune.org", - "on-aptible.com", - "user.party.eus", - "pimienta.org", - "poivron.org", - "potager.org", - "sweetpepper.org", - "myasustor.com", - "myfritz.net", - "*.awdev.ca", - "*.advisor.ws", - "backplaneapp.io", - "betainabox.com", - "bnr.la", - "boxfuse.io", - "square7.ch", - "bplaced.com", - "bplaced.de", - "square7.de", - "bplaced.net", - "square7.net", - "browsersafetymark.io", - "mycd.eu", - "ae.org", - "ar.com", - "br.com", - "cn.com", - "com.de", - "com.se", - "de.com", - "eu.com", - "gb.com", - "gb.net", - "hu.com", - "hu.net", - "jp.net", - "jpn.com", - "kr.com", - "mex.com", - "no.com", - "qc.com", - "ru.com", - "sa.com", - "se.com", - "se.net", - "uk.com", - "uk.net", - "us.com", - "uy.com", - "za.bz", - "za.com", - "africa.com", - "gr.com", - "in.net", - "us.org", - "co.com", - "c.la", - "certmgr.org", - "xenapponazure.com", - "virtueeldomein.nl", - "c66.me", - "cloudcontrolled.com", - "cloudcontrolapp.com", - "co.ca", - "co.cz", - "c.cdn77.org", - "cdn77-ssl.net", - "r.cdn77.net", - "rsc.cdn77.org", - "ssl.origin.cdn77-secure.org", - "cloudns.asia", - "cloudns.biz", - "cloudns.club", - "cloudns.cc", - "cloudns.eu", - "cloudns.in", - "cloudns.info", - "cloudns.org", - "cloudns.pro", - "cloudns.pw", - "cloudns.us", - "co.nl", - "co.no", - "dyn.cosidns.de", - "dynamisches-dns.de", - "dnsupdater.de", - "internet-dns.de", - "l-o-g-i-n.de", - "dynamic-dns.info", - "feste-ip.net", - "knx-server.net", - "static-access.net", - "realm.cz", - "*.cryptonomic.net", - "cupcake.is", - "cyon.link", - "cyon.site", - "daplie.me", - "localhost.daplie.me", - "biz.dk", - "co.dk", - "firm.dk", - "reg.dk", - "store.dk", - "dedyn.io", - "dnshome.de", - "dreamhosters.com", - "mydrobo.com", - "drud.io", - "drud.us", - "duckdns.org", - "dy.fi", - "tunk.org", - "dyndns-at-home.com", - "dyndns-at-work.com", - "dyndns-blog.com", - "dyndns-free.com", - "dyndns-home.com", - "dyndns-ip.com", - "dyndns-mail.com", - "dyndns-office.com", - "dyndns-pics.com", - "dyndns-remote.com", - "dyndns-server.com", - "dyndns-web.com", - "dyndns-wiki.com", - "dyndns-work.com", - "dyndns.biz", - "dyndns.info", - "dyndns.org", - "dyndns.tv", - "at-band-camp.net", - 
"ath.cx", - "barrel-of-knowledge.info", - "barrell-of-knowledge.info", - "better-than.tv", - "blogdns.com", - "blogdns.net", - "blogdns.org", - "blogsite.org", - "boldlygoingnowhere.org", - "broke-it.net", - "buyshouses.net", - "cechire.com", - "dnsalias.com", - "dnsalias.net", - "dnsalias.org", - "dnsdojo.com", - "dnsdojo.net", - "dnsdojo.org", - "does-it.net", - "doesntexist.com", - "doesntexist.org", - "dontexist.com", - "dontexist.net", - "dontexist.org", - "doomdns.com", - "doomdns.org", - "dvrdns.org", - "dyn-o-saur.com", - "dynalias.com", - "dynalias.net", - "dynalias.org", - "dynathome.net", - "dyndns.ws", - "endofinternet.net", - "endofinternet.org", - "endoftheinternet.org", - "est-a-la-maison.com", - "est-a-la-masion.com", - "est-le-patron.com", - "est-mon-blogueur.com", - "for-better.biz", - "for-more.biz", - "for-our.info", - "for-some.biz", - "for-the.biz", - "forgot.her.name", - "forgot.his.name", - "from-ak.com", - "from-al.com", - "from-ar.com", - "from-az.net", - "from-ca.com", - "from-co.net", - "from-ct.com", - "from-dc.com", - "from-de.com", - "from-fl.com", - "from-ga.com", - "from-hi.com", - "from-ia.com", - "from-id.com", - "from-il.com", - "from-in.com", - "from-ks.com", - "from-ky.com", - "from-la.net", - "from-ma.com", - "from-md.com", - "from-me.org", - "from-mi.com", - "from-mn.com", - "from-mo.com", - "from-ms.com", - "from-mt.com", - "from-nc.com", - "from-nd.com", - "from-ne.com", - "from-nh.com", - "from-nj.com", - "from-nm.com", - "from-nv.com", - "from-ny.net", - "from-oh.com", - "from-ok.com", - "from-or.com", - "from-pa.com", - "from-pr.com", - "from-ri.com", - "from-sc.com", - "from-sd.com", - "from-tn.com", - "from-tx.com", - "from-ut.com", - "from-va.com", - "from-vt.com", - "from-wa.com", - "from-wi.com", - "from-wv.com", - "from-wy.com", - "ftpaccess.cc", - "fuettertdasnetz.de", - "game-host.org", - "game-server.cc", - "getmyip.com", - "gets-it.net", - "go.dyndns.org", - "gotdns.com", - "gotdns.org", - "groks-the.info", - "groks-this.info", - "ham-radio-op.net", - "here-for-more.info", - "hobby-site.com", - "hobby-site.org", - "home.dyndns.org", - "homedns.org", - "homeftp.net", - "homeftp.org", - "homeip.net", - "homelinux.com", - "homelinux.net", - "homelinux.org", - "homeunix.com", - "homeunix.net", - "homeunix.org", - "iamallama.com", - "in-the-band.net", - "is-a-anarchist.com", - "is-a-blogger.com", - "is-a-bookkeeper.com", - "is-a-bruinsfan.org", - "is-a-bulls-fan.com", - "is-a-candidate.org", - "is-a-caterer.com", - "is-a-celticsfan.org", - "is-a-chef.com", - "is-a-chef.net", - "is-a-chef.org", - "is-a-conservative.com", - "is-a-cpa.com", - "is-a-cubicle-slave.com", - "is-a-democrat.com", - "is-a-designer.com", - "is-a-doctor.com", - "is-a-financialadvisor.com", - "is-a-geek.com", - "is-a-geek.net", - "is-a-geek.org", - "is-a-green.com", - "is-a-guru.com", - "is-a-hard-worker.com", - "is-a-hunter.com", - "is-a-knight.org", - "is-a-landscaper.com", - "is-a-lawyer.com", - "is-a-liberal.com", - "is-a-libertarian.com", - "is-a-linux-user.org", - "is-a-llama.com", - "is-a-musician.com", - "is-a-nascarfan.com", - "is-a-nurse.com", - "is-a-painter.com", - "is-a-patsfan.org", - "is-a-personaltrainer.com", - "is-a-photographer.com", - "is-a-player.com", - "is-a-republican.com", - "is-a-rockstar.com", - "is-a-socialist.com", - "is-a-soxfan.org", - "is-a-student.com", - "is-a-teacher.com", - "is-a-techie.com", - "is-a-therapist.com", - "is-an-accountant.com", - "is-an-actor.com", - "is-an-actress.com", - "is-an-anarchist.com", - "is-an-artist.com", - 
"is-an-engineer.com", - "is-an-entertainer.com", - "is-by.us", - "is-certified.com", - "is-found.org", - "is-gone.com", - "is-into-anime.com", - "is-into-cars.com", - "is-into-cartoons.com", - "is-into-games.com", - "is-leet.com", - "is-lost.org", - "is-not-certified.com", - "is-saved.org", - "is-slick.com", - "is-uberleet.com", - "is-very-bad.org", - "is-very-evil.org", - "is-very-good.org", - "is-very-nice.org", - "is-very-sweet.org", - "is-with-theband.com", - "isa-geek.com", - "isa-geek.net", - "isa-geek.org", - "isa-hockeynut.com", - "issmarterthanyou.com", - "isteingeek.de", - "istmein.de", - "kicks-ass.net", - "kicks-ass.org", - "knowsitall.info", - "land-4-sale.us", - "lebtimnetz.de", - "leitungsen.de", - "likes-pie.com", - "likescandy.com", - "merseine.nu", - "mine.nu", - "misconfused.org", - "mypets.ws", - "myphotos.cc", - "neat-url.com", - "office-on-the.net", - "on-the-web.tv", - "podzone.net", - "podzone.org", - "readmyblog.org", - "saves-the-whales.com", - "scrapper-site.net", - "scrapping.cc", - "selfip.biz", - "selfip.com", - "selfip.info", - "selfip.net", - "selfip.org", - "sells-for-less.com", - "sells-for-u.com", - "sells-it.net", - "sellsyourhome.org", - "servebbs.com", - "servebbs.net", - "servebbs.org", - "serveftp.net", - "serveftp.org", - "servegame.org", - "shacknet.nu", - "simple-url.com", - "space-to-rent.com", - "stuff-4-sale.org", - "stuff-4-sale.us", - "teaches-yoga.com", - "thruhere.net", - "traeumtgerade.de", - "webhop.biz", - "webhop.info", - "webhop.net", - "webhop.org", - "worse-than.tv", - "writesthisblog.com", - "ddnss.de", - "dyn.ddnss.de", - "dyndns.ddnss.de", - "dyndns1.de", - "dyn-ip24.de", - "home-webserver.de", - "dyn.home-webserver.de", - "myhome-server.de", - "ddnss.org", - "definima.net", - "definima.io", - "dynv6.net", - "e4.cz", - "enonic.io", - "customer.enonic.io", - "eu.org", - "al.eu.org", - "asso.eu.org", - "at.eu.org", - "au.eu.org", - "be.eu.org", - "bg.eu.org", - "ca.eu.org", - "cd.eu.org", - "ch.eu.org", - "cn.eu.org", - "cy.eu.org", - "cz.eu.org", - "de.eu.org", - "dk.eu.org", - "edu.eu.org", - "ee.eu.org", - "es.eu.org", - "fi.eu.org", - "fr.eu.org", - "gr.eu.org", - "hr.eu.org", - "hu.eu.org", - "ie.eu.org", - "il.eu.org", - "in.eu.org", - "int.eu.org", - "is.eu.org", - "it.eu.org", - "jp.eu.org", - "kr.eu.org", - "lt.eu.org", - "lu.eu.org", - "lv.eu.org", - "mc.eu.org", - "me.eu.org", - "mk.eu.org", - "mt.eu.org", - "my.eu.org", - "net.eu.org", - "ng.eu.org", - "nl.eu.org", - "no.eu.org", - "nz.eu.org", - "paris.eu.org", - "pl.eu.org", - "pt.eu.org", - "q-a.eu.org", - "ro.eu.org", - "ru.eu.org", - "se.eu.org", - "si.eu.org", - "sk.eu.org", - "tr.eu.org", - "uk.eu.org", - "us.eu.org", - "eu-1.evennode.com", - "eu-2.evennode.com", - "eu-3.evennode.com", - "us-1.evennode.com", - "us-2.evennode.com", - "us-3.evennode.com", - "twmail.cc", - "twmail.net", - "twmail.org", - "mymailer.com.tw", - "url.tw", - "apps.fbsbx.com", - "ru.net", - "adygeya.ru", - "bashkiria.ru", - "bir.ru", - "cbg.ru", - "com.ru", - "dagestan.ru", - "grozny.ru", - "kalmykia.ru", - "kustanai.ru", - "marine.ru", - "mordovia.ru", - "msk.ru", - "mytis.ru", - "nalchik.ru", - "nov.ru", - "pyatigorsk.ru", - "spb.ru", - "vladikavkaz.ru", - "vladimir.ru", - "abkhazia.su", - "adygeya.su", - "aktyubinsk.su", - "arkhangelsk.su", - "armenia.su", - "ashgabad.su", - "azerbaijan.su", - "balashov.su", - "bashkiria.su", - "bryansk.su", - "bukhara.su", - "chimkent.su", - "dagestan.su", - "east-kazakhstan.su", - "exnet.su", - "georgia.su", - "grozny.su", - "ivanovo.su", - 
"jambyl.su", - "kalmykia.su", - "kaluga.su", - "karacol.su", - "karaganda.su", - "karelia.su", - "khakassia.su", - "krasnodar.su", - "kurgan.su", - "kustanai.su", - "lenug.su", - "mangyshlak.su", - "mordovia.su", - "msk.su", - "murmansk.su", - "nalchik.su", - "navoi.su", - "north-kazakhstan.su", - "nov.su", - "obninsk.su", - "penza.su", - "pokrovsk.su", - "sochi.su", - "spb.su", - "tashkent.su", - "termez.su", - "togliatti.su", - "troitsk.su", - "tselinograd.su", - "tula.su", - "tuva.su", - "vladikavkaz.su", - "vladimir.su", - "vologda.su", - "fastlylb.net", - "map.fastlylb.net", - "freetls.fastly.net", - "map.fastly.net", - "a.prod.fastly.net", - "global.prod.fastly.net", - "a.ssl.fastly.net", - "b.ssl.fastly.net", - "global.ssl.fastly.net", - "fhapp.xyz", - "fedorainfracloud.org", - "fedorapeople.org", - "cloud.fedoraproject.org", - "filegear.me", - "firebaseapp.com", - "flynnhub.com", - "freebox-os.com", - "freeboxos.com", - "fbx-os.fr", - "fbxos.fr", - "freebox-os.fr", - "freeboxos.fr", - "myfusion.cloud", - "futurehosting.at", - "futuremailing.at", - "*.ex.ortsinfo.at", - "*.kunden.ortsinfo.at", - "*.statics.cloud", - "service.gov.uk", - "github.io", - "githubusercontent.com", - "githubcloud.com", - "*.api.githubcloud.com", - "*.ext.githubcloud.com", - "gist.githubcloud.com", - "*.githubcloudusercontent.com", - "gitlab.io", - "homeoffice.gov.uk", - "ro.im", - "shop.ro", - "goip.de", - "*.0emm.com", - "appspot.com", - "blogspot.ae", - "blogspot.al", - "blogspot.am", - "blogspot.ba", - "blogspot.be", - "blogspot.bg", - "blogspot.bj", - "blogspot.ca", - "blogspot.cf", - "blogspot.ch", - "blogspot.cl", - "blogspot.co.at", - "blogspot.co.id", - "blogspot.co.il", - "blogspot.co.ke", - "blogspot.co.nz", - "blogspot.co.uk", - "blogspot.co.za", - "blogspot.com", - "blogspot.com.ar", - "blogspot.com.au", - "blogspot.com.br", - "blogspot.com.by", - "blogspot.com.co", - "blogspot.com.cy", - "blogspot.com.ee", - "blogspot.com.eg", - "blogspot.com.es", - "blogspot.com.mt", - "blogspot.com.ng", - "blogspot.com.tr", - "blogspot.com.uy", - "blogspot.cv", - "blogspot.cz", - "blogspot.de", - "blogspot.dk", - "blogspot.fi", - "blogspot.fr", - "blogspot.gr", - "blogspot.hk", - "blogspot.hr", - "blogspot.hu", - "blogspot.ie", - "blogspot.in", - "blogspot.is", - "blogspot.it", - "blogspot.jp", - "blogspot.kr", - "blogspot.li", - "blogspot.lt", - "blogspot.lu", - "blogspot.md", - "blogspot.mk", - "blogspot.mr", - "blogspot.mx", - "blogspot.my", - "blogspot.nl", - "blogspot.no", - "blogspot.pe", - "blogspot.pt", - "blogspot.qa", - "blogspot.re", - "blogspot.ro", - "blogspot.rs", - "blogspot.ru", - "blogspot.se", - "blogspot.sg", - "blogspot.si", - "blogspot.sk", - "blogspot.sn", - "blogspot.td", - "blogspot.tw", - "blogspot.ug", - "blogspot.vn", - "cloudfunctions.net", - "cloud.goog", - "codespot.com", - "googleapis.com", - "googlecode.com", - "pagespeedmobilizer.com", - "publishproxy.com", - "withgoogle.com", - "withyoutube.com", - "hashbang.sh", - "hasura-app.io", - "hepforge.org", - "herokuapp.com", - "herokussl.com", - "moonscale.net", - "iki.fi", - "biz.at", - "info.at", - "ac.leg.br", - "al.leg.br", - "am.leg.br", - "ap.leg.br", - "ba.leg.br", - "ce.leg.br", - "df.leg.br", - "es.leg.br", - "go.leg.br", - "ma.leg.br", - "mg.leg.br", - "ms.leg.br", - "mt.leg.br", - "pa.leg.br", - "pb.leg.br", - "pe.leg.br", - "pi.leg.br", - "pr.leg.br", - "rj.leg.br", - "rn.leg.br", - "ro.leg.br", - "rr.leg.br", - "rs.leg.br", - "sc.leg.br", - "se.leg.br", - "sp.leg.br", - "to.leg.br", - "ipifony.net", - "*.triton.zone", 
- "*.cns.joyent.com", - "js.org", - "keymachine.de", - "knightpoint.systems", - "co.krd", - "edu.krd", - "barsy.bg", - "barsyonline.com", - "barsy.de", - "barsy.eu", - "barsy.in", - "barsy.net", - "barsy.online", - "barsy.support", - "*.magentosite.cloud", - "hb.cldmail.ru", - "meteorapp.com", - "eu.meteorapp.com", - "co.pl", - "azurewebsites.net", - "azure-mobile.net", - "cloudapp.net", - "bmoattachments.org", - "4u.com", - "ngrok.io", - "nfshost.com", - "nsupdate.info", - "nerdpol.ovh", - "blogsyte.com", - "brasilia.me", - "cable-modem.org", - "ciscofreak.com", - "collegefan.org", - "couchpotatofries.org", - "damnserver.com", - "ddns.me", - "ditchyourip.com", - "dnsfor.me", - "dnsiskinky.com", - "dvrcam.info", - "dynns.com", - "eating-organic.net", - "fantasyleague.cc", - "geekgalaxy.com", - "golffan.us", - "health-carereform.com", - "homesecuritymac.com", - "homesecuritypc.com", - "hopto.me", - "ilovecollege.info", - "loginto.me", - "mlbfan.org", - "mmafan.biz", - "myactivedirectory.com", - "mydissent.net", - "myeffect.net", - "mymediapc.net", - "mypsx.net", - "mysecuritycamera.com", - "mysecuritycamera.net", - "mysecuritycamera.org", - "net-freaks.com", - "nflfan.org", - "nhlfan.net", - "no-ip.ca", - "no-ip.co.uk", - "no-ip.net", - "noip.us", - "onthewifi.com", - "pgafan.net", - "point2this.com", - "pointto.us", - "privatizehealthinsurance.net", - "quicksytes.com", - "read-books.org", - "securitytactics.com", - "serveexchange.com", - "servehumour.com", - "servep2p.com", - "servesarcasm.com", - "stufftoread.com", - "ufcfan.org", - "unusualperson.com", - "workisboring.com", - "3utilities.com", - "bounceme.net", - "ddns.net", - "ddnsking.com", - "gotdns.ch", - "hopto.org", - "myftp.biz", - "myftp.org", - "myvnc.com", - "no-ip.biz", - "no-ip.info", - "no-ip.org", - "noip.me", - "redirectme.net", - "servebeer.com", - "serveblog.net", - "servecounterstrike.com", - "serveftp.com", - "servegame.com", - "servehalflife.com", - "servehttp.com", - "serveirc.com", - "serveminecraft.net", - "servemp3.com", - "servepics.com", - "servequake.com", - "sytes.net", - "webhop.me", - "zapto.org", - "nodum.co", - "nodum.io", - "nyc.mn", - "cya.gg", - "nid.io", - "opencraft.hosting", - "operaunite.com", - "outsystemscloud.com", - "ownprovider.com", - "oy.lc", - "pgfog.com", - "pagefrontapp.com", - "art.pl", - "gliwice.pl", - "krakow.pl", - "poznan.pl", - "wroc.pl", - "zakopane.pl", - "pantheonsite.io", - "gotpantheon.com", - "mypep.link", - "on-web.fr", - "*.platform.sh", - "*.platformsh.site", - "xen.prgmr.com", - "priv.at", - "protonet.io", - "chirurgiens-dentistes-en-france.fr", - "qa2.com", - "dev-myqnapcloud.com", - "alpha-myqnapcloud.com", - "myqnapcloud.com", - "*.quipelements.com", - "vapor.cloud", - "vaporcloud.io", - "rackmaze.com", - "rackmaze.net", - "rhcloud.com", - "hzc.io", - "wellbeingzone.eu", - "ptplus.fit", - "wellbeingzone.co.uk", - "sandcats.io", - "logoip.de", - "logoip.com", - "firewall-gateway.com", - "firewall-gateway.de", - "my-gateway.de", - "my-router.de", - "spdns.de", - "spdns.eu", - "firewall-gateway.net", - "my-firewall.org", - "myfirewall.org", - "spdns.org", - "*.sensiosite.cloud", - "biz.ua", - "co.ua", - "pp.ua", - "shiftedit.io", - "myshopblocks.com", - "1kapp.com", - "appchizi.com", - "applinzi.com", - "sinaapp.com", - "vipsinaapp.com", - "bounty-full.com", - "alpha.bounty-full.com", - "beta.bounty-full.com", - "static.land", - "dev.static.land", - "sites.static.land", - "apps.lair.io", - "*.stolos.io", - "spacekit.io", - "stackspace.space", - "storj.farm", - 
"diskstation.me", - "dscloud.biz", - "dscloud.me", - "dscloud.mobi", - "dsmynas.com", - "dsmynas.net", - "dsmynas.org", - "familyds.com", - "familyds.net", - "familyds.org", - "i234.me", - "myds.me", - "synology.me", - "vpnplus.to", - "taifun-dns.de", - "gda.pl", - "gdansk.pl", - "gdynia.pl", - "med.pl", - "sopot.pl", - "bloxcms.com", - "townnews-staging.com", - "*.transurl.be", - "*.transurl.eu", - "*.transurl.nl", - "tuxfamily.org", - "dd-dns.de", - "diskstation.eu", - "diskstation.org", - "dray-dns.de", - "draydns.de", - "dyn-vpn.de", - "dynvpn.de", - "mein-vigor.de", - "my-vigor.de", - "my-wan.de", - "syno-ds.de", - "synology-diskstation.de", - "synology-ds.de", - "uber.space", - "hk.com", - "hk.org", - "ltd.hk", - "inc.hk", - "lib.de.us", - "router.management", - "wedeploy.io", - "wedeploy.me", - "remotewd.com", - "wmflabs.org", - "xs4all.space", - "yolasite.com", - "ybo.faith", - "yombo.me", - "homelink.one", - "ybo.party", - "ybo.review", - "ybo.science", - "ybo.trade", - "za.net", - "za.org", - "now.sh", -} - -var nodeLabels = [...]string{ - "aaa", - "aarp", - "abarth", - "abb", - "abbott", - "abbvie", - "abc", - "able", - "abogado", - "abudhabi", - "ac", - "academy", - "accenture", - "accountant", - "accountants", - "aco", - "active", - "actor", - "ad", - "adac", - "ads", - "adult", - "ae", - "aeg", - "aero", - "aetna", - "af", - "afamilycompany", - "afl", - "africa", - "ag", - "agakhan", - "agency", - "ai", - "aig", - "aigo", - "airbus", - "airforce", - "airtel", - "akdn", - "al", - "alfaromeo", - "alibaba", - "alipay", - "allfinanz", - "allstate", - "ally", - "alsace", - "alstom", - "am", - "americanexpress", - "americanfamily", - "amex", - "amfam", - "amica", - "amsterdam", - "analytics", - "android", - "anquan", - "anz", - "ao", - "aol", - "apartments", - "app", - "apple", - "aq", - "aquarelle", - "ar", - "arab", - "aramco", - "archi", - "army", - "arpa", - "art", - "arte", - "as", - "asda", - "asia", - "associates", - "at", - "athleta", - "attorney", - "au", - "auction", - "audi", - "audible", - "audio", - "auspost", - "author", - "auto", - "autos", - "avianca", - "aw", - "aws", - "ax", - "axa", - "az", - "azure", - "ba", - "baby", - "baidu", - "banamex", - "bananarepublic", - "band", - "bank", - "bar", - "barcelona", - "barclaycard", - "barclays", - "barefoot", - "bargains", - "baseball", - "basketball", - "bauhaus", - "bayern", - "bb", - "bbc", - "bbt", - "bbva", - "bcg", - "bcn", - "bd", - "be", - "beats", - "beauty", - "beer", - "bentley", - "berlin", - "best", - "bestbuy", - "bet", - "bf", - "bg", - "bh", - "bharti", - "bi", - "bible", - "bid", - "bike", - "bing", - "bingo", - "bio", - "biz", - "bj", - "black", - "blackfriday", - "blanco", - "blockbuster", - "blog", - "bloomberg", - "blue", - "bm", - "bms", - "bmw", - "bn", - "bnl", - "bnpparibas", - "bo", - "boats", - "boehringer", - "bofa", - "bom", - "bond", - "boo", - "book", - "booking", - "boots", - "bosch", - "bostik", - "boston", - "bot", - "boutique", - "box", - "br", - "bradesco", - "bridgestone", - "broadway", - "broker", - "brother", - "brussels", - "bs", - "bt", - "budapest", - "bugatti", - "build", - "builders", - "business", - "buy", - "buzz", - "bv", - "bw", - "by", - "bz", - "bzh", - "ca", - "cab", - "cafe", - "cal", - "call", - "calvinklein", - "cam", - "camera", - "camp", - "cancerresearch", - "canon", - "capetown", - "capital", - "capitalone", - "car", - "caravan", - "cards", - "care", - "career", - "careers", - "cars", - "cartier", - "casa", - "case", - "caseih", - "cash", - "casino", - "cat", - 
"catering", - "catholic", - "cba", - "cbn", - "cbre", - "cbs", - "cc", - "cd", - "ceb", - "center", - "ceo", - "cern", - "cf", - "cfa", - "cfd", - "cg", - "ch", - "chanel", - "channel", - "chase", - "chat", - "cheap", - "chintai", - "chloe", - "christmas", - "chrome", - "chrysler", - "church", - "ci", - "cipriani", - "circle", - "cisco", - "citadel", - "citi", - "citic", - "city", - "cityeats", - "ck", - "cl", - "claims", - "cleaning", - "click", - "clinic", - "clinique", - "clothing", - "cloud", - "club", - "clubmed", - "cm", - "cn", - "co", - "coach", - "codes", - "coffee", - "college", - "cologne", - "com", - "comcast", - "commbank", - "community", - "company", - "compare", - "computer", - "comsec", - "condos", - "construction", - "consulting", - "contact", - "contractors", - "cooking", - "cookingchannel", - "cool", - "coop", - "corsica", - "country", - "coupon", - "coupons", - "courses", - "cr", - "credit", - "creditcard", - "creditunion", - "cricket", - "crown", - "crs", - "cruise", - "cruises", - "csc", - "cu", - "cuisinella", - "cv", - "cw", - "cx", - "cy", - "cymru", - "cyou", - "cz", - "dabur", - "dad", - "dance", - "data", - "date", - "dating", - "datsun", - "day", - "dclk", - "dds", - "de", - "deal", - "dealer", - "deals", - "degree", - "delivery", - "dell", - "deloitte", - "delta", - "democrat", - "dental", - "dentist", - "desi", - "design", - "dev", - "dhl", - "diamonds", - "diet", - "digital", - "direct", - "directory", - "discount", - "discover", - "dish", - "diy", - "dj", - "dk", - "dm", - "dnp", - "do", - "docs", - "doctor", - "dodge", - "dog", - "doha", - "domains", - "dot", - "download", - "drive", - "dtv", - "dubai", - "duck", - "dunlop", - "duns", - "dupont", - "durban", - "dvag", - "dvr", - "dz", - "earth", - "eat", - "ec", - "eco", - "edeka", - "edu", - "education", - "ee", - "eg", - "email", - "emerck", - "energy", - "engineer", - "engineering", - "enterprises", - "epost", - "epson", - "equipment", - "er", - "ericsson", - "erni", - "es", - "esq", - "estate", - "esurance", - "et", - "etisalat", - "eu", - "eurovision", - "eus", - "events", - "everbank", - "exchange", - "expert", - "exposed", - "express", - "extraspace", - "fage", - "fail", - "fairwinds", - "faith", - "family", - "fan", - "fans", - "farm", - "farmers", - "fashion", - "fast", - "fedex", - "feedback", - "ferrari", - "ferrero", - "fi", - "fiat", - "fidelity", - "fido", - "film", - "final", - "finance", - "financial", - "fire", - "firestone", - "firmdale", - "fish", - "fishing", - "fit", - "fitness", - "fj", - "fk", - "flickr", - "flights", - "flir", - "florist", - "flowers", - "fly", - "fm", - "fo", - "foo", - "food", - "foodnetwork", - "football", - "ford", - "forex", - "forsale", - "forum", - "foundation", - "fox", - "fr", - "free", - "fresenius", - "frl", - "frogans", - "frontdoor", - "frontier", - "ftr", - "fujitsu", - "fujixerox", - "fun", - "fund", - "furniture", - "futbol", - "fyi", - "ga", - "gal", - "gallery", - "gallo", - "gallup", - "game", - "games", - "gap", - "garden", - "gb", - "gbiz", - "gd", - "gdn", - "ge", - "gea", - "gent", - "genting", - "george", - "gf", - "gg", - "ggee", - "gh", - "gi", - "gift", - "gifts", - "gives", - "giving", - "gl", - "glade", - "glass", - "gle", - "global", - "globo", - "gm", - "gmail", - "gmbh", - "gmo", - "gmx", - "gn", - "godaddy", - "gold", - "goldpoint", - "golf", - "goo", - "goodhands", - "goodyear", - "goog", - "google", - "gop", - "got", - "gov", - "gp", - "gq", - "gr", - "grainger", - "graphics", - "gratis", - "green", - "gripe", - "grocery", - 
"group", - "gs", - "gt", - "gu", - "guardian", - "gucci", - "guge", - "guide", - "guitars", - "guru", - "gw", - "gy", - "hair", - "hamburg", - "hangout", - "haus", - "hbo", - "hdfc", - "hdfcbank", - "health", - "healthcare", - "help", - "helsinki", - "here", - "hermes", - "hgtv", - "hiphop", - "hisamitsu", - "hitachi", - "hiv", - "hk", - "hkt", - "hm", - "hn", - "hockey", - "holdings", - "holiday", - "homedepot", - "homegoods", - "homes", - "homesense", - "honda", - "honeywell", - "horse", - "hospital", - "host", - "hosting", - "hot", - "hoteles", - "hotels", - "hotmail", - "house", - "how", - "hr", - "hsbc", - "ht", - "htc", - "hu", - "hughes", - "hyatt", - "hyundai", - "ibm", - "icbc", - "ice", - "icu", - "id", - "ie", - "ieee", - "ifm", - "ikano", - "il", - "im", - "imamat", - "imdb", - "immo", - "immobilien", - "in", - "industries", - "infiniti", - "info", - "ing", - "ink", - "institute", - "insurance", - "insure", - "int", - "intel", - "international", - "intuit", - "investments", - "io", - "ipiranga", - "iq", - "ir", - "irish", - "is", - "iselect", - "ismaili", - "ist", - "istanbul", - "it", - "itau", - "itv", - "iveco", - "iwc", - "jaguar", - "java", - "jcb", - "jcp", - "je", - "jeep", - "jetzt", - "jewelry", - "jio", - "jlc", - "jll", - "jm", - "jmp", - "jnj", - "jo", - "jobs", - "joburg", - "jot", - "joy", - "jp", - "jpmorgan", - "jprs", - "juegos", - "juniper", - "kaufen", - "kddi", - "ke", - "kerryhotels", - "kerrylogistics", - "kerryproperties", - "kfh", - "kg", - "kh", - "ki", - "kia", - "kim", - "kinder", - "kindle", - "kitchen", - "kiwi", - "km", - "kn", - "koeln", - "komatsu", - "kosher", - "kp", - "kpmg", - "kpn", - "kr", - "krd", - "kred", - "kuokgroup", - "kw", - "ky", - "kyoto", - "kz", - "la", - "lacaixa", - "ladbrokes", - "lamborghini", - "lamer", - "lancaster", - "lancia", - "lancome", - "land", - "landrover", - "lanxess", - "lasalle", - "lat", - "latino", - "latrobe", - "law", - "lawyer", - "lb", - "lc", - "lds", - "lease", - "leclerc", - "lefrak", - "legal", - "lego", - "lexus", - "lgbt", - "li", - "liaison", - "lidl", - "life", - "lifeinsurance", - "lifestyle", - "lighting", - "like", - "lilly", - "limited", - "limo", - "lincoln", - "linde", - "link", - "lipsy", - "live", - "living", - "lixil", - "lk", - "loan", - "loans", - "locker", - "locus", - "loft", - "lol", - "london", - "lotte", - "lotto", - "love", - "lpl", - "lplfinancial", - "lr", - "ls", - "lt", - "ltd", - "ltda", - "lu", - "lundbeck", - "lupin", - "luxe", - "luxury", - "lv", - "ly", - "ma", - "macys", - "madrid", - "maif", - "maison", - "makeup", - "man", - "management", - "mango", - "map", - "market", - "marketing", - "markets", - "marriott", - "marshalls", - "maserati", - "mattel", - "mba", - "mc", - "mcd", - "mcdonalds", - "mckinsey", - "md", - "me", - "med", - "media", - "meet", - "melbourne", - "meme", - "memorial", - "men", - "menu", - "meo", - "merckmsd", - "metlife", - "mg", - "mh", - "miami", - "microsoft", - "mil", - "mini", - "mint", - "mit", - "mitsubishi", - "mk", - "ml", - "mlb", - "mls", - "mm", - "mma", - "mn", - "mo", - "mobi", - "mobile", - "mobily", - "moda", - "moe", - "moi", - "mom", - "monash", - "money", - "monster", - "montblanc", - "mopar", - "mormon", - "mortgage", - "moscow", - "moto", - "motorcycles", - "mov", - "movie", - "movistar", - "mp", - "mq", - "mr", - "ms", - "msd", - "mt", - "mtn", - "mtpc", - "mtr", - "mu", - "museum", - "mutual", - "mv", - "mw", - "mx", - "my", - "mz", - "na", - "nab", - "nadex", - "nagoya", - "name", - "nationwide", - "natura", - "navy", - 
"nba", - "nc", - "ne", - "nec", - "net", - "netbank", - "netflix", - "network", - "neustar", - "new", - "newholland", - "news", - "next", - "nextdirect", - "nexus", - "nf", - "nfl", - "ng", - "ngo", - "nhk", - "ni", - "nico", - "nike", - "nikon", - "ninja", - "nissan", - "nissay", - "nl", - "no", - "nokia", - "northwesternmutual", - "norton", - "now", - "nowruz", - "nowtv", - "np", - "nr", - "nra", - "nrw", - "ntt", - "nu", - "nyc", - "nz", - "obi", - "observer", - "off", - "office", - "okinawa", - "olayan", - "olayangroup", - "oldnavy", - "ollo", - "om", - "omega", - "one", - "ong", - "onion", - "onl", - "online", - "onyourside", - "ooo", - "open", - "oracle", - "orange", - "org", - "organic", - "origins", - "osaka", - "otsuka", - "ott", - "ovh", - "pa", - "page", - "pamperedchef", - "panasonic", - "panerai", - "paris", - "pars", - "partners", - "parts", - "party", - "passagens", - "pay", - "pccw", - "pe", - "pet", - "pf", - "pfizer", - "pg", - "ph", - "pharmacy", - "phd", - "philips", - "phone", - "photo", - "photography", - "photos", - "physio", - "piaget", - "pics", - "pictet", - "pictures", - "pid", - "pin", - "ping", - "pink", - "pioneer", - "pizza", - "pk", - "pl", - "place", - "play", - "playstation", - "plumbing", - "plus", - "pm", - "pn", - "pnc", - "pohl", - "poker", - "politie", - "porn", - "post", - "pr", - "pramerica", - "praxi", - "press", - "prime", - "pro", - "prod", - "productions", - "prof", - "progressive", - "promo", - "properties", - "property", - "protection", - "pru", - "prudential", - "ps", - "pt", - "pub", - "pw", - "pwc", - "py", - "qa", - "qpon", - "quebec", - "quest", - "qvc", - "racing", - "radio", - "raid", - "re", - "read", - "realestate", - "realtor", - "realty", - "recipes", - "red", - "redstone", - "redumbrella", - "rehab", - "reise", - "reisen", - "reit", - "reliance", - "ren", - "rent", - "rentals", - "repair", - "report", - "republican", - "rest", - "restaurant", - "review", - "reviews", - "rexroth", - "rich", - "richardli", - "ricoh", - "rightathome", - "ril", - "rio", - "rip", - "rmit", - "ro", - "rocher", - "rocks", - "rodeo", - "rogers", - "room", - "rs", - "rsvp", - "ru", - "rugby", - "ruhr", - "run", - "rw", - "rwe", - "ryukyu", - "sa", - "saarland", - "safe", - "safety", - "sakura", - "sale", - "salon", - "samsclub", - "samsung", - "sandvik", - "sandvikcoromant", - "sanofi", - "sap", - "sapo", - "sarl", - "sas", - "save", - "saxo", - "sb", - "sbi", - "sbs", - "sc", - "sca", - "scb", - "schaeffler", - "schmidt", - "scholarships", - "school", - "schule", - "schwarz", - "science", - "scjohnson", - "scor", - "scot", - "sd", - "se", - "search", - "seat", - "secure", - "security", - "seek", - "select", - "sener", - "services", - "ses", - "seven", - "sew", - "sex", - "sexy", - "sfr", - "sg", - "sh", - "shangrila", - "sharp", - "shaw", - "shell", - "shia", - "shiksha", - "shoes", - "shop", - "shopping", - "shouji", - "show", - "showtime", - "shriram", - "si", - "silk", - "sina", - "singles", - "site", - "sj", - "sk", - "ski", - "skin", - "sky", - "skype", - "sl", - "sling", - "sm", - "smart", - "smile", - "sn", - "sncf", - "so", - "soccer", - "social", - "softbank", - "software", - "sohu", - "solar", - "solutions", - "song", - "sony", - "soy", - "space", - "spiegel", - "spot", - "spreadbetting", - "sr", - "srl", - "srt", - "st", - "stada", - "staples", - "star", - "starhub", - "statebank", - "statefarm", - "statoil", - "stc", - "stcgroup", - "stockholm", - "storage", - "store", - "stream", - "studio", - "study", - "style", - "su", - "sucks", - 
"supplies", - "supply", - "support", - "surf", - "surgery", - "suzuki", - "sv", - "swatch", - "swiftcover", - "swiss", - "sx", - "sy", - "sydney", - "symantec", - "systems", - "sz", - "tab", - "taipei", - "talk", - "taobao", - "target", - "tatamotors", - "tatar", - "tattoo", - "tax", - "taxi", - "tc", - "tci", - "td", - "tdk", - "team", - "tech", - "technology", - "tel", - "telecity", - "telefonica", - "temasek", - "tennis", - "teva", - "tf", - "tg", - "th", - "thd", - "theater", - "theatre", - "tiaa", - "tickets", - "tienda", - "tiffany", - "tips", - "tires", - "tirol", - "tj", - "tjmaxx", - "tjx", - "tk", - "tkmaxx", - "tl", - "tm", - "tmall", - "tn", - "to", - "today", - "tokyo", - "tools", - "top", - "toray", - "toshiba", - "total", - "tours", - "town", - "toyota", - "toys", - "tr", - "trade", - "trading", - "training", - "travel", - "travelchannel", - "travelers", - "travelersinsurance", - "trust", - "trv", - "tt", - "tube", - "tui", - "tunes", - "tushu", - "tv", - "tvs", - "tw", - "tz", - "ua", - "ubank", - "ubs", - "uconnect", - "ug", - "uk", - "unicom", - "university", - "uno", - "uol", - "ups", - "us", - "uy", - "uz", - "va", - "vacations", - "vana", - "vanguard", - "vc", - "ve", - "vegas", - "ventures", - "verisign", - "versicherung", - "vet", - "vg", - "vi", - "viajes", - "video", - "vig", - "viking", - "villas", - "vin", - "vip", - "virgin", - "visa", - "vision", - "vista", - "vistaprint", - "viva", - "vivo", - "vlaanderen", - "vn", - "vodka", - "volkswagen", - "volvo", - "vote", - "voting", - "voto", - "voyage", - "vu", - "vuelos", - "wales", - "walmart", - "walter", - "wang", - "wanggou", - "warman", - "watch", - "watches", - "weather", - "weatherchannel", - "webcam", - "weber", - "website", - "wed", - "wedding", - "weibo", - "weir", - "wf", - "whoswho", - "wien", - "wiki", - "williamhill", - "win", - "windows", - "wine", - "winners", - "wme", - "wolterskluwer", - "woodside", - "work", - "works", - "world", - "wow", - "ws", - "wtc", - "wtf", - "xbox", - "xerox", - "xfinity", - "xihuan", - "xin", - "xn--11b4c3d", - "xn--1ck2e1b", - "xn--1qqw23a", - "xn--30rr7y", - "xn--3bst00m", - "xn--3ds443g", - "xn--3e0b707e", - "xn--3oq18vl8pn36a", - "xn--3pxu8k", - "xn--42c2d9a", - "xn--45brj9c", - "xn--45q11c", - "xn--4gbrim", - "xn--54b7fta0cc", - "xn--55qw42g", - "xn--55qx5d", - "xn--5su34j936bgsg", - "xn--5tzm5g", - "xn--6frz82g", - "xn--6qq986b3xl", - "xn--80adxhks", - "xn--80ao21a", - "xn--80aqecdr1a", - "xn--80asehdb", - "xn--80aswg", - "xn--8y0a063a", - "xn--90a3ac", - "xn--90ais", - "xn--9dbq2a", - "xn--9et52u", - "xn--9krt00a", - "xn--b4w605ferd", - "xn--bck1b9a5dre4c", - "xn--c1avg", - "xn--c2br7g", - "xn--cck2b3b", - "xn--cg4bki", - "xn--clchc0ea0b2g2a9gcd", - "xn--czr694b", - "xn--czrs0t", - "xn--czru2d", - "xn--d1acj3b", - "xn--d1alf", - "xn--e1a4c", - "xn--eckvdtc9d", - "xn--efvy88h", - "xn--estv75g", - "xn--fct429k", - "xn--fhbei", - "xn--fiq228c5hs", - "xn--fiq64b", - "xn--fiqs8s", - "xn--fiqz9s", - "xn--fjq720a", - "xn--flw351e", - "xn--fpcrj9c3d", - "xn--fzc2c9e2c", - "xn--fzys8d69uvgm", - "xn--g2xx48c", - "xn--gckr3f0f", - "xn--gecrj9c", - "xn--gk3at1e", - "xn--h2brj9c", - "xn--hxt814e", - "xn--i1b6b1a6a2e", - "xn--imr513n", - "xn--io0a7i", - "xn--j1aef", - "xn--j1amh", - "xn--j6w193g", - "xn--jlq61u9w7b", - "xn--jvr189m", - "xn--kcrx77d1x4a", - "xn--kprw13d", - "xn--kpry57d", - "xn--kpu716f", - "xn--kput3i", - "xn--l1acc", - "xn--lgbbat1ad8j", - "xn--mgb2ddes", - "xn--mgb9awbf", - "xn--mgba3a3ejt", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - "xn--mgba7c0bbn0a", - 
"xn--mgbaakc7dvf", - "xn--mgbaam7a8h", - "xn--mgbab2bd", - "xn--mgbai9a5eva00b", - "xn--mgbai9azgqp6j", - "xn--mgbayh7gpa", - "xn--mgbb9fbpob", - "xn--mgbbh1a71e", - "xn--mgbc0a9azcg", - "xn--mgbca7dzdo", - "xn--mgberp4a5d4a87g", - "xn--mgberp4a5d4ar", - "xn--mgbi4ecexp", - "xn--mgbpl2fh", - "xn--mgbqly7c0a67fbc", - "xn--mgbqly7cvafr", - "xn--mgbt3dhd", - "xn--mgbtf8fl", - "xn--mgbtx2b", - "xn--mgbx4cd0ab", - "xn--mix082f", - "xn--mix891f", - "xn--mk1bu44c", - "xn--mxtq1m", - "xn--ngbc5azd", - "xn--ngbe9e0a", - "xn--ngbrx", - "xn--nnx388a", - "xn--node", - "xn--nqv7f", - "xn--nqv7fs00ema", - "xn--nyqy26a", - "xn--o3cw4h", - "xn--ogbpf8fl", - "xn--p1acf", - "xn--p1ai", - "xn--pbt977c", - "xn--pgbs0dh", - "xn--pssy2u", - "xn--q9jyb4c", - "xn--qcka1pmc", - "xn--qxam", - "xn--rhqv96g", - "xn--rovu88b", - "xn--s9brj9c", - "xn--ses554g", - "xn--t60b56a", - "xn--tckwe", - "xn--tiq49xqyj", - "xn--unup4y", - "xn--vermgensberater-ctb", - "xn--vermgensberatung-pwb", - "xn--vhquv", - "xn--vuq861b", - "xn--w4r85el8fhu5dnra", - "xn--w4rs40l", - "xn--wgbh1c", - "xn--wgbl6a", - "xn--xhq521b", - "xn--xkc2al3hye2a", - "xn--xkc2dl3a5ee0h", - "xn--y9a3aq", - "xn--yfro4i67o", - "xn--ygbi2ammx", - "xn--zfr164b", - "xperia", - "xxx", - "xyz", - "yachts", - "yahoo", - "yamaxun", - "yandex", - "ye", - "yodobashi", - "yoga", - "yokohama", - "you", - "youtube", - "yt", - "yun", - "za", - "zappos", - "zara", - "zero", - "zip", - "zippo", - "zm", - "zone", - "zuerich", - "zw", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "nom", - "ac", - "blogspot", - "co", - "gov", - "mil", - "net", - "org", - "sch", - "accident-investigation", - "accident-prevention", - "aerobatic", - "aeroclub", - "aerodrome", - "agents", - "air-surveillance", - "air-traffic-control", - "aircraft", - "airline", - "airport", - "airtraffic", - "ambulance", - "amusement", - "association", - "author", - "ballooning", - "broker", - "caa", - "cargo", - "catering", - "certification", - "championship", - "charter", - "civilaviation", - "club", - "conference", - "consultant", - "consulting", - "control", - "council", - "crew", - "design", - "dgca", - "educator", - "emergency", - "engine", - "engineer", - "entertainment", - "equipment", - "exchange", - "express", - "federation", - "flight", - "freight", - "fuel", - "gliding", - "government", - "groundhandling", - "group", - "hanggliding", - "homebuilt", - "insurance", - "journal", - "journalist", - "leasing", - "logistics", - "magazine", - "maintenance", - "media", - "microlight", - "modelling", - "navigation", - "parachuting", - "paragliding", - "passenger-association", - "pilot", - "press", - "production", - "recreation", - "repbody", - "res", - "research", - "rotorcraft", - "safety", - "scientist", - "services", - "show", - "skydiving", - "software", - "student", - "trader", - "trading", - "trainer", - "union", - "workinggroup", - "works", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "net", - "nom", - "org", - "com", - "net", - "off", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "blogspot", - "co", - "ed", - "gv", - "it", - "og", - "pb", - "com", - "edu", - "gob", - "gov", - "int", - "mil", - "musica", - "net", - "org", - "tur", - "blogspot", - "e164", - "in-addr", - "ip6", - "iris", - "uri", - "urn", - "gov", - "cloudns", - "ac", - "biz", - "co", - "futurehosting", - "futuremailing", - "gv", - "info", - "or", - "ortsinfo", - "priv", - "blogspot", - "ex", - "kunden", - "act", - "asn", - "com", - "conf", - "edu", - "gov", - "id", - "info", - 
"net", - "nsw", - "nt", - "org", - "oz", - "qld", - "sa", - "tas", - "vic", - "wa", - "blogspot", - "act", - "nsw", - "nt", - "qld", - "sa", - "tas", - "vic", - "wa", - "qld", - "sa", - "tas", - "vic", - "wa", - "com", - "biz", - "com", - "edu", - "gov", - "info", - "int", - "mil", - "name", - "net", - "org", - "pp", - "pro", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "biz", - "co", - "com", - "edu", - "gov", - "info", - "net", - "org", - "store", - "tv", - "ac", - "blogspot", - "transurl", - "gov", - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "a", - "b", - "barsy", - "blogspot", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "x", - "y", - "z", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "edu", - "or", - "org", - "cloudns", - "dscloud", - "dyndns", - "for-better", - "for-more", - "for-some", - "for-the", - "mmafan", - "myftp", - "no-ip", - "selfip", - "webhop", - "asso", - "barreau", - "blogspot", - "gouv", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gob", - "gov", - "int", - "mil", - "net", - "org", - "tv", - "adm", - "adv", - "agr", - "am", - "arq", - "art", - "ato", - "b", - "belem", - "bio", - "blog", - "bmd", - "cim", - "cng", - "cnt", - "com", - "coop", - "cri", - "def", - "ecn", - "eco", - "edu", - "emp", - "eng", - "esp", - "etc", - "eti", - "far", - "flog", - "floripa", - "fm", - "fnd", - "fot", - "fst", - "g12", - "ggf", - "gov", - "imb", - "ind", - "inf", - "jampa", - "jor", - "jus", - "leg", - "lel", - "mat", - "med", - "mil", - "mp", - "mus", - "net", - "nom", - "not", - "ntr", - "odo", - "org", - "poa", - "ppg", - "pro", - "psc", - "psi", - "qsl", - "radio", - "rec", - "recife", - "slg", - "srv", - "taxi", - "teo", - "tmp", - "trd", - "tur", - "tv", - "vet", - "vix", - "vlog", - "wiki", - "zlg", - "blogspot", - "ac", - "al", - "am", - "ap", - "ba", - "ce", - "df", - "es", - "go", - "ma", - "mg", - "ms", - "mt", - "pa", - "pb", - "pe", - "pi", - "pr", - "rj", - "rn", - "ro", - "rr", - "rs", - "sc", - "se", - "sp", - "to", - "ac", - "al", - "am", - "ap", - "ba", - "ce", - "df", - "es", - "go", - "ma", - "mg", - "ms", - "mt", - "pa", - "pb", - "pe", - "pi", - "pr", - "rj", - "rn", - "ro", - "rr", - "rs", - "sc", - "se", - "sp", - "to", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "net", - "org", - "co", - "org", - "com", - "gov", - "mil", - "of", - "blogspot", - "com", - "edu", - "gov", - "net", - "org", - "za", - "ab", - "awdev", - "bc", - "blogspot", - "co", - "gc", - "mb", - "nb", - "nf", - "nl", - "no-ip", - "ns", - "nt", - "nu", - "on", - "pe", - "qc", - "sk", - "yk", - "cloudns", - "fantasyleague", - "ftpaccess", - "game-server", - "myphotos", - "scrapping", - "twmail", - "gov", - "blogspot", - "blogspot", - "gotdns", - "square7", - "ac", - "asso", - "co", - "com", - "ed", - "edu", - "go", - "gouv", - "int", - "md", - "net", - "or", - "org", - "presse", - "xn--aroport-bya", - "www", - "blogspot", - "co", - "gob", - "gov", - "mil", - "magentosite", - "myfusion", - "sensiosite", - "statics", - "vapor", - "cloudns", - "co", - "com", - "gov", - "net", - "ac", - "ah", - "bj", - "com", - "cq", - "edu", - "fj", - "gd", - "gov", - "gs", - "gx", - "gz", - "ha", - "hb", - "he", - "hi", - "hk", - "hl", - "hn", - "jl", - "js", - "jx", - "ln", - "mil", - "mo", - "net", - "nm", - "nx", - "org", - "qh", - "sc", - "sd", - "sh", - "sn", - "sx", - "tj", - "tw", - 
"xj", - "xn--55qx5d", - "xn--io0a7i", - "xn--od0alg", - "xz", - "yn", - "zj", - "amazonaws", - "cn-north-1", - "compute", - "elb", - "elasticbeanstalk", - "s3", - "arts", - "com", - "edu", - "firm", - "gov", - "info", - "int", - "mil", - "net", - "nodum", - "nom", - "org", - "rec", - "web", - "blogspot", - "0emm", - "1kapp", - "3utilities", - "4u", - "africa", - "alpha-myqnapcloud", - "amazonaws", - "appchizi", - "applinzi", - "appspot", - "ar", - "barsyonline", - "betainabox", - "blogdns", - "blogspot", - "blogsyte", - "bloxcms", - "bounty-full", - "bplaced", - "br", - "cechire", - "ciscofreak", - "cloudcontrolapp", - "cloudcontrolled", - "cn", - "co", - "codespot", - "damnserver", - "ddnsking", - "de", - "dev-myqnapcloud", - "ditchyourip", - "dnsalias", - "dnsdojo", - "dnsiskinky", - "doesntexist", - "dontexist", - "doomdns", - "dreamhosters", - "dsmynas", - "dyn-o-saur", - "dynalias", - "dyndns-at-home", - "dyndns-at-work", - "dyndns-blog", - "dyndns-free", - "dyndns-home", - "dyndns-ip", - "dyndns-mail", - "dyndns-office", - "dyndns-pics", - "dyndns-remote", - "dyndns-server", - "dyndns-web", - "dyndns-wiki", - "dyndns-work", - "dynns", - "elasticbeanstalk", - "est-a-la-maison", - "est-a-la-masion", - "est-le-patron", - "est-mon-blogueur", - "eu", - "evennode", - "familyds", - "fbsbx", - "firebaseapp", - "firewall-gateway", - "flynnhub", - "freebox-os", - "freeboxos", - "from-ak", - "from-al", - "from-ar", - "from-ca", - "from-ct", - "from-dc", - "from-de", - "from-fl", - "from-ga", - "from-hi", - "from-ia", - "from-id", - "from-il", - "from-in", - "from-ks", - "from-ky", - "from-ma", - "from-md", - "from-mi", - "from-mn", - "from-mo", - "from-ms", - "from-mt", - "from-nc", - "from-nd", - "from-ne", - "from-nh", - "from-nj", - "from-nm", - "from-nv", - "from-oh", - "from-ok", - "from-or", - "from-pa", - "from-pr", - "from-ri", - "from-sc", - "from-sd", - "from-tn", - "from-tx", - "from-ut", - "from-va", - "from-vt", - "from-wa", - "from-wi", - "from-wv", - "from-wy", - "gb", - "geekgalaxy", - "getmyip", - "githubcloud", - "githubcloudusercontent", - "githubusercontent", - "googleapis", - "googlecode", - "gotdns", - "gotpantheon", - "gr", - "health-carereform", - "herokuapp", - "herokussl", - "hk", - "hobby-site", - "homelinux", - "homesecuritymac", - "homesecuritypc", - "homeunix", - "hu", - "iamallama", - "is-a-anarchist", - "is-a-blogger", - "is-a-bookkeeper", - "is-a-bulls-fan", - "is-a-caterer", - "is-a-chef", - "is-a-conservative", - "is-a-cpa", - "is-a-cubicle-slave", - "is-a-democrat", - "is-a-designer", - "is-a-doctor", - "is-a-financialadvisor", - "is-a-geek", - "is-a-green", - "is-a-guru", - "is-a-hard-worker", - "is-a-hunter", - "is-a-landscaper", - "is-a-lawyer", - "is-a-liberal", - "is-a-libertarian", - "is-a-llama", - "is-a-musician", - "is-a-nascarfan", - "is-a-nurse", - "is-a-painter", - "is-a-personaltrainer", - "is-a-photographer", - "is-a-player", - "is-a-republican", - "is-a-rockstar", - "is-a-socialist", - "is-a-student", - "is-a-teacher", - "is-a-techie", - "is-a-therapist", - "is-an-accountant", - "is-an-actor", - "is-an-actress", - "is-an-anarchist", - "is-an-artist", - "is-an-engineer", - "is-an-entertainer", - "is-certified", - "is-gone", - "is-into-anime", - "is-into-cars", - "is-into-cartoons", - "is-into-games", - "is-leet", - "is-not-certified", - "is-slick", - "is-uberleet", - "is-with-theband", - "isa-geek", - "isa-hockeynut", - "issmarterthanyou", - "joyent", - "jpn", - "kr", - "likes-pie", - "likescandy", - "logoip", - "meteorapp", - "mex", - 
"myactivedirectory", - "myasustor", - "mydrobo", - "myqnapcloud", - "mysecuritycamera", - "myshopblocks", - "myvnc", - "neat-url", - "net-freaks", - "nfshost", - "no", - "on-aptible", - "onthewifi", - "operaunite", - "outsystemscloud", - "ownprovider", - "pagefrontapp", - "pagespeedmobilizer", - "pgfog", - "point2this", - "prgmr", - "publishproxy", - "qa2", - "qc", - "quicksytes", - "quipelements", - "rackmaze", - "remotewd", - "rhcloud", - "ru", - "sa", - "saves-the-whales", - "se", - "securitytactics", - "selfip", - "sells-for-less", - "sells-for-u", - "servebbs", - "servebeer", - "servecounterstrike", - "serveexchange", - "serveftp", - "servegame", - "servehalflife", - "servehttp", - "servehumour", - "serveirc", - "servemp3", - "servep2p", - "servepics", - "servequake", - "servesarcasm", - "simple-url", - "sinaapp", - "space-to-rent", - "stufftoread", - "teaches-yoga", - "townnews-staging", - "uk", - "unusualperson", - "us", - "uy", - "vipsinaapp", - "withgoogle", - "withyoutube", - "workisboring", - "writesthisblog", - "xenapponazure", - "yolasite", - "za", - "ap-northeast-1", - "ap-northeast-2", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "compute", - "compute-1", - "elb", - "eu-central-1", - "eu-west-1", - "eu-west-2", - "s3", - "s3-ap-northeast-1", - "s3-ap-northeast-2", - "s3-ap-south-1", - "s3-ap-southeast-1", - "s3-ap-southeast-2", - "s3-ca-central-1", - "s3-eu-central-1", - "s3-eu-west-1", - "s3-eu-west-2", - "s3-external-1", - "s3-fips-us-gov-west-1", - "s3-sa-east-1", - "s3-us-east-2", - "s3-us-gov-west-1", - "s3-us-west-1", - "s3-us-west-2", - "s3-website-ap-northeast-1", - "s3-website-ap-southeast-1", - "s3-website-ap-southeast-2", - "s3-website-eu-west-1", - "s3-website-sa-east-1", - "s3-website-us-east-1", - "s3-website-us-west-1", - "s3-website-us-west-2", - "sa-east-1", - "us-east-1", - "us-east-2", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "alpha", - "beta", - "eu-1", - "eu-2", - "eu-3", - "us-1", - "us-2", - "us-3", - "apps", - "api", - "ext", - "gist", - "cns", - "eu", - "xen", - "ac", - "co", - "ed", - "fi", - "go", - "or", - "sa", - "com", - "edu", - "gov", - "inf", - "net", - "org", - "blogspot", - "com", - "edu", - "net", - "org", - "ath", - "gov", - "ac", - "biz", - "com", - "ekloges", - "gov", - "ltd", - "name", - "net", - "org", - "parliament", - "press", - "pro", - "tm", - "blogspot", - "blogspot", - "co", - "e4", - "realm", - "barsy", - "blogspot", - "bplaced", - "com", - "cosidns", - "dd-dns", - "ddnss", - "dnshome", - "dnsupdater", - "dray-dns", - "draydns", - "dyn-ip24", - "dyn-vpn", - "dynamisches-dns", - "dyndns1", - "dynvpn", - "firewall-gateway", - "fuettertdasnetz", - "goip", - "home-webserver", - "internet-dns", - "isteingeek", - "istmein", - "keymachine", - "l-o-g-i-n", - "lebtimnetz", - "leitungsen", - "logoip", - "mein-vigor", - "my-gateway", - "my-router", - "my-vigor", - "my-wan", - "myhome-server", - "spdns", - "square7", - "syno-ds", - "synology-diskstation", - "synology-ds", - "taifun-dns", - "traeumtgerade", - "dyn", - "dyn", - "dyndns", - "dyn", - "biz", - "blogspot", - "co", - "firm", - "reg", - "store", - "com", - "edu", - "gov", - 
"net", - "org", - "art", - "com", - "edu", - "gob", - "gov", - "mil", - "net", - "org", - "sld", - "web", - "art", - "asso", - "com", - "edu", - "gov", - "net", - "org", - "pol", - "com", - "edu", - "fin", - "gob", - "gov", - "info", - "k12", - "med", - "mil", - "net", - "org", - "pro", - "aip", - "com", - "edu", - "fie", - "gov", - "lib", - "med", - "org", - "pri", - "riik", - "blogspot", - "com", - "edu", - "eun", - "gov", - "mil", - "name", - "net", - "org", - "sci", - "blogspot", - "com", - "edu", - "gob", - "nom", - "org", - "blogspot", - "compute", - "biz", - "com", - "edu", - "gov", - "info", - "name", - "net", - "org", - "barsy", - "cloudns", - "diskstation", - "mycd", - "spdns", - "transurl", - "wellbeingzone", - "party", - "user", - "ybo", - "storj", - "aland", - "blogspot", - "dy", - "iki", - "ptplus", - "aeroport", - "assedic", - "asso", - "avocat", - "avoues", - "blogspot", - "cci", - "chambagri", - "chirurgiens-dentistes", - "chirurgiens-dentistes-en-france", - "com", - "experts-comptables", - "fbx-os", - "fbxos", - "freebox-os", - "freeboxos", - "geometre-expert", - "gouv", - "greta", - "huissier-justice", - "medecin", - "nom", - "notaires", - "on-web", - "pharmacien", - "port", - "prd", - "presse", - "tm", - "veterinaire", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "pvt", - "co", - "cya", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "org", - "com", - "edu", - "gov", - "ltd", - "mod", - "org", - "co", - "com", - "edu", - "net", - "org", - "ac", - "com", - "edu", - "gov", - "net", - "org", - "cloud", - "asso", - "com", - "edu", - "mobi", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gob", - "ind", - "mil", - "net", - "org", - "co", - "com", - "edu", - "gov", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "idv", - "inc", - "ltd", - "net", - "org", - "xn--55qx5d", - "xn--ciqpn", - "xn--gmq050i", - "xn--gmqw5a", - "xn--io0a7i", - "xn--lcvr32d", - "xn--mk0axi", - "xn--mxtq1m", - "xn--od0alg", - "xn--od0aq3b", - "xn--tn0ag", - "xn--uc0atv", - "xn--uc0ay4a", - "xn--wcvs22d", - "xn--zf0avx", - "com", - "edu", - "gob", - "mil", - "net", - "org", - "opencraft", - "blogspot", - "com", - "from", - "iz", - "name", - "adult", - "art", - "asso", - "com", - "coop", - "edu", - "firm", - "gouv", - "info", - "med", - "net", - "org", - "perso", - "pol", - "pro", - "rel", - "shop", - "2000", - "agrar", - "blogspot", - "bolt", - "casino", - "city", - "co", - "erotica", - "erotika", - "film", - "forum", - "games", - "hotel", - "info", - "ingatlan", - "jogasz", - "konyvelo", - "lakas", - "media", - "news", - "org", - "priv", - "reklam", - "sex", - "shop", - "sport", - "suli", - "szex", - "tm", - "tozsde", - "utazas", - "video", - "ac", - "biz", - "co", - "desa", - "go", - "mil", - "my", - "net", - "or", - "sch", - "web", - "blogspot", - "blogspot", - "gov", - "ac", - "co", - "gov", - "idf", - "k12", - "muni", - "net", - "org", - "blogspot", - "ac", - "co", - "com", - "net", - "org", - "ro", - "tt", - "tv", - "ltd", - "plc", - "ac", - "barsy", - "blogspot", - "cloudns", - "co", - "edu", - "firm", - "gen", - "gov", - "ind", - "mil", - "net", - "nic", - "org", - "res", - "barrel-of-knowledge", - "barrell-of-knowledge", - "cloudns", - "dvrcam", - "dynamic-dns", - "dyndns", - "for-our", - "groks-the", - "groks-this", - "here-for-more", - "ilovecollege", - "knowsitall", - "no-ip", - "nsupdate", - "selfip", - "webhop", - "eu", - "backplaneapp", - "boxfuse", - "browsersafetymark", - "com", - "dedyn", - "definima", - 
"drud", - "enonic", - "github", - "gitlab", - "hasura-app", - "hzc", - "lair", - "ngrok", - "nid", - "nodum", - "pantheonsite", - "protonet", - "sandcats", - "shiftedit", - "spacekit", - "stolos", - "vaporcloud", - "wedeploy", - "customer", - "apps", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "ac", - "co", - "gov", - "id", - "net", - "org", - "sch", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - "blogspot", - "com", - "cupcake", - "edu", - "gov", - "int", - "net", - "org", - "abr", - "abruzzo", - "ag", - "agrigento", - "al", - "alessandria", - "alto-adige", - "altoadige", - "an", - "ancona", - "andria-barletta-trani", - "andria-trani-barletta", - "andriabarlettatrani", - "andriatranibarletta", - "ao", - "aosta", - "aosta-valley", - "aostavalley", - "aoste", - "ap", - "aq", - "aquila", - "ar", - "arezzo", - "ascoli-piceno", - "ascolipiceno", - "asti", - "at", - "av", - "avellino", - "ba", - "balsan", - "bari", - "barletta-trani-andria", - "barlettatraniandria", - "bas", - "basilicata", - "belluno", - "benevento", - "bergamo", - "bg", - "bi", - "biella", - "bl", - "blogspot", - "bn", - "bo", - "bologna", - "bolzano", - "bozen", - "br", - "brescia", - "brindisi", - "bs", - "bt", - "bz", - "ca", - "cagliari", - "cal", - "calabria", - "caltanissetta", - "cam", - "campania", - "campidano-medio", - "campidanomedio", - "campobasso", - "carbonia-iglesias", - "carboniaiglesias", - "carrara-massa", - "carraramassa", - "caserta", - "catania", - "catanzaro", - "cb", - "ce", - "cesena-forli", - "cesenaforli", - "ch", - "chieti", - "ci", - "cl", - "cn", - "co", - "como", - "cosenza", - "cr", - "cremona", - "crotone", - "cs", - "ct", - "cuneo", - "cz", - "dell-ogliastra", - "dellogliastra", - "edu", - "emilia-romagna", - "emiliaromagna", - "emr", - "en", - "enna", - "fc", - "fe", - "fermo", - "ferrara", - "fg", - "fi", - "firenze", - "florence", - "fm", - "foggia", - "forli-cesena", - "forlicesena", - "fr", - "friuli-v-giulia", - "friuli-ve-giulia", - "friuli-vegiulia", - "friuli-venezia-giulia", - "friuli-veneziagiulia", - "friuli-vgiulia", - "friuliv-giulia", - "friulive-giulia", - "friulivegiulia", - "friulivenezia-giulia", - "friuliveneziagiulia", - "friulivgiulia", - "frosinone", - "fvg", - "ge", - "genoa", - "genova", - "go", - "gorizia", - "gov", - "gr", - "grosseto", - "iglesias-carbonia", - "iglesiascarbonia", - "im", - "imperia", - "is", - "isernia", - "kr", - "la-spezia", - "laquila", - "laspezia", - "latina", - "laz", - "lazio", - "lc", - "le", - "lecce", - "lecco", - "li", - "lig", - "liguria", - "livorno", - "lo", - "lodi", - "lom", - "lombardia", - "lombardy", - "lt", - "lu", - "lucania", - "lucca", - "macerata", - "mantova", - "mar", - "marche", - "massa-carrara", - "massacarrara", - "matera", - "mb", - "mc", - "me", - "medio-campidano", - "mediocampidano", - "messina", - "mi", - "milan", - "milano", - "mn", - "mo", - "modena", - "mol", - "molise", - "monza", - "monza-brianza", - "monza-e-della-brianza", - "monzabrianza", - "monzaebrianza", - "monzaedellabrianza", - "ms", - "mt", - "na", - "naples", - "napoli", - "no", - "novara", - "nu", - "nuoro", - "og", - "ogliastra", - "olbia-tempio", - "olbiatempio", - "or", - "oristano", - "ot", - "pa", - "padova", - "padua", - "palermo", - "parma", - "pavia", - "pc", - "pd", - "pe", - "perugia", - "pesaro-urbino", - "pesarourbino", - "pescara", - "pg", - "pi", - "piacenza", - "piedmont", - "piemonte", - "pisa", - "pistoia", - "pmn", - "pn", - "po", - "pordenone", - "potenza", - "pr", - "prato", - "pt", - "pu", - "pug", - "puglia", - "pv", 
- "pz", - "ra", - "ragusa", - "ravenna", - "rc", - "re", - "reggio-calabria", - "reggio-emilia", - "reggiocalabria", - "reggioemilia", - "rg", - "ri", - "rieti", - "rimini", - "rm", - "rn", - "ro", - "roma", - "rome", - "rovigo", - "sa", - "salerno", - "sar", - "sardegna", - "sardinia", - "sassari", - "savona", - "si", - "sic", - "sicilia", - "sicily", - "siena", - "siracusa", - "so", - "sondrio", - "sp", - "sr", - "ss", - "suedtirol", - "sv", - "ta", - "taa", - "taranto", - "te", - "tempio-olbia", - "tempioolbia", - "teramo", - "terni", - "tn", - "to", - "torino", - "tos", - "toscana", - "tp", - "tr", - "trani-andria-barletta", - "trani-barletta-andria", - "traniandriabarletta", - "tranibarlettaandria", - "trapani", - "trentino", - "trentino-a-adige", - "trentino-aadige", - "trentino-alto-adige", - "trentino-altoadige", - "trentino-s-tirol", - "trentino-stirol", - "trentino-sud-tirol", - "trentino-sudtirol", - "trentino-sued-tirol", - "trentino-suedtirol", - "trentinoa-adige", - "trentinoaadige", - "trentinoalto-adige", - "trentinoaltoadige", - "trentinos-tirol", - "trentinostirol", - "trentinosud-tirol", - "trentinosudtirol", - "trentinosued-tirol", - "trentinosuedtirol", - "trento", - "treviso", - "trieste", - "ts", - "turin", - "tuscany", - "tv", - "ud", - "udine", - "umb", - "umbria", - "urbino-pesaro", - "urbinopesaro", - "va", - "val-d-aosta", - "val-daosta", - "vald-aosta", - "valdaosta", - "valle-aosta", - "valle-d-aosta", - "valle-daosta", - "valleaosta", - "valled-aosta", - "valledaosta", - "vallee-aoste", - "valleeaoste", - "vao", - "varese", - "vb", - "vc", - "vda", - "ve", - "ven", - "veneto", - "venezia", - "venice", - "verbania", - "vercelli", - "verona", - "vi", - "vibo-valentia", - "vibovalentia", - "vicenza", - "viterbo", - "vr", - "vs", - "vt", - "vv", - "co", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "org", - "sch", - "ac", - "ad", - "aichi", - "akita", - "aomori", - "blogspot", - "chiba", - "co", - "ed", - "ehime", - "fukui", - "fukuoka", - "fukushima", - "gifu", - "go", - "gr", - "gunma", - "hiroshima", - "hokkaido", - "hyogo", - "ibaraki", - "ishikawa", - "iwate", - "kagawa", - "kagoshima", - "kanagawa", - "kawasaki", - "kitakyushu", - "kobe", - "kochi", - "kumamoto", - "kyoto", - "lg", - "mie", - "miyagi", - "miyazaki", - "nagano", - "nagasaki", - "nagoya", - "nara", - "ne", - "niigata", - "oita", - "okayama", - "okinawa", - "or", - "osaka", - "saga", - "saitama", - "sapporo", - "sendai", - "shiga", - "shimane", - "shizuoka", - "tochigi", - "tokushima", - "tokyo", - "tottori", - "toyama", - "wakayama", - "xn--0trq7p7nn", - "xn--1ctwo", - "xn--1lqs03n", - "xn--1lqs71d", - "xn--2m4a15e", - "xn--32vp30h", - "xn--4it168d", - "xn--4it797k", - "xn--4pvxs", - "xn--5js045d", - "xn--5rtp49c", - "xn--5rtq34k", - "xn--6btw5a", - "xn--6orx2r", - "xn--7t0a264c", - "xn--8ltr62k", - "xn--8pvr4u", - "xn--c3s14m", - "xn--d5qv7z876c", - "xn--djrs72d6uy", - "xn--djty4k", - "xn--efvn9s", - "xn--ehqz56n", - "xn--elqq16h", - "xn--f6qx53a", - "xn--k7yn95e", - "xn--kbrq7o", - "xn--klt787d", - "xn--kltp7d", - "xn--kltx9a", - "xn--klty5x", - "xn--mkru45i", - "xn--nit225k", - "xn--ntso0iqx3a", - "xn--ntsq17g", - "xn--pssu33l", - "xn--qqqt11m", - "xn--rht27z", - "xn--rht3d", - "xn--rht61e", - "xn--rny31h", - "xn--tor131o", - "xn--uist22h", - "xn--uisz3g", - "xn--uuwu58a", - "xn--vgu402c", - "xn--zbx025d", - "yamagata", - "yamaguchi", - "yamanashi", - "yokohama", - "aisai", - "ama", - "anjo", - "asuke", - "chiryu", - "chita", - "fuso", - "gamagori", - "handa", 
- "hazu", - "hekinan", - "higashiura", - "ichinomiya", - "inazawa", - "inuyama", - "isshiki", - "iwakura", - "kanie", - "kariya", - "kasugai", - "kira", - "kiyosu", - "komaki", - "konan", - "kota", - "mihama", - "miyoshi", - "nishio", - "nisshin", - "obu", - "oguchi", - "oharu", - "okazaki", - "owariasahi", - "seto", - "shikatsu", - "shinshiro", - "shitara", - "tahara", - "takahama", - "tobishima", - "toei", - "togo", - "tokai", - "tokoname", - "toyoake", - "toyohashi", - "toyokawa", - "toyone", - "toyota", - "tsushima", - "yatomi", - "akita", - "daisen", - "fujisato", - "gojome", - "hachirogata", - "happou", - "higashinaruse", - "honjo", - "honjyo", - "ikawa", - "kamikoani", - "kamioka", - "katagami", - "kazuno", - "kitaakita", - "kosaka", - "kyowa", - "misato", - "mitane", - "moriyoshi", - "nikaho", - "noshiro", - "odate", - "oga", - "ogata", - "semboku", - "yokote", - "yurihonjo", - "aomori", - "gonohe", - "hachinohe", - "hashikami", - "hiranai", - "hirosaki", - "itayanagi", - "kuroishi", - "misawa", - "mutsu", - "nakadomari", - "noheji", - "oirase", - "owani", - "rokunohe", - "sannohe", - "shichinohe", - "shingo", - "takko", - "towada", - "tsugaru", - "tsuruta", - "abiko", - "asahi", - "chonan", - "chosei", - "choshi", - "chuo", - "funabashi", - "futtsu", - "hanamigawa", - "ichihara", - "ichikawa", - "ichinomiya", - "inzai", - "isumi", - "kamagaya", - "kamogawa", - "kashiwa", - "katori", - "katsuura", - "kimitsu", - "kisarazu", - "kozaki", - "kujukuri", - "kyonan", - "matsudo", - "midori", - "mihama", - "minamiboso", - "mobara", - "mutsuzawa", - "nagara", - "nagareyama", - "narashino", - "narita", - "noda", - "oamishirasato", - "omigawa", - "onjuku", - "otaki", - "sakae", - "sakura", - "shimofusa", - "shirako", - "shiroi", - "shisui", - "sodegaura", - "sosa", - "tako", - "tateyama", - "togane", - "tohnosho", - "tomisato", - "urayasu", - "yachimata", - "yachiyo", - "yokaichiba", - "yokoshibahikari", - "yotsukaido", - "ainan", - "honai", - "ikata", - "imabari", - "iyo", - "kamijima", - "kihoku", - "kumakogen", - "masaki", - "matsuno", - "matsuyama", - "namikata", - "niihama", - "ozu", - "saijo", - "seiyo", - "shikokuchuo", - "tobe", - "toon", - "uchiko", - "uwajima", - "yawatahama", - "echizen", - "eiheiji", - "fukui", - "ikeda", - "katsuyama", - "mihama", - "minamiechizen", - "obama", - "ohi", - "ono", - "sabae", - "sakai", - "takahama", - "tsuruga", - "wakasa", - "ashiya", - "buzen", - "chikugo", - "chikuho", - "chikujo", - "chikushino", - "chikuzen", - "chuo", - "dazaifu", - "fukuchi", - "hakata", - "higashi", - "hirokawa", - "hisayama", - "iizuka", - "inatsuki", - "kaho", - "kasuga", - "kasuya", - "kawara", - "keisen", - "koga", - "kurate", - "kurogi", - "kurume", - "minami", - "miyako", - "miyama", - "miyawaka", - "mizumaki", - "munakata", - "nakagawa", - "nakama", - "nishi", - "nogata", - "ogori", - "okagaki", - "okawa", - "oki", - "omuta", - "onga", - "onojo", - "oto", - "saigawa", - "sasaguri", - "shingu", - "shinyoshitomi", - "shonai", - "soeda", - "sue", - "tachiarai", - "tagawa", - "takata", - "toho", - "toyotsu", - "tsuiki", - "ukiha", - "umi", - "usui", - "yamada", - "yame", - "yanagawa", - "yukuhashi", - "aizubange", - "aizumisato", - "aizuwakamatsu", - "asakawa", - "bandai", - "date", - "fukushima", - "furudono", - "futaba", - "hanawa", - "higashi", - "hirata", - "hirono", - "iitate", - "inawashiro", - "ishikawa", - "iwaki", - "izumizaki", - "kagamiishi", - "kaneyama", - "kawamata", - "kitakata", - "kitashiobara", - "koori", - "koriyama", - "kunimi", - "miharu", - 
"mishima", - "namie", - "nango", - "nishiaizu", - "nishigo", - "okuma", - "omotego", - "ono", - "otama", - "samegawa", - "shimogo", - "shirakawa", - "showa", - "soma", - "sukagawa", - "taishin", - "tamakawa", - "tanagura", - "tenei", - "yabuki", - "yamato", - "yamatsuri", - "yanaizu", - "yugawa", - "anpachi", - "ena", - "gifu", - "ginan", - "godo", - "gujo", - "hashima", - "hichiso", - "hida", - "higashishirakawa", - "ibigawa", - "ikeda", - "kakamigahara", - "kani", - "kasahara", - "kasamatsu", - "kawaue", - "kitagata", - "mino", - "minokamo", - "mitake", - "mizunami", - "motosu", - "nakatsugawa", - "ogaki", - "sakahogi", - "seki", - "sekigahara", - "shirakawa", - "tajimi", - "takayama", - "tarui", - "toki", - "tomika", - "wanouchi", - "yamagata", - "yaotsu", - "yoro", - "annaka", - "chiyoda", - "fujioka", - "higashiagatsuma", - "isesaki", - "itakura", - "kanna", - "kanra", - "katashina", - "kawaba", - "kiryu", - "kusatsu", - "maebashi", - "meiwa", - "midori", - "minakami", - "naganohara", - "nakanojo", - "nanmoku", - "numata", - "oizumi", - "ora", - "ota", - "shibukawa", - "shimonita", - "shinto", - "showa", - "takasaki", - "takayama", - "tamamura", - "tatebayashi", - "tomioka", - "tsukiyono", - "tsumagoi", - "ueno", - "yoshioka", - "asaminami", - "daiwa", - "etajima", - "fuchu", - "fukuyama", - "hatsukaichi", - "higashihiroshima", - "hongo", - "jinsekikogen", - "kaita", - "kui", - "kumano", - "kure", - "mihara", - "miyoshi", - "naka", - "onomichi", - "osakikamijima", - "otake", - "saka", - "sera", - "seranishi", - "shinichi", - "shobara", - "takehara", - "abashiri", - "abira", - "aibetsu", - "akabira", - "akkeshi", - "asahikawa", - "ashibetsu", - "ashoro", - "assabu", - "atsuma", - "bibai", - "biei", - "bifuka", - "bihoro", - "biratori", - "chippubetsu", - "chitose", - "date", - "ebetsu", - "embetsu", - "eniwa", - "erimo", - "esan", - "esashi", - "fukagawa", - "fukushima", - "furano", - "furubira", - "haboro", - "hakodate", - "hamatonbetsu", - "hidaka", - "higashikagura", - "higashikawa", - "hiroo", - "hokuryu", - "hokuto", - "honbetsu", - "horokanai", - "horonobe", - "ikeda", - "imakane", - "ishikari", - "iwamizawa", - "iwanai", - "kamifurano", - "kamikawa", - "kamishihoro", - "kamisunagawa", - "kamoenai", - "kayabe", - "kembuchi", - "kikonai", - "kimobetsu", - "kitahiroshima", - "kitami", - "kiyosato", - "koshimizu", - "kunneppu", - "kuriyama", - "kuromatsunai", - "kushiro", - "kutchan", - "kyowa", - "mashike", - "matsumae", - "mikasa", - "minamifurano", - "mombetsu", - "moseushi", - "mukawa", - "muroran", - "naie", - "nakagawa", - "nakasatsunai", - "nakatombetsu", - "nanae", - "nanporo", - "nayoro", - "nemuro", - "niikappu", - "niki", - "nishiokoppe", - "noboribetsu", - "numata", - "obihiro", - "obira", - "oketo", - "okoppe", - "otaru", - "otobe", - "otofuke", - "otoineppu", - "oumu", - "ozora", - "pippu", - "rankoshi", - "rebun", - "rikubetsu", - "rishiri", - "rishirifuji", - "saroma", - "sarufutsu", - "shakotan", - "shari", - "shibecha", - "shibetsu", - "shikabe", - "shikaoi", - "shimamaki", - "shimizu", - "shimokawa", - "shinshinotsu", - "shintoku", - "shiranuka", - "shiraoi", - "shiriuchi", - "sobetsu", - "sunagawa", - "taiki", - "takasu", - "takikawa", - "takinoue", - "teshikaga", - "tobetsu", - "tohma", - "tomakomai", - "tomari", - "toya", - "toyako", - "toyotomi", - "toyoura", - "tsubetsu", - "tsukigata", - "urakawa", - "urausu", - "uryu", - "utashinai", - "wakkanai", - "wassamu", - "yakumo", - "yoichi", - "aioi", - "akashi", - "ako", - "amagasaki", - "aogaki", - "asago", - 
"ashiya", - "awaji", - "fukusaki", - "goshiki", - "harima", - "himeji", - "ichikawa", - "inagawa", - "itami", - "kakogawa", - "kamigori", - "kamikawa", - "kasai", - "kasuga", - "kawanishi", - "miki", - "minamiawaji", - "nishinomiya", - "nishiwaki", - "ono", - "sanda", - "sannan", - "sasayama", - "sayo", - "shingu", - "shinonsen", - "shiso", - "sumoto", - "taishi", - "taka", - "takarazuka", - "takasago", - "takino", - "tamba", - "tatsuno", - "toyooka", - "yabu", - "yashiro", - "yoka", - "yokawa", - "ami", - "asahi", - "bando", - "chikusei", - "daigo", - "fujishiro", - "hitachi", - "hitachinaka", - "hitachiomiya", - "hitachiota", - "ibaraki", - "ina", - "inashiki", - "itako", - "iwama", - "joso", - "kamisu", - "kasama", - "kashima", - "kasumigaura", - "koga", - "miho", - "mito", - "moriya", - "naka", - "namegata", - "oarai", - "ogawa", - "omitama", - "ryugasaki", - "sakai", - "sakuragawa", - "shimodate", - "shimotsuma", - "shirosato", - "sowa", - "suifu", - "takahagi", - "tamatsukuri", - "tokai", - "tomobe", - "tone", - "toride", - "tsuchiura", - "tsukuba", - "uchihara", - "ushiku", - "yachiyo", - "yamagata", - "yawara", - "yuki", - "anamizu", - "hakui", - "hakusan", - "kaga", - "kahoku", - "kanazawa", - "kawakita", - "komatsu", - "nakanoto", - "nanao", - "nomi", - "nonoichi", - "noto", - "shika", - "suzu", - "tsubata", - "tsurugi", - "uchinada", - "wajima", - "fudai", - "fujisawa", - "hanamaki", - "hiraizumi", - "hirono", - "ichinohe", - "ichinoseki", - "iwaizumi", - "iwate", - "joboji", - "kamaishi", - "kanegasaki", - "karumai", - "kawai", - "kitakami", - "kuji", - "kunohe", - "kuzumaki", - "miyako", - "mizusawa", - "morioka", - "ninohe", - "noda", - "ofunato", - "oshu", - "otsuchi", - "rikuzentakata", - "shiwa", - "shizukuishi", - "sumita", - "tanohata", - "tono", - "yahaba", - "yamada", - "ayagawa", - "higashikagawa", - "kanonji", - "kotohira", - "manno", - "marugame", - "mitoyo", - "naoshima", - "sanuki", - "tadotsu", - "takamatsu", - "tonosho", - "uchinomi", - "utazu", - "zentsuji", - "akune", - "amami", - "hioki", - "isa", - "isen", - "izumi", - "kagoshima", - "kanoya", - "kawanabe", - "kinko", - "kouyama", - "makurazaki", - "matsumoto", - "minamitane", - "nakatane", - "nishinoomote", - "satsumasendai", - "soo", - "tarumizu", - "yusui", - "aikawa", - "atsugi", - "ayase", - "chigasaki", - "ebina", - "fujisawa", - "hadano", - "hakone", - "hiratsuka", - "isehara", - "kaisei", - "kamakura", - "kiyokawa", - "matsuda", - "minamiashigara", - "miura", - "nakai", - "ninomiya", - "odawara", - "oi", - "oiso", - "sagamihara", - "samukawa", - "tsukui", - "yamakita", - "yamato", - "yokosuka", - "yugawara", - "zama", - "zushi", - "city", - "city", - "city", - "aki", - "geisei", - "hidaka", - "higashitsuno", - "ino", - "kagami", - "kami", - "kitagawa", - "kochi", - "mihara", - "motoyama", - "muroto", - "nahari", - "nakamura", - "nankoku", - "nishitosa", - "niyodogawa", - "ochi", - "okawa", - "otoyo", - "otsuki", - "sakawa", - "sukumo", - "susaki", - "tosa", - "tosashimizu", - "toyo", - "tsuno", - "umaji", - "yasuda", - "yusuhara", - "amakusa", - "arao", - "aso", - "choyo", - "gyokuto", - "kamiamakusa", - "kikuchi", - "kumamoto", - "mashiki", - "mifune", - "minamata", - "minamioguni", - "nagasu", - "nishihara", - "oguni", - "ozu", - "sumoto", - "takamori", - "uki", - "uto", - "yamaga", - "yamato", - "yatsushiro", - "ayabe", - "fukuchiyama", - "higashiyama", - "ide", - "ine", - "joyo", - "kameoka", - "kamo", - "kita", - "kizu", - "kumiyama", - "kyotamba", - "kyotanabe", - "kyotango", - "maizuru", - 
"minami", - "minamiyamashiro", - "miyazu", - "muko", - "nagaokakyo", - "nakagyo", - "nantan", - "oyamazaki", - "sakyo", - "seika", - "tanabe", - "uji", - "ujitawara", - "wazuka", - "yamashina", - "yawata", - "asahi", - "inabe", - "ise", - "kameyama", - "kawagoe", - "kiho", - "kisosaki", - "kiwa", - "komono", - "kumano", - "kuwana", - "matsusaka", - "meiwa", - "mihama", - "minamiise", - "misugi", - "miyama", - "nabari", - "shima", - "suzuka", - "tado", - "taiki", - "taki", - "tamaki", - "toba", - "tsu", - "udono", - "ureshino", - "watarai", - "yokkaichi", - "furukawa", - "higashimatsushima", - "ishinomaki", - "iwanuma", - "kakuda", - "kami", - "kawasaki", - "marumori", - "matsushima", - "minamisanriku", - "misato", - "murata", - "natori", - "ogawara", - "ohira", - "onagawa", - "osaki", - "rifu", - "semine", - "shibata", - "shichikashuku", - "shikama", - "shiogama", - "shiroishi", - "tagajo", - "taiwa", - "tome", - "tomiya", - "wakuya", - "watari", - "yamamoto", - "zao", - "aya", - "ebino", - "gokase", - "hyuga", - "kadogawa", - "kawaminami", - "kijo", - "kitagawa", - "kitakata", - "kitaura", - "kobayashi", - "kunitomi", - "kushima", - "mimata", - "miyakonojo", - "miyazaki", - "morotsuka", - "nichinan", - "nishimera", - "nobeoka", - "saito", - "shiiba", - "shintomi", - "takaharu", - "takanabe", - "takazaki", - "tsuno", - "achi", - "agematsu", - "anan", - "aoki", - "asahi", - "azumino", - "chikuhoku", - "chikuma", - "chino", - "fujimi", - "hakuba", - "hara", - "hiraya", - "iida", - "iijima", - "iiyama", - "iizuna", - "ikeda", - "ikusaka", - "ina", - "karuizawa", - "kawakami", - "kiso", - "kisofukushima", - "kitaaiki", - "komagane", - "komoro", - "matsukawa", - "matsumoto", - "miasa", - "minamiaiki", - "minamimaki", - "minamiminowa", - "minowa", - "miyada", - "miyota", - "mochizuki", - "nagano", - "nagawa", - "nagiso", - "nakagawa", - "nakano", - "nozawaonsen", - "obuse", - "ogawa", - "okaya", - "omachi", - "omi", - "ookuwa", - "ooshika", - "otaki", - "otari", - "sakae", - "sakaki", - "saku", - "sakuho", - "shimosuwa", - "shinanomachi", - "shiojiri", - "suwa", - "suzaka", - "takagi", - "takamori", - "takayama", - "tateshina", - "tatsuno", - "togakushi", - "togura", - "tomi", - "ueda", - "wada", - "yamagata", - "yamanouchi", - "yasaka", - "yasuoka", - "chijiwa", - "futsu", - "goto", - "hasami", - "hirado", - "iki", - "isahaya", - "kawatana", - "kuchinotsu", - "matsuura", - "nagasaki", - "obama", - "omura", - "oseto", - "saikai", - "sasebo", - "seihi", - "shimabara", - "shinkamigoto", - "togitsu", - "tsushima", - "unzen", - "city", - "ando", - "gose", - "heguri", - "higashiyoshino", - "ikaruga", - "ikoma", - "kamikitayama", - "kanmaki", - "kashiba", - "kashihara", - "katsuragi", - "kawai", - "kawakami", - "kawanishi", - "koryo", - "kurotaki", - "mitsue", - "miyake", - "nara", - "nosegawa", - "oji", - "ouda", - "oyodo", - "sakurai", - "sango", - "shimoichi", - "shimokitayama", - "shinjo", - "soni", - "takatori", - "tawaramoto", - "tenkawa", - "tenri", - "uda", - "yamatokoriyama", - "yamatotakada", - "yamazoe", - "yoshino", - "aga", - "agano", - "gosen", - "itoigawa", - "izumozaki", - "joetsu", - "kamo", - "kariwa", - "kashiwazaki", - "minamiuonuma", - "mitsuke", - "muika", - "murakami", - "myoko", - "nagaoka", - "niigata", - "ojiya", - "omi", - "sado", - "sanjo", - "seiro", - "seirou", - "sekikawa", - "shibata", - "tagami", - "tainai", - "tochio", - "tokamachi", - "tsubame", - "tsunan", - "uonuma", - "yahiko", - "yoita", - "yuzawa", - "beppu", - "bungoono", - "bungotakada", - "hasama", - "hiji", 
- "himeshima", - "hita", - "kamitsue", - "kokonoe", - "kuju", - "kunisaki", - "kusu", - "oita", - "saiki", - "taketa", - "tsukumi", - "usa", - "usuki", - "yufu", - "akaiwa", - "asakuchi", - "bizen", - "hayashima", - "ibara", - "kagamino", - "kasaoka", - "kibichuo", - "kumenan", - "kurashiki", - "maniwa", - "misaki", - "nagi", - "niimi", - "nishiawakura", - "okayama", - "satosho", - "setouchi", - "shinjo", - "shoo", - "soja", - "takahashi", - "tamano", - "tsuyama", - "wake", - "yakage", - "aguni", - "ginowan", - "ginoza", - "gushikami", - "haebaru", - "higashi", - "hirara", - "iheya", - "ishigaki", - "ishikawa", - "itoman", - "izena", - "kadena", - "kin", - "kitadaito", - "kitanakagusuku", - "kumejima", - "kunigami", - "minamidaito", - "motobu", - "nago", - "naha", - "nakagusuku", - "nakijin", - "nanjo", - "nishihara", - "ogimi", - "okinawa", - "onna", - "shimoji", - "taketomi", - "tarama", - "tokashiki", - "tomigusuku", - "tonaki", - "urasoe", - "uruma", - "yaese", - "yomitan", - "yonabaru", - "yonaguni", - "zamami", - "abeno", - "chihayaakasaka", - "chuo", - "daito", - "fujiidera", - "habikino", - "hannan", - "higashiosaka", - "higashisumiyoshi", - "higashiyodogawa", - "hirakata", - "ibaraki", - "ikeda", - "izumi", - "izumiotsu", - "izumisano", - "kadoma", - "kaizuka", - "kanan", - "kashiwara", - "katano", - "kawachinagano", - "kishiwada", - "kita", - "kumatori", - "matsubara", - "minato", - "minoh", - "misaki", - "moriguchi", - "neyagawa", - "nishi", - "nose", - "osakasayama", - "sakai", - "sayama", - "sennan", - "settsu", - "shijonawate", - "shimamoto", - "suita", - "tadaoka", - "taishi", - "tajiri", - "takaishi", - "takatsuki", - "tondabayashi", - "toyonaka", - "toyono", - "yao", - "ariake", - "arita", - "fukudomi", - "genkai", - "hamatama", - "hizen", - "imari", - "kamimine", - "kanzaki", - "karatsu", - "kashima", - "kitagata", - "kitahata", - "kiyama", - "kouhoku", - "kyuragi", - "nishiarita", - "ogi", - "omachi", - "ouchi", - "saga", - "shiroishi", - "taku", - "tara", - "tosu", - "yoshinogari", - "arakawa", - "asaka", - "chichibu", - "fujimi", - "fujimino", - "fukaya", - "hanno", - "hanyu", - "hasuda", - "hatogaya", - "hatoyama", - "hidaka", - "higashichichibu", - "higashimatsuyama", - "honjo", - "ina", - "iruma", - "iwatsuki", - "kamiizumi", - "kamikawa", - "kamisato", - "kasukabe", - "kawagoe", - "kawaguchi", - "kawajima", - "kazo", - "kitamoto", - "koshigaya", - "kounosu", - "kuki", - "kumagaya", - "matsubushi", - "minano", - "misato", - "miyashiro", - "miyoshi", - "moroyama", - "nagatoro", - "namegawa", - "niiza", - "ogano", - "ogawa", - "ogose", - "okegawa", - "omiya", - "otaki", - "ranzan", - "ryokami", - "saitama", - "sakado", - "satte", - "sayama", - "shiki", - "shiraoka", - "soka", - "sugito", - "toda", - "tokigawa", - "tokorozawa", - "tsurugashima", - "urawa", - "warabi", - "yashio", - "yokoze", - "yono", - "yorii", - "yoshida", - "yoshikawa", - "yoshimi", - "city", - "city", - "aisho", - "gamo", - "higashiomi", - "hikone", - "koka", - "konan", - "kosei", - "koto", - "kusatsu", - "maibara", - "moriyama", - "nagahama", - "nishiazai", - "notogawa", - "omihachiman", - "otsu", - "ritto", - "ryuoh", - "takashima", - "takatsuki", - "torahime", - "toyosato", - "yasu", - "akagi", - "ama", - "gotsu", - "hamada", - "higashiizumo", - "hikawa", - "hikimi", - "izumo", - "kakinoki", - "masuda", - "matsue", - "misato", - "nishinoshima", - "ohda", - "okinoshima", - "okuizumo", - "shimane", - "tamayu", - "tsuwano", - "unnan", - "yakumo", - "yasugi", - "yatsuka", - "arai", - "atami", - 
"fuji", - "fujieda", - "fujikawa", - "fujinomiya", - "fukuroi", - "gotemba", - "haibara", - "hamamatsu", - "higashiizu", - "ito", - "iwata", - "izu", - "izunokuni", - "kakegawa", - "kannami", - "kawanehon", - "kawazu", - "kikugawa", - "kosai", - "makinohara", - "matsuzaki", - "minamiizu", - "mishima", - "morimachi", - "nishiizu", - "numazu", - "omaezaki", - "shimada", - "shimizu", - "shimoda", - "shizuoka", - "susono", - "yaizu", - "yoshida", - "ashikaga", - "bato", - "haga", - "ichikai", - "iwafune", - "kaminokawa", - "kanuma", - "karasuyama", - "kuroiso", - "mashiko", - "mibu", - "moka", - "motegi", - "nasu", - "nasushiobara", - "nikko", - "nishikata", - "nogi", - "ohira", - "ohtawara", - "oyama", - "sakura", - "sano", - "shimotsuke", - "shioya", - "takanezawa", - "tochigi", - "tsuga", - "ujiie", - "utsunomiya", - "yaita", - "aizumi", - "anan", - "ichiba", - "itano", - "kainan", - "komatsushima", - "matsushige", - "mima", - "minami", - "miyoshi", - "mugi", - "nakagawa", - "naruto", - "sanagochi", - "shishikui", - "tokushima", - "wajiki", - "adachi", - "akiruno", - "akishima", - "aogashima", - "arakawa", - "bunkyo", - "chiyoda", - "chofu", - "chuo", - "edogawa", - "fuchu", - "fussa", - "hachijo", - "hachioji", - "hamura", - "higashikurume", - "higashimurayama", - "higashiyamato", - "hino", - "hinode", - "hinohara", - "inagi", - "itabashi", - "katsushika", - "kita", - "kiyose", - "kodaira", - "koganei", - "kokubunji", - "komae", - "koto", - "kouzushima", - "kunitachi", - "machida", - "meguro", - "minato", - "mitaka", - "mizuho", - "musashimurayama", - "musashino", - "nakano", - "nerima", - "ogasawara", - "okutama", - "ome", - "oshima", - "ota", - "setagaya", - "shibuya", - "shinagawa", - "shinjuku", - "suginami", - "sumida", - "tachikawa", - "taito", - "tama", - "toshima", - "chizu", - "hino", - "kawahara", - "koge", - "kotoura", - "misasa", - "nanbu", - "nichinan", - "sakaiminato", - "tottori", - "wakasa", - "yazu", - "yonago", - "asahi", - "fuchu", - "fukumitsu", - "funahashi", - "himi", - "imizu", - "inami", - "johana", - "kamiichi", - "kurobe", - "nakaniikawa", - "namerikawa", - "nanto", - "nyuzen", - "oyabe", - "taira", - "takaoka", - "tateyama", - "toga", - "tonami", - "toyama", - "unazuki", - "uozu", - "yamada", - "arida", - "aridagawa", - "gobo", - "hashimoto", - "hidaka", - "hirogawa", - "inami", - "iwade", - "kainan", - "kamitonda", - "katsuragi", - "kimino", - "kinokawa", - "kitayama", - "koya", - "koza", - "kozagawa", - "kudoyama", - "kushimoto", - "mihama", - "misato", - "nachikatsuura", - "shingu", - "shirahama", - "taiji", - "tanabe", - "wakayama", - "yuasa", - "yura", - "asahi", - "funagata", - "higashine", - "iide", - "kahoku", - "kaminoyama", - "kaneyama", - "kawanishi", - "mamurogawa", - "mikawa", - "murayama", - "nagai", - "nakayama", - "nanyo", - "nishikawa", - "obanazawa", - "oe", - "oguni", - "ohkura", - "oishida", - "sagae", - "sakata", - "sakegawa", - "shinjo", - "shirataka", - "shonai", - "takahata", - "tendo", - "tozawa", - "tsuruoka", - "yamagata", - "yamanobe", - "yonezawa", - "yuza", - "abu", - "hagi", - "hikari", - "hofu", - "iwakuni", - "kudamatsu", - "mitou", - "nagato", - "oshima", - "shimonoseki", - "shunan", - "tabuse", - "tokuyama", - "toyota", - "ube", - "yuu", - "chuo", - "doshi", - "fuefuki", - "fujikawa", - "fujikawaguchiko", - "fujiyoshida", - "hayakawa", - "hokuto", - "ichikawamisato", - "kai", - "kofu", - "koshu", - "kosuge", - "minami-alps", - "minobu", - "nakamichi", - "nanbu", - "narusawa", - "nirasaki", - "nishikatsura", - "oshino", - 
"otsuki", - "showa", - "tabayama", - "tsuru", - "uenohara", - "yamanakako", - "yamanashi", - "city", - "co", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "biz", - "com", - "edu", - "gov", - "info", - "net", - "org", - "ass", - "asso", - "com", - "coop", - "edu", - "gouv", - "gov", - "medecin", - "mil", - "nom", - "notaires", - "org", - "pharmaciens", - "prd", - "presse", - "tm", - "veterinaire", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "org", - "rep", - "tra", - "ac", - "blogspot", - "busan", - "chungbuk", - "chungnam", - "co", - "daegu", - "daejeon", - "es", - "gangwon", - "go", - "gwangju", - "gyeongbuk", - "gyeonggi", - "gyeongnam", - "hs", - "incheon", - "jeju", - "jeonbuk", - "jeonnam", - "kg", - "mil", - "ms", - "ne", - "or", - "pe", - "re", - "sc", - "seoul", - "ulsan", - "co", - "edu", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "bnr", - "c", - "com", - "edu", - "gov", - "info", - "int", - "net", - "org", - "per", - "static", - "dev", - "sites", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "edu", - "gov", - "net", - "org", - "oy", - "blogspot", - "cyon", - "mypep", - "ac", - "assn", - "com", - "edu", - "gov", - "grp", - "hotel", - "int", - "ltd", - "net", - "ngo", - "org", - "sch", - "soc", - "web", - "com", - "edu", - "gov", - "net", - "org", - "co", - "org", - "blogspot", - "gov", - "blogspot", - "asn", - "com", - "conf", - "edu", - "gov", - "id", - "mil", - "net", - "org", - "com", - "edu", - "gov", - "id", - "med", - "net", - "org", - "plc", - "sch", - "ac", - "co", - "gov", - "net", - "org", - "press", - "router", - "asso", - "tm", - "blogspot", - "ac", - "brasilia", - "c66", - "co", - "daplie", - "ddns", - "diskstation", - "dnsfor", - "dscloud", - "edu", - "filegear", - "gov", - "hopto", - "i234", - "its", - "loginto", - "myds", - "net", - "noip", - "org", - "priv", - "synology", - "webhop", - "wedeploy", - "yombo", - "localhost", - "co", - "com", - "edu", - "gov", - "mil", - "nom", - "org", - "prd", - "tm", - "blogspot", - "com", - "edu", - "gov", - "inf", - "name", - "net", - "org", - "com", - "edu", - "gouv", - "gov", - "net", - "org", - "presse", - "edu", - "gov", - "nyc", - "org", - "com", - "edu", - "gov", - "net", - "org", - "dscloud", - "blogspot", - "gov", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "net", - "org", - "blogspot", - "ac", - "co", - "com", - "gov", - "net", - "or", - "org", - "academy", - "agriculture", - "air", - "airguard", - "alabama", - "alaska", - "amber", - "ambulance", - "american", - "americana", - "americanantiques", - "americanart", - "amsterdam", - "and", - "annefrank", - "anthro", - "anthropology", - "antiques", - "aquarium", - "arboretum", - "archaeological", - "archaeology", - "architecture", - "art", - "artanddesign", - "artcenter", - "artdeco", - "arteducation", - "artgallery", - "arts", - "artsandcrafts", - "asmatart", - "assassination", - "assisi", - "association", - "astronomy", - "atlanta", - "austin", - "australia", - "automotive", - "aviation", - "axis", - "badajoz", - "baghdad", - "bahn", - "bale", - "baltimore", - "barcelona", - "baseball", - "basel", - "baths", - "bauern", - "beauxarts", - "beeldengeluid", - "bellevue", - "bergbau", - "berkeley", - "berlin", - "bern", - "bible", - "bilbao", - "bill", - "birdart", - "birthplace", - "bonn", - "boston", - "botanical", - "botanicalgarden", - "botanicgarden", - "botany", - "brandywinevalley", - "brasil", - "bristol", - "british", - 
"britishcolumbia", - "broadcast", - "brunel", - "brussel", - "brussels", - "bruxelles", - "building", - "burghof", - "bus", - "bushey", - "cadaques", - "california", - "cambridge", - "can", - "canada", - "capebreton", - "carrier", - "cartoonart", - "casadelamoneda", - "castle", - "castres", - "celtic", - "center", - "chattanooga", - "cheltenham", - "chesapeakebay", - "chicago", - "children", - "childrens", - "childrensgarden", - "chiropractic", - "chocolate", - "christiansburg", - "cincinnati", - "cinema", - "circus", - "civilisation", - "civilization", - "civilwar", - "clinton", - "clock", - "coal", - "coastaldefence", - "cody", - "coldwar", - "collection", - "colonialwilliamsburg", - "coloradoplateau", - "columbia", - "columbus", - "communication", - "communications", - "community", - "computer", - "computerhistory", - "contemporary", - "contemporaryart", - "convent", - "copenhagen", - "corporation", - "corvette", - "costume", - "countryestate", - "county", - "crafts", - "cranbrook", - "creation", - "cultural", - "culturalcenter", - "culture", - "cyber", - "cymru", - "dali", - "dallas", - "database", - "ddr", - "decorativearts", - "delaware", - "delmenhorst", - "denmark", - "depot", - "design", - "detroit", - "dinosaur", - "discovery", - "dolls", - "donostia", - "durham", - "eastafrica", - "eastcoast", - "education", - "educational", - "egyptian", - "eisenbahn", - "elburg", - "elvendrell", - "embroidery", - "encyclopedic", - "england", - "entomology", - "environment", - "environmentalconservation", - "epilepsy", - "essex", - "estate", - "ethnology", - "exeter", - "exhibition", - "family", - "farm", - "farmequipment", - "farmers", - "farmstead", - "field", - "figueres", - "filatelia", - "film", - "fineart", - "finearts", - "finland", - "flanders", - "florida", - "force", - "fortmissoula", - "fortworth", - "foundation", - "francaise", - "frankfurt", - "franziskaner", - "freemasonry", - "freiburg", - "fribourg", - "frog", - "fundacio", - "furniture", - "gallery", - "garden", - "gateway", - "geelvinck", - "gemological", - "geology", - "georgia", - "giessen", - "glas", - "glass", - "gorge", - "grandrapids", - "graz", - "guernsey", - "halloffame", - "hamburg", - "handson", - "harvestcelebration", - "hawaii", - "health", - "heimatunduhren", - "hellas", - "helsinki", - "hembygdsforbund", - "heritage", - "histoire", - "historical", - "historicalsociety", - "historichouses", - "historisch", - "historisches", - "history", - "historyofscience", - "horology", - "house", - "humanities", - "illustration", - "imageandsound", - "indian", - "indiana", - "indianapolis", - "indianmarket", - "intelligence", - "interactive", - "iraq", - "iron", - "isleofman", - "jamison", - "jefferson", - "jerusalem", - "jewelry", - "jewish", - "jewishart", - "jfk", - "journalism", - "judaica", - "judygarland", - "juedisches", - "juif", - "karate", - "karikatur", - "kids", - "koebenhavn", - "koeln", - "kunst", - "kunstsammlung", - "kunstunddesign", - "labor", - "labour", - "lajolla", - "lancashire", - "landes", - "lans", - "larsson", - "lewismiller", - "lincoln", - "linz", - "living", - "livinghistory", - "localhistory", - "london", - "losangeles", - "louvre", - "loyalist", - "lucerne", - "luxembourg", - "luzern", - "mad", - "madrid", - "mallorca", - "manchester", - "mansion", - "mansions", - "manx", - "marburg", - "maritime", - "maritimo", - "maryland", - "marylhurst", - "media", - "medical", - "medizinhistorisches", - "meeres", - "memorial", - "mesaverde", - "michigan", - "midatlantic", - "military", - "mill", - "miners", - 
"mining", - "minnesota", - "missile", - "missoula", - "modern", - "moma", - "money", - "monmouth", - "monticello", - "montreal", - "moscow", - "motorcycle", - "muenchen", - "muenster", - "mulhouse", - "muncie", - "museet", - "museumcenter", - "museumvereniging", - "music", - "national", - "nationalfirearms", - "nationalheritage", - "nativeamerican", - "naturalhistory", - "naturalhistorymuseum", - "naturalsciences", - "nature", - "naturhistorisches", - "natuurwetenschappen", - "naumburg", - "naval", - "nebraska", - "neues", - "newhampshire", - "newjersey", - "newmexico", - "newport", - "newspaper", - "newyork", - "niepce", - "norfolk", - "north", - "nrw", - "nuernberg", - "nuremberg", - "nyc", - "nyny", - "oceanographic", - "oceanographique", - "omaha", - "online", - "ontario", - "openair", - "oregon", - "oregontrail", - "otago", - "oxford", - "pacific", - "paderborn", - "palace", - "paleo", - "palmsprings", - "panama", - "paris", - "pasadena", - "pharmacy", - "philadelphia", - "philadelphiaarea", - "philately", - "phoenix", - "photography", - "pilots", - "pittsburgh", - "planetarium", - "plantation", - "plants", - "plaza", - "portal", - "portland", - "portlligat", - "posts-and-telecommunications", - "preservation", - "presidio", - "press", - "project", - "public", - "pubol", - "quebec", - "railroad", - "railway", - "research", - "resistance", - "riodejaneiro", - "rochester", - "rockart", - "roma", - "russia", - "saintlouis", - "salem", - "salvadordali", - "salzburg", - "sandiego", - "sanfrancisco", - "santabarbara", - "santacruz", - "santafe", - "saskatchewan", - "satx", - "savannahga", - "schlesisches", - "schoenbrunn", - "schokoladen", - "school", - "schweiz", - "science", - "science-fiction", - "scienceandhistory", - "scienceandindustry", - "sciencecenter", - "sciencecenters", - "sciencehistory", - "sciences", - "sciencesnaturelles", - "scotland", - "seaport", - "settlement", - "settlers", - "shell", - "sherbrooke", - "sibenik", - "silk", - "ski", - "skole", - "society", - "sologne", - "soundandvision", - "southcarolina", - "southwest", - "space", - "spy", - "square", - "stadt", - "stalbans", - "starnberg", - "state", - "stateofdelaware", - "station", - "steam", - "steiermark", - "stjohn", - "stockholm", - "stpetersburg", - "stuttgart", - "suisse", - "surgeonshall", - "surrey", - "svizzera", - "sweden", - "sydney", - "tank", - "tcm", - "technology", - "telekommunikation", - "television", - "texas", - "textile", - "theater", - "time", - "timekeeping", - "topology", - "torino", - "touch", - "town", - "transport", - "tree", - "trolley", - "trust", - "trustee", - "uhren", - "ulm", - "undersea", - "university", - "usa", - "usantiques", - "usarts", - "uscountryestate", - "usculture", - "usdecorativearts", - "usgarden", - "ushistory", - "ushuaia", - "uslivinghistory", - "utah", - "uvic", - "valley", - "vantaa", - "versailles", - "viking", - "village", - "virginia", - "virtual", - "virtuel", - "vlaanderen", - "volkenkunde", - "wales", - "wallonie", - "war", - "washingtondc", - "watch-and-clock", - "watchandclock", - "western", - "westfalen", - "whaling", - "wildlife", - "williamsburg", - "windmill", - "workshop", - "xn--9dbhblg6di", - "xn--comunicaes-v6a2o", - "xn--correios-e-telecomunicaes-ghc29a", - "xn--h1aegh", - "xn--lns-qla", - "york", - "yorkshire", - "yosemite", - "youth", - "zoological", - "zoology", - "aero", - "biz", - "com", - "coop", - "edu", - "gov", - "info", - "int", - "mil", - "museum", - "name", - "net", - "org", - "pro", - "ac", - "biz", - "co", - "com", - "coop", - "edu", - 
"gov", - "int", - "museum", - "net", - "org", - "blogspot", - "com", - "edu", - "gob", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "org", - "ac", - "adv", - "co", - "edu", - "gov", - "mil", - "net", - "org", - "ca", - "cc", - "co", - "com", - "dr", - "in", - "info", - "mobi", - "mx", - "name", - "or", - "org", - "pro", - "school", - "tv", - "us", - "ws", - "her", - "his", - "forgot", - "forgot", - "asso", - "nom", - "alwaysdata", - "at-band-camp", - "azure-mobile", - "azurewebsites", - "barsy", - "blogdns", - "bounceme", - "bplaced", - "broke-it", - "buyshouses", - "cdn77", - "cdn77-ssl", - "cloudapp", - "cloudfront", - "cloudfunctions", - "cryptonomic", - "ddns", - "definima", - "dnsalias", - "dnsdojo", - "does-it", - "dontexist", - "dsmynas", - "dynalias", - "dynathome", - "dynv6", - "eating-organic", - "endofinternet", - "familyds", - "fastly", - "fastlylb", - "feste-ip", - "firewall-gateway", - "from-az", - "from-co", - "from-la", - "from-ny", - "gb", - "gets-it", - "ham-radio-op", - "homeftp", - "homeip", - "homelinux", - "homeunix", - "hu", - "in", - "in-the-band", - "ipifony", - "is-a-chef", - "is-a-geek", - "isa-geek", - "jp", - "kicks-ass", - "knx-server", - "moonscale", - "mydissent", - "myeffect", - "myfritz", - "mymediapc", - "mypsx", - "mysecuritycamera", - "nhlfan", - "no-ip", - "office-on-the", - "pgafan", - "podzone", - "privatizehealthinsurance", - "rackmaze", - "redirectme", - "ru", - "scrapper-site", - "se", - "selfip", - "sells-it", - "servebbs", - "serveblog", - "serveftp", - "serveminecraft", - "square7", - "static-access", - "sytes", - "t3l3p0rt", - "thruhere", - "twmail", - "uk", - "webhop", - "za", - "r", - "freetls", - "map", - "prod", - "ssl", - "a", - "global", - "a", - "b", - "global", - "map", - "alces", - "arts", - "com", - "firm", - "info", - "net", - "other", - "per", - "rec", - "store", - "web", - "com", - "edu", - "gov", - "i", - "mil", - "mobi", - "name", - "net", - "org", - "sch", - "blogspot", - "ac", - "biz", - "co", - "com", - "edu", - "gob", - "in", - "info", - "int", - "mil", - "net", - "nom", - "org", - "web", - "blogspot", - "bv", - "co", - "transurl", - "virtueeldomein", - "aa", - "aarborte", - "aejrie", - "afjord", - "agdenes", - "ah", - "akershus", - "aknoluokta", - "akrehamn", - "al", - "alaheadju", - "alesund", - "algard", - "alstahaug", - "alta", - "alvdal", - "amli", - "amot", - "andasuolo", - "andebu", - "andoy", - "ardal", - "aremark", - "arendal", - "arna", - "aseral", - "asker", - "askim", - "askoy", - "askvoll", - "asnes", - "audnedaln", - "aukra", - "aure", - "aurland", - "aurskog-holand", - "austevoll", - "austrheim", - "averoy", - "badaddja", - "bahcavuotna", - "bahccavuotna", - "baidar", - "bajddar", - "balat", - "balestrand", - "ballangen", - "balsfjord", - "bamble", - "bardu", - "barum", - "batsfjord", - "bearalvahki", - "beardu", - "beiarn", - "berg", - "bergen", - "berlevag", - "bievat", - "bindal", - "birkenes", - "bjarkoy", - "bjerkreim", - "bjugn", - "blogspot", - "bodo", - "bokn", - "bomlo", - "bremanger", - "bronnoy", - "bronnoysund", - "brumunddal", - "bryne", - "bu", - "budejju", - "buskerud", - "bygland", - "bykle", - "cahcesuolo", - "co", - "davvenjarga", - "davvesiida", - "deatnu", - "dep", - "dielddanuorri", - "divtasvuodna", - "divttasvuotna", - "donna", - "dovre", - "drammen", - "drangedal", - "drobak", - "dyroy", - "egersund", - "eid", - "eidfjord", - "eidsberg", - "eidskog", - "eidsvoll", - "eigersund", - "elverum", - "enebakk", - "engerdal", - "etne", - "etnedal", - 
"evenassi", - "evenes", - "evje-og-hornnes", - "farsund", - "fauske", - "fedje", - "fet", - "fetsund", - "fhs", - "finnoy", - "fitjar", - "fjaler", - "fjell", - "fla", - "flakstad", - "flatanger", - "flekkefjord", - "flesberg", - "flora", - "floro", - "fm", - "folkebibl", - "folldal", - "forde", - "forsand", - "fosnes", - "frana", - "fredrikstad", - "frei", - "frogn", - "froland", - "frosta", - "froya", - "fuoisku", - "fuossko", - "fusa", - "fylkesbibl", - "fyresdal", - "gaivuotna", - "galsa", - "gamvik", - "gangaviika", - "gaular", - "gausdal", - "giehtavuoatna", - "gildeskal", - "giske", - "gjemnes", - "gjerdrum", - "gjerstad", - "gjesdal", - "gjovik", - "gloppen", - "gol", - "gran", - "grane", - "granvin", - "gratangen", - "grimstad", - "grong", - "grue", - "gulen", - "guovdageaidnu", - "ha", - "habmer", - "hadsel", - "hagebostad", - "halden", - "halsa", - "hamar", - "hamaroy", - "hammarfeasta", - "hammerfest", - "hapmir", - "haram", - "hareid", - "harstad", - "hasvik", - "hattfjelldal", - "haugesund", - "hedmark", - "hemne", - "hemnes", - "hemsedal", - "herad", - "hitra", - "hjartdal", - "hjelmeland", - "hl", - "hm", - "hobol", - "hof", - "hokksund", - "hol", - "hole", - "holmestrand", - "holtalen", - "honefoss", - "hordaland", - "hornindal", - "horten", - "hoyanger", - "hoylandet", - "hurdal", - "hurum", - "hvaler", - "hyllestad", - "ibestad", - "idrett", - "inderoy", - "iveland", - "ivgu", - "jan-mayen", - "jessheim", - "jevnaker", - "jolster", - "jondal", - "jorpeland", - "kafjord", - "karasjohka", - "karasjok", - "karlsoy", - "karmoy", - "kautokeino", - "kirkenes", - "klabu", - "klepp", - "kommune", - "kongsberg", - "kongsvinger", - "kopervik", - "kraanghke", - "kragero", - "kristiansand", - "kristiansund", - "krodsherad", - "krokstadelva", - "kvafjord", - "kvalsund", - "kvam", - "kvanangen", - "kvinesdal", - "kvinnherad", - "kviteseid", - "kvitsoy", - "laakesvuemie", - "lahppi", - "langevag", - "lardal", - "larvik", - "lavagis", - "lavangen", - "leangaviika", - "lebesby", - "leikanger", - "leirfjord", - "leirvik", - "leka", - "leksvik", - "lenvik", - "lerdal", - "lesja", - "levanger", - "lier", - "lierne", - "lillehammer", - "lillesand", - "lindas", - "lindesnes", - "loabat", - "lodingen", - "lom", - "loppa", - "lorenskog", - "loten", - "lund", - "lunner", - "luroy", - "luster", - "lyngdal", - "lyngen", - "malatvuopmi", - "malselv", - "malvik", - "mandal", - "marker", - "marnardal", - "masfjorden", - "masoy", - "matta-varjjat", - "meland", - "meldal", - "melhus", - "meloy", - "meraker", - "midsund", - "midtre-gauldal", - "mil", - "mjondalen", - "mo-i-rana", - "moareke", - "modalen", - "modum", - "molde", - "more-og-romsdal", - "mosjoen", - "moskenes", - "moss", - "mosvik", - "mr", - "muosat", - "museum", - "naamesjevuemie", - "namdalseid", - "namsos", - "namsskogan", - "nannestad", - "naroy", - "narviika", - "narvik", - "naustdal", - "navuotna", - "nedre-eiker", - "nesna", - "nesodden", - "nesoddtangen", - "nesseby", - "nesset", - "nissedal", - "nittedal", - "nl", - "nord-aurdal", - "nord-fron", - "nord-odal", - "norddal", - "nordkapp", - "nordland", - "nordre-land", - "nordreisa", - "nore-og-uvdal", - "notodden", - "notteroy", - "nt", - "odda", - "of", - "oksnes", - "ol", - "omasvuotna", - "oppdal", - "oppegard", - "orkanger", - "orkdal", - "orland", - "orskog", - "orsta", - "osen", - "oslo", - "osoyro", - "osteroy", - "ostfold", - "ostre-toten", - "overhalla", - "ovre-eiker", - "oyer", - "oygarden", - "oystre-slidre", - "porsanger", - "porsangu", - "porsgrunn", - "priv", - 
"rade", - "radoy", - "rahkkeravju", - "raholt", - "raisa", - "rakkestad", - "ralingen", - "rana", - "randaberg", - "rauma", - "rendalen", - "rennebu", - "rennesoy", - "rindal", - "ringebu", - "ringerike", - "ringsaker", - "risor", - "rissa", - "rl", - "roan", - "rodoy", - "rollag", - "romsa", - "romskog", - "roros", - "rost", - "royken", - "royrvik", - "ruovat", - "rygge", - "salangen", - "salat", - "saltdal", - "samnanger", - "sandefjord", - "sandnes", - "sandnessjoen", - "sandoy", - "sarpsborg", - "sauda", - "sauherad", - "sel", - "selbu", - "selje", - "seljord", - "sf", - "siellak", - "sigdal", - "siljan", - "sirdal", - "skanit", - "skanland", - "skaun", - "skedsmo", - "skedsmokorset", - "ski", - "skien", - "skierva", - "skiptvet", - "skjak", - "skjervoy", - "skodje", - "slattum", - "smola", - "snaase", - "snasa", - "snillfjord", - "snoasa", - "sogndal", - "sogne", - "sokndal", - "sola", - "solund", - "somna", - "sondre-land", - "songdalen", - "sor-aurdal", - "sor-fron", - "sor-odal", - "sor-varanger", - "sorfold", - "sorreisa", - "sortland", - "sorum", - "spjelkavik", - "spydeberg", - "st", - "stange", - "stat", - "stathelle", - "stavanger", - "stavern", - "steigen", - "steinkjer", - "stjordal", - "stjordalshalsen", - "stokke", - "stor-elvdal", - "stord", - "stordal", - "storfjord", - "strand", - "stranda", - "stryn", - "sula", - "suldal", - "sund", - "sunndal", - "surnadal", - "svalbard", - "sveio", - "svelvik", - "sykkylven", - "tana", - "tananger", - "telemark", - "time", - "tingvoll", - "tinn", - "tjeldsund", - "tjome", - "tm", - "tokke", - "tolga", - "tonsberg", - "torsken", - "tr", - "trana", - "tranby", - "tranoy", - "troandin", - "trogstad", - "tromsa", - "tromso", - "trondheim", - "trysil", - "tvedestrand", - "tydal", - "tynset", - "tysfjord", - "tysnes", - "tysvar", - "ullensaker", - "ullensvang", - "ulvik", - "unjarga", - "utsira", - "va", - "vaapste", - "vadso", - "vaga", - "vagan", - "vagsoy", - "vaksdal", - "valle", - "vang", - "vanylven", - "vardo", - "varggat", - "varoy", - "vefsn", - "vega", - "vegarshei", - "vennesla", - "verdal", - "verran", - "vestby", - "vestfold", - "vestnes", - "vestre-slidre", - "vestre-toten", - "vestvagoy", - "vevelstad", - "vf", - "vgs", - "vik", - "vikna", - "vindafjord", - "voagat", - "volda", - "voss", - "vossevangen", - "xn--andy-ira", - "xn--asky-ira", - "xn--aurskog-hland-jnb", - "xn--avery-yua", - "xn--bdddj-mrabd", - "xn--bearalvhki-y4a", - "xn--berlevg-jxa", - "xn--bhcavuotna-s4a", - "xn--bhccavuotna-k7a", - "xn--bidr-5nac", - "xn--bievt-0qa", - "xn--bjarky-fya", - "xn--bjddar-pta", - "xn--blt-elab", - "xn--bmlo-gra", - "xn--bod-2na", - "xn--brnny-wuac", - "xn--brnnysund-m8ac", - "xn--brum-voa", - "xn--btsfjord-9za", - "xn--davvenjrga-y4a", - "xn--dnna-gra", - "xn--drbak-wua", - "xn--dyry-ira", - "xn--eveni-0qa01ga", - "xn--finny-yua", - "xn--fjord-lra", - "xn--fl-zia", - "xn--flor-jra", - "xn--frde-gra", - "xn--frna-woa", - "xn--frya-hra", - "xn--ggaviika-8ya47h", - "xn--gildeskl-g0a", - "xn--givuotna-8ya", - "xn--gjvik-wua", - "xn--gls-elac", - "xn--h-2fa", - "xn--hbmer-xqa", - "xn--hcesuolo-7ya35b", - "xn--hgebostad-g3a", - "xn--hmmrfeasta-s4ac", - "xn--hnefoss-q1a", - "xn--hobl-ira", - "xn--holtlen-hxa", - "xn--hpmir-xqa", - "xn--hyanger-q1a", - "xn--hylandet-54a", - "xn--indery-fya", - "xn--jlster-bya", - "xn--jrpeland-54a", - "xn--karmy-yua", - "xn--kfjord-iua", - "xn--klbu-woa", - "xn--koluokta-7ya57h", - "xn--krager-gya", - "xn--kranghke-b0a", - "xn--krdsherad-m8a", - "xn--krehamn-dxa", - "xn--krjohka-hwab49j", - 
"xn--ksnes-uua", - "xn--kvfjord-nxa", - "xn--kvitsy-fya", - "xn--kvnangen-k0a", - "xn--l-1fa", - "xn--laheadju-7ya", - "xn--langevg-jxa", - "xn--ldingen-q1a", - "xn--leagaviika-52b", - "xn--lesund-hua", - "xn--lgrd-poac", - "xn--lhppi-xqa", - "xn--linds-pra", - "xn--loabt-0qa", - "xn--lrdal-sra", - "xn--lrenskog-54a", - "xn--lt-liac", - "xn--lten-gra", - "xn--lury-ira", - "xn--mely-ira", - "xn--merker-kua", - "xn--mjndalen-64a", - "xn--mlatvuopmi-s4a", - "xn--mli-tla", - "xn--mlselv-iua", - "xn--moreke-jua", - "xn--mosjen-eya", - "xn--mot-tla", - "xn--mre-og-romsdal-qqb", - "xn--msy-ula0h", - "xn--mtta-vrjjat-k7af", - "xn--muost-0qa", - "xn--nmesjevuemie-tcba", - "xn--nry-yla5g", - "xn--nttery-byae", - "xn--nvuotna-hwa", - "xn--oppegrd-ixa", - "xn--ostery-fya", - "xn--osyro-wua", - "xn--porsgu-sta26f", - "xn--rady-ira", - "xn--rdal-poa", - "xn--rde-ula", - "xn--rdy-0nab", - "xn--rennesy-v1a", - "xn--rhkkervju-01af", - "xn--rholt-mra", - "xn--risa-5na", - "xn--risr-ira", - "xn--rland-uua", - "xn--rlingen-mxa", - "xn--rmskog-bya", - "xn--rros-gra", - "xn--rskog-uua", - "xn--rst-0na", - "xn--rsta-fra", - "xn--ryken-vua", - "xn--ryrvik-bya", - "xn--s-1fa", - "xn--sandnessjen-ogb", - "xn--sandy-yua", - "xn--seral-lra", - "xn--sgne-gra", - "xn--skierv-uta", - "xn--skjervy-v1a", - "xn--skjk-soa", - "xn--sknit-yqa", - "xn--sknland-fxa", - "xn--slat-5na", - "xn--slt-elab", - "xn--smla-hra", - "xn--smna-gra", - "xn--snase-nra", - "xn--sndre-land-0cb", - "xn--snes-poa", - "xn--snsa-roa", - "xn--sr-aurdal-l8a", - "xn--sr-fron-q1a", - "xn--sr-odal-q1a", - "xn--sr-varanger-ggb", - "xn--srfold-bya", - "xn--srreisa-q1a", - "xn--srum-gra", - "xn--stfold-9xa", - "xn--stjrdal-s1a", - "xn--stjrdalshalsen-sqb", - "xn--stre-toten-zcb", - "xn--tjme-hra", - "xn--tnsberg-q1a", - "xn--trany-yua", - "xn--trgstad-r1a", - "xn--trna-woa", - "xn--troms-zua", - "xn--tysvr-vra", - "xn--unjrga-rta", - "xn--vads-jra", - "xn--vard-jra", - "xn--vegrshei-c0a", - "xn--vestvgy-ixa6o", - "xn--vg-yiab", - "xn--vgan-qoa", - "xn--vgsy-qoa0j", - "xn--vre-eiker-k8a", - "xn--vrggt-xqad", - "xn--vry-yla5g", - "xn--yer-zna", - "xn--ygarden-p1a", - "xn--ystre-slidre-ujb", - "gs", - "gs", - "nes", - "gs", - "nes", - "gs", - "os", - "valer", - "xn--vler-qoa", - "gs", - "gs", - "os", - "gs", - "heroy", - "sande", - "gs", - "gs", - "bo", - "heroy", - "xn--b-5ga", - "xn--hery-ira", - "gs", - "gs", - "gs", - "gs", - "valer", - "gs", - "gs", - "gs", - "gs", - "bo", - "xn--b-5ga", - "gs", - "gs", - "gs", - "sande", - "gs", - "sande", - "xn--hery-ira", - "xn--vler-qoa", - "biz", - "com", - "edu", - "gov", - "info", - "net", - "org", - "merseine", - "mine", - "shacknet", - "ac", - "co", - "cri", - "geek", - "gen", - "govt", - "health", - "iwi", - "kiwi", - "maori", - "mil", - "net", - "org", - "parliament", - "school", - "xn--mori-qsa", - "blogspot", - "co", - "com", - "edu", - "gov", - "med", - "museum", - "net", - "org", - "pro", - "homelink", - "barsy", - "ae", - "amune", - "blogdns", - "blogsite", - "bmoattachments", - "boldlygoingnowhere", - "cable-modem", - "cdn77", - "cdn77-secure", - "certmgr", - "cloudns", - "collegefan", - "couchpotatofries", - "ddnss", - "diskstation", - "dnsalias", - "dnsdojo", - "doesntexist", - "dontexist", - "doomdns", - "dsmynas", - "duckdns", - "dvrdns", - "dynalias", - "dyndns", - "endofinternet", - "endoftheinternet", - "eu", - "familyds", - "fedorainfracloud", - "fedorapeople", - "fedoraproject", - "from-me", - "game-host", - "gotdns", - "hepforge", - "hk", - "hobby-site", - "homedns", - "homeftp", - 
"homelinux", - "homeunix", - "hopto", - "is-a-bruinsfan", - "is-a-candidate", - "is-a-celticsfan", - "is-a-chef", - "is-a-geek", - "is-a-knight", - "is-a-linux-user", - "is-a-patsfan", - "is-a-soxfan", - "is-found", - "is-lost", - "is-saved", - "is-very-bad", - "is-very-evil", - "is-very-good", - "is-very-nice", - "is-very-sweet", - "isa-geek", - "js", - "kicks-ass", - "misconfused", - "mlbfan", - "my-firewall", - "myfirewall", - "myftp", - "mysecuritycamera", - "nflfan", - "no-ip", - "pimienta", - "podzone", - "poivron", - "potager", - "read-books", - "readmyblog", - "selfip", - "sellsyourhome", - "servebbs", - "serveftp", - "servegame", - "spdns", - "stuff-4-sale", - "sweetpepper", - "tunk", - "tuxfamily", - "twmail", - "ufcfan", - "us", - "webhop", - "wmflabs", - "za", - "zapto", - "tele", - "c", - "rsc", - "origin", - "ssl", - "go", - "home", - "al", - "asso", - "at", - "au", - "be", - "bg", - "ca", - "cd", - "ch", - "cn", - "cy", - "cz", - "de", - "dk", - "edu", - "ee", - "es", - "fi", - "fr", - "gr", - "hr", - "hu", - "ie", - "il", - "in", - "int", - "is", - "it", - "jp", - "kr", - "lt", - "lu", - "lv", - "mc", - "me", - "mk", - "mt", - "my", - "net", - "ng", - "nl", - "no", - "nz", - "paris", - "pl", - "pt", - "q-a", - "ro", - "ru", - "se", - "si", - "sk", - "tr", - "uk", - "us", - "cloud", - "nerdpol", - "abo", - "ac", - "com", - "edu", - "gob", - "ing", - "med", - "net", - "nom", - "org", - "sld", - "ybo", - "blogspot", - "com", - "edu", - "gob", - "mil", - "net", - "nom", - "org", - "com", - "edu", - "org", - "com", - "edu", - "gov", - "i", - "mil", - "net", - "ngo", - "org", - "biz", - "com", - "edu", - "fam", - "gob", - "gok", - "gon", - "gop", - "gos", - "gov", - "info", - "net", - "org", - "web", - "agro", - "aid", - "art", - "atm", - "augustow", - "auto", - "babia-gora", - "bedzin", - "beep", - "beskidy", - "bialowieza", - "bialystok", - "bielawa", - "bieszczady", - "biz", - "boleslawiec", - "bydgoszcz", - "bytom", - "cieszyn", - "co", - "com", - "czeladz", - "czest", - "dlugoleka", - "edu", - "elblag", - "elk", - "gda", - "gdansk", - "gdynia", - "gliwice", - "glogow", - "gmina", - "gniezno", - "gorlice", - "gov", - "grajewo", - "gsm", - "ilawa", - "info", - "jaworzno", - "jelenia-gora", - "jgora", - "kalisz", - "karpacz", - "kartuzy", - "kaszuby", - "katowice", - "kazimierz-dolny", - "kepno", - "ketrzyn", - "klodzko", - "kobierzyce", - "kolobrzeg", - "konin", - "konskowola", - "krakow", - "kutno", - "lapy", - "lebork", - "legnica", - "lezajsk", - "limanowa", - "lomza", - "lowicz", - "lubin", - "lukow", - "mail", - "malbork", - "malopolska", - "mazowsze", - "mazury", - "med", - "media", - "miasta", - "mielec", - "mielno", - "mil", - "mragowo", - "naklo", - "net", - "nieruchomosci", - "nom", - "nowaruda", - "nysa", - "olawa", - "olecko", - "olkusz", - "olsztyn", - "opoczno", - "opole", - "org", - "ostroda", - "ostroleka", - "ostrowiec", - "ostrowwlkp", - "pc", - "pila", - "pisz", - "podhale", - "podlasie", - "polkowice", - "pomorskie", - "pomorze", - "powiat", - "poznan", - "priv", - "prochowice", - "pruszkow", - "przeworsk", - "pulawy", - "radom", - "rawa-maz", - "realestate", - "rel", - "rybnik", - "rzeszow", - "sanok", - "sejny", - "sex", - "shop", - "sklep", - "skoczow", - "slask", - "slupsk", - "sopot", - "sos", - "sosnowiec", - "stalowa-wola", - "starachowice", - "stargard", - "suwalki", - "swidnica", - "swiebodzin", - "swinoujscie", - "szczecin", - "szczytno", - "szkola", - "targi", - "tarnobrzeg", - "tgory", - "tm", - "tourism", - "travel", - "turek", - "turystyka", - 
"tychy", - "ustka", - "walbrzych", - "warmia", - "warszawa", - "waw", - "wegrow", - "wielun", - "wlocl", - "wloclawek", - "wodzislaw", - "wolomin", - "wroc", - "wroclaw", - "zachpomor", - "zagan", - "zakopane", - "zarow", - "zgora", - "zgorzelec", - "ap", - "griw", - "ic", - "is", - "kmpsp", - "konsulat", - "kppsp", - "kwp", - "kwpsp", - "mup", - "mw", - "oirm", - "oum", - "pa", - "pinb", - "piw", - "po", - "psp", - "psse", - "pup", - "rzgw", - "sa", - "sdn", - "sko", - "so", - "sr", - "starostwo", - "ug", - "ugim", - "um", - "umig", - "upow", - "uppo", - "us", - "uw", - "uzs", - "wif", - "wiih", - "winb", - "wios", - "witd", - "wiw", - "wsa", - "wskr", - "wuoz", - "wzmiuw", - "zp", - "co", - "edu", - "gov", - "net", - "org", - "ac", - "biz", - "com", - "edu", - "est", - "gov", - "info", - "isla", - "name", - "net", - "org", - "pro", - "prof", - "aaa", - "aca", - "acct", - "avocat", - "bar", - "cloudns", - "cpa", - "eng", - "jur", - "law", - "med", - "recht", - "com", - "edu", - "gov", - "net", - "org", - "plo", - "sec", - "blogspot", - "com", - "edu", - "gov", - "int", - "net", - "nome", - "org", - "publ", - "belau", - "cloudns", - "co", - "ed", - "go", - "ne", - "or", - "com", - "coop", - "edu", - "gov", - "mil", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "org", - "sch", - "asso", - "blogspot", - "com", - "nom", - "ybo", - "arts", - "blogspot", - "com", - "firm", - "info", - "nom", - "nt", - "org", - "rec", - "shop", - "store", - "tm", - "www", - "ac", - "blogspot", - "co", - "edu", - "gov", - "in", - "org", - "ac", - "adygeya", - "bashkiria", - "bir", - "blogspot", - "cbg", - "cldmail", - "com", - "dagestan", - "edu", - "gov", - "grozny", - "int", - "kalmykia", - "kustanai", - "marine", - "mil", - "mordovia", - "msk", - "mytis", - "nalchik", - "nov", - "pyatigorsk", - "spb", - "test", - "vladikavkaz", - "vladimir", - "hb", - "ac", - "co", - "com", - "edu", - "gouv", - "gov", - "int", - "mil", - "net", - "com", - "edu", - "gov", - "med", - "net", - "org", - "pub", - "sch", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "net", - "org", - "ybo", - "com", - "edu", - "gov", - "info", - "med", - "net", - "org", - "tv", - "a", - "ac", - "b", - "bd", - "blogspot", - "brand", - "c", - "com", - "d", - "e", - "f", - "fh", - "fhsk", - "fhv", - "g", - "h", - "i", - "k", - "komforb", - "kommunalforbund", - "komvux", - "l", - "lanbib", - "m", - "n", - "naturbruksgymn", - "o", - "org", - "p", - "parti", - "pp", - "press", - "r", - "s", - "t", - "tm", - "u", - "w", - "x", - "y", - "z", - "blogspot", - "com", - "edu", - "gov", - "net", - "org", - "per", - "com", - "gov", - "hashbang", - "mil", - "net", - "now", - "org", - "platform", - "blogspot", - "cyon", - "platformsh", - "blogspot", - "com", - "edu", - "gov", - "net", - "org", - "art", - "blogspot", - "com", - "edu", - "gouv", - "org", - "perso", - "univ", - "com", - "net", - "org", - "stackspace", - "uber", - "xs4all", - "co", - "com", - "consulado", - "edu", - "embaixada", - "gov", - "mil", - "net", - "org", - "principe", - "saotome", - "store", - "abkhazia", - "adygeya", - "aktyubinsk", - "arkhangelsk", - "armenia", - "ashgabad", - "azerbaijan", - "balashov", - "bashkiria", - "bryansk", - "bukhara", - "chimkent", - "dagestan", - "east-kazakhstan", - "exnet", - "georgia", - "grozny", - "ivanovo", - "jambyl", - "kalmykia", - "kaluga", - "karacol", - "karaganda", - "karelia", - "khakassia", - "krasnodar", - "kurgan", - "kustanai", - "lenug", - "mangyshlak", - "mordovia", - 
"msk", - "murmansk", - "nalchik", - "navoi", - "north-kazakhstan", - "nov", - "obninsk", - "penza", - "pokrovsk", - "sochi", - "spb", - "tashkent", - "termez", - "togliatti", - "troitsk", - "tselinograd", - "tula", - "tuva", - "vladikavkaz", - "vladimir", - "vologda", - "barsy", - "com", - "edu", - "gob", - "org", - "red", - "gov", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "knightpoint", - "ac", - "co", - "org", - "blogspot", - "ac", - "co", - "go", - "in", - "mi", - "net", - "or", - "ac", - "biz", - "co", - "com", - "edu", - "go", - "gov", - "int", - "mil", - "name", - "net", - "nic", - "org", - "test", - "web", - "gov", - "co", - "com", - "edu", - "gov", - "mil", - "net", - "nom", - "org", - "agrinet", - "com", - "defense", - "edunet", - "ens", - "fin", - "gov", - "ind", - "info", - "intl", - "mincom", - "nat", - "net", - "org", - "perso", - "rnrt", - "rns", - "rnu", - "tourism", - "turen", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "vpnplus", - "av", - "bbs", - "bel", - "biz", - "com", - "dr", - "edu", - "gen", - "gov", - "info", - "k12", - "kep", - "mil", - "name", - "nc", - "net", - "org", - "pol", - "tel", - "tv", - "web", - "blogspot", - "gov", - "ybo", - "aero", - "biz", - "co", - "com", - "coop", - "edu", - "gov", - "info", - "int", - "jobs", - "mobi", - "museum", - "name", - "net", - "org", - "pro", - "travel", - "better-than", - "dyndns", - "on-the-web", - "worse-than", - "blogspot", - "club", - "com", - "ebiz", - "edu", - "game", - "gov", - "idv", - "mil", - "net", - "org", - "url", - "xn--czrw28b", - "xn--uc0atv", - "xn--zf0ao64a", - "mymailer", - "ac", - "co", - "go", - "hotel", - "info", - "me", - "mil", - "mobi", - "ne", - "or", - "sc", - "tv", - "biz", - "cc", - "cherkassy", - "cherkasy", - "chernigov", - "chernihiv", - "chernivtsi", - "chernovtsy", - "ck", - "cn", - "co", - "com", - "cr", - "crimea", - "cv", - "dn", - "dnepropetrovsk", - "dnipropetrovsk", - "dominic", - "donetsk", - "dp", - "edu", - "gov", - "if", - "in", - "inf", - "ivano-frankivsk", - "kh", - "kharkiv", - "kharkov", - "kherson", - "khmelnitskiy", - "khmelnytskyi", - "kiev", - "kirovograd", - "km", - "kr", - "krym", - "ks", - "kv", - "kyiv", - "lg", - "lt", - "ltd", - "lugansk", - "lutsk", - "lv", - "lviv", - "mk", - "mykolaiv", - "net", - "nikolaev", - "od", - "odesa", - "odessa", - "org", - "pl", - "poltava", - "pp", - "rivne", - "rovno", - "rv", - "sb", - "sebastopol", - "sevastopol", - "sm", - "sumy", - "te", - "ternopil", - "uz", - "uzhgorod", - "vinnica", - "vinnytsia", - "vn", - "volyn", - "yalta", - "zaporizhzhe", - "zaporizhzhia", - "zhitomir", - "zhytomyr", - "zp", - "zt", - "ac", - "blogspot", - "co", - "com", - "go", - "ne", - "or", - "org", - "sc", - "ac", - "co", - "gov", - "ltd", - "me", - "net", - "nhs", - "org", - "plc", - "police", - "sch", - "blogspot", - "no-ip", - "wellbeingzone", - "homeoffice", - "service", - "ak", - "al", - "ar", - "as", - "az", - "ca", - "cloudns", - "co", - "ct", - "dc", - "de", - "dni", - "drud", - "fed", - "fl", - "ga", - "golffan", - "gu", - "hi", - "ia", - "id", - "il", - "in", - "is-by", - "isa", - "kids", - "ks", - "ky", - "la", - "land-4-sale", - "ma", - "md", - "me", - "mi", - "mn", - "mo", - "ms", - "mt", - "nc", - "nd", - "ne", - "nh", - "nj", - "nm", - "noip", - "nsn", - "nv", - "ny", - "oh", - "ok", - "or", - "pa", - "pointto", - "pr", - "ri", - "sc", - "sd", - "stuff-4-sale", - "tn", - "tx", - "ut", - "va", - "vi", - "vt", - "wa", - "wi", - "wv", - "wy", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", 
- "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "chtr", - "paroch", - "pvt", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "cc", - "k12", - "lib", - "com", - "edu", - "gub", - "mil", - "net", - "org", - "blogspot", - "co", - "com", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "arts", - "co", - "com", - "e12", - "edu", - "firm", - "gob", - "gov", - "info", - "int", - "mil", - "net", - "org", - "rec", - "store", - "tec", - "web", - "co", - "com", - "k12", - "net", - "org", - "ac", - "biz", - "blogspot", - "com", - "edu", - "gov", - "health", - "info", - "int", - "name", - "net", - "org", - "pro", - "com", - "edu", - "net", - "org", - "advisor", - "com", - "dyndns", - "edu", - "gov", - "mypets", - "net", - "org", - "xn--80au", - "xn--90azh", - "xn--c1avg", - "xn--d1at", - "xn--o1ac", - "xn--o1ach", - "xn--12c1fe0br", - "xn--12cfi8ixb8l", - "xn--12co0c3b4eva", - "xn--h3cuzk1di", - "xn--m3ch0j3a", - "xn--o3cyx2a", - "fhapp", - "ac", - "agric", - "alt", - "co", - "edu", - "gov", - "grondar", - "law", - "mil", - "net", - "ngo", - "nis", - "nom", - "org", - "school", - "tm", - "web", - "blogspot", - "ac", - "biz", - "co", - "com", - "edu", - "gov", - "info", - "mil", - "net", - "org", - "sch", - "triton", - "ac", - "co", - "gov", - "mil", - "org", -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/AUTHORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTING.md b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTING.md deleted file mode 100644 index 88dff59bc..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. 
What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/PATENTS b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/README b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/README deleted file mode 100644 index 144e347b4..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/README +++ /dev/null @@ -1 +0,0 @@ -This repository provides supplementary Go time packages. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate.go deleted file mode 100644 index eabcd1147..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rate provides a rate limiter. -package rate - -import ( - "fmt" - "math" - "sync" - "time" -) - -// Limit defines the maximum frequency of some events. -// Limit is represented as number of events per second. -// A zero Limit allows no events. -type Limit float64 - -// Inf is the infinite rate limit; it allows all events (even if burst is zero). -const Inf = Limit(math.MaxFloat64) - -// Every converts a minimum time interval between events to a Limit. -func Every(interval time.Duration) Limit { - if interval <= 0 { - return Inf - } - return 1 / Limit(interval.Seconds()) -} - -// A Limiter controls how frequently events are allowed to happen. -// It implements a "token bucket" of size b, initially full and refilled -// at rate r tokens per second. -// Informally, in any large enough time interval, the Limiter limits the -// rate to r tokens per second, with a maximum burst size of b events. -// As a special case, if r == Inf (the infinite rate), b is ignored. -// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. -// -// The zero value is a valid Limiter, but it will reject all events. -// Use NewLimiter to create non-zero Limiters. -// -// Limiter has three main methods, Allow, Reserve, and Wait. -// Most callers should use Wait. -// -// Each of the three methods consumes a single token. -// They differ in their behavior when no token is available. -// If no token is available, Allow returns false. 
-// If no token is available, Reserve returns a reservation for a future token -// and the amount of time the caller must wait before using it. -// If no token is available, Wait blocks until one can be obtained -// or its associated context.Context is canceled. -// -// The methods AllowN, ReserveN, and WaitN consume n tokens. -type Limiter struct { - limit Limit - burst int - - mu sync.Mutex - tokens float64 - // last is the last time the limiter's tokens field was updated - last time.Time - // lastEvent is the latest time of a rate-limited event (past or future) - lastEvent time.Time -} - -// Limit returns the maximum overall event rate. -func (lim *Limiter) Limit() Limit { - lim.mu.Lock() - defer lim.mu.Unlock() - return lim.limit -} - -// Burst returns the maximum burst size. Burst is the maximum number of tokens -// that can be consumed in a single call to Allow, Reserve, or Wait, so higher -// Burst values allow more events to happen at once. -// A zero Burst allows no events, unless limit == Inf. -func (lim *Limiter) Burst() int { - return lim.burst -} - -// NewLimiter returns a new Limiter that allows events up to rate r and permits -// bursts of at most b tokens. -func NewLimiter(r Limit, b int) *Limiter { - return &Limiter{ - limit: r, - burst: b, - } -} - -// Allow is shorthand for AllowN(time.Now(), 1). -func (lim *Limiter) Allow() bool { - return lim.AllowN(time.Now(), 1) -} - -// AllowN reports whether n events may happen at time now. -// Use this method if you intend to drop / skip events that exceed the rate limit. -// Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok -} - -// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. -// A Reservation may be canceled, which may enable the Limiter to permit additional events. -type Reservation struct { - ok bool - lim *Limiter - tokens int - timeToAct time.Time - // This is the Limit at reservation time, it can change later. - limit Limit -} - -// OK returns whether the limiter can provide the requested number of tokens -// within the maximum wait time. If OK is false, Delay returns InfDuration, and -// Cancel does nothing. -func (r *Reservation) OK() bool { - return r.ok -} - -// Delay is shorthand for DelayFrom(time.Now()). -func (r *Reservation) Delay() time.Duration { - return r.DelayFrom(time.Now()) -} - -// InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) - -// DelayFrom returns the duration for which the reservation holder must wait -// before taking the reserved action. Zero duration means act immediately. -// InfDuration means the limiter cannot grant the tokens requested in this -// Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { - if !r.ok { - return InfDuration - } - delay := r.timeToAct.Sub(now) - if delay < 0 { - return 0 - } - return delay -} - -// Cancel is shorthand for CancelAt(time.Now()). -func (r *Reservation) Cancel() { - r.CancelAt(time.Now()) - return -} - -// CancelAt indicates that the reservation holder will not perform the reserved action -// and reverses the effects of this Reservation on the rate limit as much as possible, -// considering that other reservations may have already been made. 
-func (r *Reservation) CancelAt(now time.Time) { - if !r.ok { - return - } - - r.lim.mu.Lock() - defer r.lim.mu.Unlock() - - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { - return - } - - // calculate tokens to restore - // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved - // after r was obtained. These tokens should not be restored. - restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) - if restoreTokens <= 0 { - return - } - // advance time to now - now, _, tokens := r.lim.advance(now) - // calculate new number of tokens - tokens += restoreTokens - if burst := float64(r.lim.burst); tokens > burst { - tokens = burst - } - // update state - r.lim.last = now - r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { - prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { - r.lim.lastEvent = prevEvent - } - } - - return -} - -// Reserve is shorthand for ReserveN(time.Now(), 1). -func (lim *Limiter) Reserve() *Reservation { - return lim.ReserveN(time.Now(), 1) -} - -// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. -// The Limiter takes this Reservation into account when allowing future events. -// ReserveN returns false if n exceeds the Limiter's burst size. -// Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() -// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. -// If you need to respect a deadline or cancel the delay, use Wait instead. -// To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) - return &r -} - -// contextContext is a temporary(?) copy of the context.Context type -// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+ -// with the built-in context package. If people ever stop using Go 1.6 -// we can remove this. -type contextContext interface { - Deadline() (deadline time.Time, ok bool) - Done() <-chan struct{} - Err() error - Value(key interface{}) interface{} -} - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) wait(ctx contextContext) (err error) { - return lim.WaitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -// The burst limit is ignored if the rate limit is Inf. -func (lim *Limiter) waitN(ctx contextContext, n int) (err error) { - if n > lim.burst && lim.limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) - } - // Check if ctx is already cancelled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - // Determine wait limit - now := time.Now() - waitLimit := InfDuration - if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) - } - // Reserve - r := lim.reserveN(now, n, waitLimit) - if !r.ok { - return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) - } - // Wait - t := time.NewTimer(r.DelayFrom(now)) - defer t.Stop() - select { - case <-t.C: - // We can proceed. - return nil - case <-ctx.Done(): - // Context was canceled before we could proceed. 
Cancel the - // reservation, which may permit other events to proceed sooner. - r.Cancel() - return ctx.Err() - } -} - -// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). -func (lim *Limiter) SetLimit(newLimit Limit) { - lim.SetLimitAt(time.Now(), newLimit) -} - -// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated -// or underutilized by those which reserved (using Reserve or Wait) but did not yet act -// before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { - lim.mu.Lock() - defer lim.mu.Unlock() - - now, _, tokens := lim.advance(now) - - lim.last = now - lim.tokens = tokens - lim.limit = newLimit -} - -// reserveN is a helper method for AllowN, ReserveN, and WaitN. -// maxFutureReserve specifies the maximum reservation wait duration allowed. -// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { - lim.mu.Lock() - - if lim.limit == Inf { - lim.mu.Unlock() - return Reservation{ - ok: true, - lim: lim, - tokens: n, - timeToAct: now, - } - } - - now, last, tokens := lim.advance(now) - - // Calculate the remaining number of tokens resulting from the request. - tokens -= float64(n) - - // Calculate the wait duration - var waitDuration time.Duration - if tokens < 0 { - waitDuration = lim.limit.durationFromTokens(-tokens) - } - - // Decide result - ok := n <= lim.burst && waitDuration <= maxFutureReserve - - // Prepare reservation - r := Reservation{ - ok: ok, - lim: lim, - limit: lim.limit, - } - if ok { - r.tokens = n - r.timeToAct = now.Add(waitDuration) - } - - // Update state - if ok { - lim.last = now - lim.tokens = tokens - lim.lastEvent = r.timeToAct - } else { - lim.last = last - } - - lim.mu.Unlock() - return r -} - -// advance calculates and returns an updated state for lim resulting from the passage of time. -// lim is not changed. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { - last := lim.last - if now.Before(last) { - last = now - } - - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - - // Calculate the new number of tokens, due to time that passed. - delta := lim.limit.tokensFromDuration(elapsed) - tokens := lim.tokens + delta - if burst := float64(lim.burst); tokens > burst { - tokens = burst - } - - return now, last, tokens -} - -// durationFromTokens is a unit conversion function from the number of tokens to the duration -// of time it takes to accumulate them at a rate of limit tokens per second. -func (limit Limit) durationFromTokens(tokens float64) time.Duration { - seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) -} - -// tokensFromDuration is a unit conversion function from a time duration to the number of tokens -// which could be accumulated during that duration at a rate of limit tokens per second. 
-func (limit Limit) tokensFromDuration(d time.Duration) float64 { - return d.Seconds() * float64(limit) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go16.go deleted file mode 100644 index 6bab1850f..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package rate - -import "golang.org/x/net/context" - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go17.go deleted file mode 100644 index f90d85f51..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package rate - -import "context" - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_test.go b/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_test.go deleted file mode 100644 index e8add694f..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/golang.org/x/time/rate/rate_test.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
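For context on the package being dropped above: the deleted `rate.go` (together with the `rate_go16.go`/`rate_go17.go` wrappers) implements a token-bucket limiter whose public surface is `NewLimiter`, `Allow`/`AllowN`, `Reserve`/`ReserveN`, and `Wait`/`WaitN`. The sketch below shows the calling pattern those methods are documented to support; the limit values and the work being throttled are invented for illustration, and the import path assumes the upstream `golang.org/x/time/rate` package rather than this vendored copy.

```Go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate" // assumption: upstream home of the deleted vendored package
)

func main() {
	// Token bucket: refill at 10 tokens per second, allow bursts of up to 5.
	lim := rate.NewLimiter(rate.Limit(10), 5)

	// Allow consumes a token if one is available and reports false otherwise;
	// use it when excess events should simply be dropped.
	if lim.Allow() {
		fmt.Println("event handled immediately")
	}

	// Wait blocks until a token is available or the context is done;
	// use it when events should be delayed rather than dropped.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("gave up waiting:", err)
		return
	}
	fmt.Println("event handled after waiting")
}
```

`Reserve` is the third option described in the deleted doc comments: it hands back a `Reservation` carrying the delay to honour, which suits callers that want to schedule the work themselves rather than block.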
- -// +build go1.7 - -package rate - -import ( - "context" - "math" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" -) - -func TestLimit(t *testing.T) { - if Limit(10) == Inf { - t.Errorf("Limit(10) == Inf should be false") - } -} - -func closeEnough(a, b Limit) bool { - return (math.Abs(float64(a)/float64(b)) - 1.0) < 1e-9 -} - -func TestEvery(t *testing.T) { - cases := []struct { - interval time.Duration - lim Limit - }{ - {0, Inf}, - {-1, Inf}, - {1 * time.Nanosecond, Limit(1e9)}, - {1 * time.Microsecond, Limit(1e6)}, - {1 * time.Millisecond, Limit(1e3)}, - {10 * time.Millisecond, Limit(100)}, - {100 * time.Millisecond, Limit(10)}, - {1 * time.Second, Limit(1)}, - {2 * time.Second, Limit(0.5)}, - {time.Duration(2.5 * float64(time.Second)), Limit(0.4)}, - {4 * time.Second, Limit(0.25)}, - {10 * time.Second, Limit(0.1)}, - {time.Duration(math.MaxInt64), Limit(1e9 / float64(math.MaxInt64))}, - } - for _, tc := range cases { - lim := Every(tc.interval) - if !closeEnough(lim, tc.lim) { - t.Errorf("Every(%v) = %v want %v", tc.interval, lim, tc.lim) - } - } -} - -const ( - d = 100 * time.Millisecond -) - -var ( - t0 = time.Now() - t1 = t0.Add(time.Duration(1) * d) - t2 = t0.Add(time.Duration(2) * d) - t3 = t0.Add(time.Duration(3) * d) - t4 = t0.Add(time.Duration(4) * d) - t5 = t0.Add(time.Duration(5) * d) - t9 = t0.Add(time.Duration(9) * d) -) - -type allow struct { - t time.Time - n int - ok bool -} - -func run(t *testing.T, lim *Limiter, allows []allow) { - for i, allow := range allows { - ok := lim.AllowN(allow.t, allow.n) - if ok != allow.ok { - t.Errorf("step %d: lim.AllowN(%v, %v) = %v want %v", - i, allow.t, allow.n, ok, allow.ok) - } - } -} - -func TestLimiterBurst1(t *testing.T) { - run(t, NewLimiter(10, 1), []allow{ - {t0, 1, true}, - {t0, 1, false}, - {t0, 1, false}, - {t1, 1, true}, - {t1, 1, false}, - {t1, 1, false}, - {t2, 2, false}, // burst size is 1, so n=2 always fails - {t2, 1, true}, - {t2, 1, false}, - }) -} - -func TestLimiterBurst3(t *testing.T) { - run(t, NewLimiter(10, 3), []allow{ - {t0, 2, true}, - {t0, 2, false}, - {t0, 1, true}, - {t0, 1, false}, - {t1, 4, false}, - {t2, 1, true}, - {t3, 1, true}, - {t4, 1, true}, - {t4, 1, true}, - {t4, 1, false}, - {t4, 1, false}, - {t9, 3, true}, - {t9, 0, true}, - }) -} - -func TestLimiterJumpBackwards(t *testing.T) { - run(t, NewLimiter(10, 3), []allow{ - {t1, 1, true}, // start at t1 - {t0, 1, true}, // jump back to t0, two tokens remain - {t0, 1, true}, - {t0, 1, false}, - {t0, 1, false}, - {t1, 1, true}, // got a token - {t1, 1, false}, - {t1, 1, false}, - {t2, 1, true}, // got another token - {t2, 1, false}, - {t2, 1, false}, - }) -} - -func TestSimultaneousRequests(t *testing.T) { - const ( - limit = 1 - burst = 5 - numRequests = 15 - ) - var ( - wg sync.WaitGroup - numOK = uint32(0) - ) - - // Very slow replenishing bucket. - lim := NewLimiter(limit, burst) - - // Tries to take a token, atomically updates the counter and decreases the wait - // group counter. 
- f := func() { - defer wg.Done() - if ok := lim.Allow(); ok { - atomic.AddUint32(&numOK, 1) - } - } - - wg.Add(numRequests) - for i := 0; i < numRequests; i++ { - go f() - } - wg.Wait() - if numOK != burst { - t.Errorf("numOK = %d, want %d", numOK, burst) - } -} - -func TestLongRunningQPS(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - if runtime.GOOS == "openbsd" { - t.Skip("low resolution time.Sleep invalidates test (golang.org/issue/14183)") - return - } - - // The test runs for a few seconds executing many requests and then checks - // that overall number of requests is reasonable. - const ( - limit = 100 - burst = 100 - ) - var numOK = int32(0) - - lim := NewLimiter(limit, burst) - - var wg sync.WaitGroup - f := func() { - if ok := lim.Allow(); ok { - atomic.AddInt32(&numOK, 1) - } - wg.Done() - } - - start := time.Now() - end := start.Add(5 * time.Second) - for time.Now().Before(end) { - wg.Add(1) - go f() - - // This will still offer ~500 requests per second, but won't consume - // outrageous amount of CPU. - time.Sleep(2 * time.Millisecond) - } - wg.Wait() - elapsed := time.Since(start) - ideal := burst + (limit * float64(elapsed) / float64(time.Second)) - - // We should never get more requests than allowed. - if want := int32(ideal + 1); numOK > want { - t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) - } - // We should get very close to the number of requests allowed. - if want := int32(0.999 * ideal); numOK < want { - t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) - } -} - -type request struct { - t time.Time - n int - act time.Time - ok bool -} - -// dFromDuration converts a duration to a multiple of the global constant d -func dFromDuration(dur time.Duration) int { - // Adding a millisecond to be swallowed by the integer division - // because we don't care about small inaccuracies - return int((dur + time.Millisecond) / d) -} - -// dSince returns multiples of d since t0 -func dSince(t time.Time) int { - return dFromDuration(t.Sub(t0)) -} - -func runReserve(t *testing.T, lim *Limiter, req request) *Reservation { - return runReserveMax(t, lim, req, InfDuration) -} - -func runReserveMax(t *testing.T, lim *Limiter, req request, maxReserve time.Duration) *Reservation { - r := lim.reserveN(req.t, req.n, maxReserve) - if r.ok && (dSince(r.timeToAct) != dSince(req.act)) || r.ok != req.ok { - t.Errorf("lim.reserveN(t%d, %v, %v) = (t%d, %v) want (t%d, %v)", - dSince(req.t), req.n, maxReserve, dSince(r.timeToAct), r.ok, dSince(req.act), req.ok) - } - return &r -} - -func TestSimpleReserve(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - runReserve(t, lim, request{t0, 2, t2, true}) - runReserve(t, lim, request{t3, 2, t4, true}) -} - -func TestMix(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst - runReserve(t, lim, request{t0, 2, t0, true}) - run(t, lim, []allow{{t1, 2, false}}) // not enought tokens - don't allow - runReserve(t, lim, request{t1, 2, t2, true}) - run(t, lim, []allow{{t1, 1, false}}) // negative tokens - don't allow - run(t, lim, []allow{{t3, 1, true}}) -} - -func TestCancelInvalid(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 3, t3, false}) - r.CancelAt(t0) // should have no effect - runReserve(t, lim, request{t0, 2, t2, true}) // did not get extra tokens -} - -func TestCancelLast(t *testing.T) { - 
lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 2, t2, true}) - r.CancelAt(t1) // got 2 tokens back - runReserve(t, lim, request{t1, 2, t2, true}) -} - -func TestCancelTooLate(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 2, t2, true}) - r.CancelAt(t3) // too late to cancel - should have no effect - runReserve(t, lim, request{t3, 2, t4, true}) -} - -func TestCancel0Tokens(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 1, t1, true}) - runReserve(t, lim, request{t0, 1, t2, true}) - r.CancelAt(t0) // got 0 tokens back - runReserve(t, lim, request{t0, 1, t3, true}) -} - -func TestCancel1Token(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 2, t2, true}) - runReserve(t, lim, request{t0, 1, t3, true}) - r.CancelAt(t2) // got 1 token back - runReserve(t, lim, request{t2, 2, t4, true}) -} - -func TestCancelMulti(t *testing.T) { - lim := NewLimiter(10, 4) - - runReserve(t, lim, request{t0, 4, t0, true}) - rA := runReserve(t, lim, request{t0, 3, t3, true}) - runReserve(t, lim, request{t0, 1, t4, true}) - rC := runReserve(t, lim, request{t0, 1, t5, true}) - rC.CancelAt(t1) // get 1 token back - rA.CancelAt(t1) // get 2 tokens back, as if C was never reserved - runReserve(t, lim, request{t1, 3, t5, true}) -} - -func TestReserveJumpBack(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 - runReserve(t, lim, request{t0, 1, t1, true}) // should violate Limit,Burst - runReserve(t, lim, request{t2, 2, t3, true}) -} - -func TestReserveJumpBackCancel(t *testing.T) { - lim := NewLimiter(10, 2) - - runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 - r := runReserve(t, lim, request{t1, 2, t3, true}) - runReserve(t, lim, request{t1, 1, t4, true}) - r.CancelAt(t0) // cancel at t0, get 1 token back - runReserve(t, lim, request{t1, 2, t4, true}) // should violate Limit,Burst -} - -func TestReserveSetLimit(t *testing.T) { - lim := NewLimiter(5, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - runReserve(t, lim, request{t0, 2, t4, true}) - lim.SetLimitAt(t2, 10) - runReserve(t, lim, request{t2, 1, t4, true}) // violates Limit and Burst -} - -func TestReserveSetLimitCancel(t *testing.T) { - lim := NewLimiter(5, 2) - - runReserve(t, lim, request{t0, 2, t0, true}) - r := runReserve(t, lim, request{t0, 2, t4, true}) - lim.SetLimitAt(t2, 10) - r.CancelAt(t2) // 2 tokens back - runReserve(t, lim, request{t2, 2, t3, true}) -} - -func TestReserveMax(t *testing.T) { - lim := NewLimiter(10, 2) - maxT := d - - runReserveMax(t, lim, request{t0, 2, t0, true}, maxT) - runReserveMax(t, lim, request{t0, 1, t1, true}, maxT) // reserve for close future - runReserveMax(t, lim, request{t0, 1, t2, false}, maxT) // time to act too far in the future -} - -type wait struct { - name string - ctx context.Context - n int - delay int // in multiples of d - nilErr bool -} - -func runWait(t *testing.T, lim *Limiter, w wait) { - start := time.Now() - err := lim.WaitN(w.ctx, w.n) - delay := time.Now().Sub(start) - if (w.nilErr && err != nil) || (!w.nilErr && err == nil) || w.delay != dFromDuration(delay) { - errString := "" - if !w.nilErr { - errString = "" - } - t.Errorf("lim.WaitN(%v, lim, %v) = %v with delay %v ; want %v with delay %v", - w.name, w.n, err, delay, 
errString, d*time.Duration(w.delay)) - } -} - -func TestWaitSimple(t *testing.T) { - lim := NewLimiter(10, 3) - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - runWait(t, lim, wait{"already-cancelled", ctx, 1, 0, false}) - - runWait(t, lim, wait{"exceed-burst-error", context.Background(), 4, 0, false}) - - runWait(t, lim, wait{"act-now", context.Background(), 2, 0, true}) - runWait(t, lim, wait{"act-later", context.Background(), 3, 2, true}) -} - -func TestWaitCancel(t *testing.T) { - lim := NewLimiter(10, 3) - - ctx, cancel := context.WithCancel(context.Background()) - runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) // after this lim.tokens = 1 - go func() { - time.Sleep(d) - cancel() - }() - runWait(t, lim, wait{"will-cancel", ctx, 3, 1, false}) - // should get 3 tokens back, and have lim.tokens = 2 - t.Logf("tokens:%v last:%v lastEvent:%v", lim.tokens, lim.last, lim.lastEvent) - runWait(t, lim, wait{"act-now-after-cancel", context.Background(), 2, 0, true}) -} - -func TestWaitTimeout(t *testing.T) { - lim := NewLimiter(10, 3) - - ctx, cancel := context.WithTimeout(context.Background(), d) - defer cancel() - runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) - runWait(t, lim, wait{"w-timeout-err", ctx, 3, 0, false}) -} - -func TestWaitInf(t *testing.T) { - lim := NewLimiter(Inf, 0) - - runWait(t, lim, wait{"exceed-burst-no-error", context.Background(), 3, 0, true}) -} - -func BenchmarkAllowN(b *testing.B) { - lim := NewLimiter(Every(1*time.Second), 1) - now := time.Now() - b.ReportAllocs() - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - lim.AllowN(now, 1) - } - }) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc deleted file mode 100644 index 730e569b0..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitignore b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitignore deleted file mode 100644 index 5b4d73b68..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -*~ -.*.swp -*.out -*.test -*.pem -*.cov -jose-util/jose-util diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.travis.yml b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.travis.yml deleted file mode 100644 index c38cd007d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -sudo: false - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: -- 1.3 -- 1.4 -- 1.5 -- 1.6 -- 1.7 -- tip - -go_import_path: gopkg.in/square/go-jose.v1 - -before_script: -- export PATH=$HOME/.local/bin:$PATH - -before_install: -# Install encrypted gitcookies to get around bandwidth-limits -# that is causing Travis-CI builds to fail. 
For more info, see -# https://github.com/golang/go/issues/12933 -- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true -- bash .gitcookies.sh || true -- go get github.com/wadey/gocovmerge -- go get github.com/mattn/goveralls -- go get golang.org/x/tools/cmd/cover || true -- go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user `whoami` - -script: -- go test . -v -covermode=count -coverprofile=profile.cov -- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov -- go test ./json -v # no coverage for forked encoding/json package -- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t -- cd .. - -after_success: -- gocovmerge *.cov */*.cov > merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md deleted file mode 100644 index 97e61dbb6..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md deleted file mode 100644 index 61b183651..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md deleted file mode 100644 index 60293ffa2..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md +++ /dev/null @@ -1,212 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![release](https://img.shields.io/github/release/square/go-jose.svg?style=flat)](https://github.com/square/go-jose/releases) -[![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. 
The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. -publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. 
-object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext. An error here -// would indicate the the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. -var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. An error here would -// indicate the the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. -output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go deleted file mode 100644 index cd36c21da..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go +++ /dev/null @@ -1,520 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "gopkg.in/square/go-jose.v1/cipher" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. -func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. 
-func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. 
- _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - headers := rawHeader{ - Epk: &JsonWebKey{ - Key: &priv.PublicKey, - }, - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - if headers.Epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("square/go-jose: invalid public key in epk header") - } - - apuData := headers.Apu.bytes() - apvData := headers.Apv.bytes() - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) - } - - var keySize int - - switch KeyAlgorithm(headers.Alg) { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
- return deriveKey(string(headers.Enc), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(headers.Alg, keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go deleted file mode 100644 index 018ad2e2d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go +++ /dev/null @@ -1,468 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
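The EC signer above serializes the two ECDSA values into a fixed-width `r || s` byte string rather than ASN.1/DER. A standard-library-only sketch of that encoding (the `encodeFixedWidth` helper is illustrative, not part of the package):

```Go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// encodeFixedWidth left-pads r and s with zeros to the curve's byte length
// and concatenates them, mirroring what signPayload does for ES256/384/512.
func encodeFixedWidth(r, s *big.Int, curveBits int) []byte {
	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++
	}

	out := make([]byte, 2*keyBytes)
	rBytes := r.Bytes()
	copy(out[keyBytes-len(rBytes):keyBytes], rBytes)
	sBytes := s.Bytes()
	copy(out[2*keyBytes-len(sBytes):], sBytes)
	return out
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("Lorem ipsum dolor sit amet"))
	r, s, err := ecdsa.Sign(rand.Reader, key, digest[:])
	if err != nil {
		panic(err)
	}

	sig := encodeFixedWidth(r, s, key.Curve.Params().BitSize)
	// For P-256 this is always 64 bytes: 32 for r followed by 32 for s.
	fmt.Printf("signature length: %d bytes\n", len(sig))
}
```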
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "errors" - "io" - "math/big" - "testing" -) - -func TestVectorsRSA(t *testing.T) { - // Sources: - // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm - // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: fromHexInt(` - a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 - ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c - bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd - bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 - ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), - E: 65537, - }, - D: fromHexInt(` - 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 - 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d - 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 - 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb - 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), - Primes: []*big.Int{ - fromHexInt(` - d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 - 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c - 2f26a471dcad212eac7ca39d`), - fromHexInt(` - cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 - 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af - 72bfe9a030e860b0288b5d77`), - }, - } - - input := fromHexBytes( - "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34") - - expectedPKCS := fromHexBytes(` - 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808 - 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d - 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb - 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b - cd23b528ceab3c31`) - - expectedOAEP := fromHexBytes(` - 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4 - 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618 - c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6 - 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5 - 210035d47ac72e8a`) - - // Mock random reader - randReader = bytes.NewReader(fromHexBytes(` - 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3 - a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5 - 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98 - 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`)) - defer resetRandReader() - - // RSA-PKCS1v1.5 encrypt - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - encryptedPKCS, err := enc.encrypt(input, RSA1_5) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP encrypt - encryptedOAEP, err := enc.encrypt(input, RSA_OAEP) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 { - t.Error("Output does not match expected value (OAEP)") - } - - // Need fake cipher for PKCS1v1.5 decrypt - resetRandReader() - aes := newAESGCM(len(input)) - - 
keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - // RSA-PKCS1v1.5 decrypt - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen) - if err != nil { - t.Error("Decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP decrypt - decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen) - if err != nil { - t.Error("decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedOAEP) != 0 { - t.Error("output does not match expected value (OAEP)") - } -} - -func TestInvalidAlgorithmsRSA(t *testing.T) { - _, err := newRSARecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newRSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &rsaTestKey.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - err = enc.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16}) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = dec.signPayload([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -type failingKeyGenerator struct{} - -func (ctx failingKeyGenerator) keySize() int { - return 0 -} - -func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) { - return nil, rawHeader{}, errors.New("failed to generate key") -} - -func TestPKCSKeyGeneratorFailure(t *testing.T) { - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - generator := failingKeyGenerator{} - _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator) - if err != ErrCryptoFailure { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidAlgorithmsEC(t *testing.T) { - _, err := newECDHRecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newECDSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(ecEncrypterVerifier) - enc.publicKey = &ecTestKey256.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidECKeyGen(t *testing.T) { - gen := ecKeyGenerator{ - size: 16, - algID: "A128GCM", - publicKey: &ecTestKey256.PublicKey, - } - - if gen.keySize() != 16 { - t.Error("ec key generator reported incorrect key size") - } - - _, _, err := gen.genKey() - if err != nil { - t.Error("ec key generator failed to generate key", err) - } -} - -func TestInvalidECDecrypt(t *testing.T) { - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - generator := randomKeyGenerator{size: 16} - - // Missing epk header - headers := rawHeader{ - Alg: string(ECDH_ES), - } - - _, err := dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with missing epk header") - } - - // 
Invalid epk header - headers.Epk = &JsonWebKey{} - - _, err = dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with invalid epk header") - } -} - -func TestDecryptWithIncorrectSize(t *testing.T) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - payload := make([]byte, 254) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } - - payload = make([]byte, 257) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } -} - -func TestPKCSDecryptNeverFails(t *testing.T) { - // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent - // side-channel timing attacks (Bleichenbacher attack in particular). - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - for i := 1; i < 50; i++ { - payload := make([]byte, 256) - _, err := io.ReadFull(rand.Reader, payload) - if err != nil { - t.Error("Unable to get random data:", err) - return - } - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err != nil { - t.Error("PKCS1v1.5 decrypt should never fail:", err) - return - } - } -} - -func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(32) - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 32) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 16) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - // Do some simple scrambling - ciphertext[128] ^= 0xFF - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func TestInvalidEllipticCurve(t *testing.T) { - signer256 := ecDecrypterSigner{privateKey: ecTestKey256} - signer384 := ecDecrypterSigner{privateKey: ecTestKey384} - signer521 := ecDecrypterSigner{privateKey: ecTestKey521} - - _, err := signer256.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-256 key") - } - _, err = signer256.signPayload([]byte{}, ES512) - if 
err == nil { - t.Error("should not generate ES512 signature with P-256 key") - } - _, err = signer384.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-384 key") - } - _, err = signer384.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-384 key") - } - _, err = signer521.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-521 key") - } - _, err = signer521.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-521 key") - } -} - -func TestInvalidECPublicKey(t *testing.T) { - // Invalid key - invalid := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("MTEx"), - Y: fromBase64Int("MTEx"), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), - } - - headers := rawHeader{ - Alg: string(ECDH_ES), - Epk: &JsonWebKey{ - Key: &invalid.PublicKey, - }, - } - - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - _, err := dec.decryptKey(headers, nil, randomKeyGenerator{size: 16}) - if err == nil { - t.Fatal("decrypter accepted JWS with invalid ECDH public key") - } -} - -func TestInvalidAlgorithmEC(t *testing.T) { - err := ecEncrypterVerifier{publicKey: &ecTestKey256.PublicKey}.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Fatal("should not accept invalid/unsupported algorithm") - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go deleted file mode 100644 index 126b85ce2..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. 
-func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures the the given slice has a capacity of at least n bytes. 
-// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. -func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go deleted file mode 100644 index 40bcb20fa..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
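For reference, the authentication tag produced by `computeAuthTag` above covers the additional data, the IV, the ciphertext, and the AAD length in bits, and is then truncated to half the HMAC output. A standard-library-only sketch of the A128CBC-HS256 case (the `computeTag` helper and the zeroed keys are purely illustrative):

```Go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// computeTag mirrors cbcAEAD.computeAuthTag for A128CBC-HS256:
// HMAC-SHA256 over aad || iv || ciphertext || bitlen(aad), truncated
// to 16 bytes (the size of the integrity key).
func computeTag(integrityKey, aad, iv, ciphertext []byte) []byte {
	buf := make([]byte, 0, len(aad)+len(iv)+len(ciphertext)+8)
	buf = append(buf, aad...)
	buf = append(buf, iv...)
	buf = append(buf, ciphertext...)

	aadBits := make([]byte, 8)
	binary.BigEndian.PutUint64(aadBits, uint64(len(aad))*8)
	buf = append(buf, aadBits...)

	mac := hmac.New(sha256.New, integrityKey)
	mac.Write(buf) // Write on an HMAC never returns an error
	return mac.Sum(nil)[:16]
}

func main() {
	// In the real AEAD the 32-byte combined key is split in half: the first
	// 16 bytes are the integrity (HMAC) key, the last 16 the AES key.
	integrityKey := make([]byte, 16)
	iv := make([]byte, 16)

	tag := computeTag(integrityKey, []byte("aad"), iv, []byte("ciphertext"))
	fmt.Printf("auth tag: %x (%d bytes)\n", tag, len(tag))
}
```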
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "io" - "strings" - "testing" -) - -func TestInvalidInputs(t *testing.T) { - key := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - nonce := []byte{ - 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad")) - - // Changed AAD, must fail - _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID")) - if err == nil { - t.Error("must detect invalid aad") - } - - // Empty ciphertext, must fail - _, err = aead.Open(nil, nonce, []byte{}, []byte("aad")) - if err == nil { - t.Error("must detect invalid/empty ciphertext") - } - - // Corrupt ciphertext, must fail - corrupt := make([]byte, len(ciphertext)) - copy(corrupt, ciphertext) - corrupt[0] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt ciphertext") - } - - // Corrupt authtag, must fail - copy(corrupt, ciphertext) - corrupt[len(ciphertext)-1] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } - - // Truncated data, must fail - _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } -} - -func TestVectorsAESCBC128(t *testing.T) { - // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2 - plaintext := []byte{ - 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32, - 112, 114, 111, 115, 112, 101, 114, 46} - - aad := []byte{ - 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, - 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105, - 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85, - 50, 73, 110, 48} - - expectedCiphertext := []byte{ - 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6, - 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143, - 112, 56, 102} - - expectedAuthtag := []byte{ - 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100, - 191} - - key := []byte{ - 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206, - 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207} - - nonce := []byte{ - 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match") - } - if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match") - } -} - -func TestVectorsAESCBC256(t *testing.T) { - // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4 - plaintext := []byte{ - 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, - 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, - 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 
0x61, 0x6c, 0x6c, 0x20, 0x69, - 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, - 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65} - - aad := []byte{ - 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63, - 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20, - 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73} - - expectedCiphertext := []byte{ - 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd, - 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd, - 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2, - 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b, - 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1, - 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3, - 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e, - 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b, - 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6} - - expectedAuthtag := []byte{ - 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf, - 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5} - - key := []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f} - - nonce := []byte{ - 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext) - } - if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag) - } -} - -func TestAESCBCRoundtrip(t *testing.T) { - key128 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - key192 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7} - - key256 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - RunRoundtrip(t, key128, nonce) - RunRoundtrip(t, key192, nonce) - RunRoundtrip(t, key256, nonce) -} - -func 
RunRoundtrip(t *testing.T, key, nonce []byte) { - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.NonceSize() != len(nonce) { - panic("invalid nonce") - } - - // Test pre-existing data in dst buffer - dst := []byte{15, 15, 15, 15} - plaintext := []byte{0, 0, 0, 0} - aad := []byte{4, 3, 2, 1} - - result := aead.Seal(dst, nonce, plaintext, aad) - if bytes.Compare(dst, result[:4]) != 0 { - t.Error("Existing data in dst not preserved") - } - - // Test pre-existing (empty) dst buffer with sufficient capacity - dst = make([]byte, 256)[:0] - result, err = aead.Open(dst, nonce, result[4:], aad) - if err != nil { - panic(err) - } - - if bytes.Compare(result, plaintext) != 0 { - t.Error("Plaintext does not match output") - } -} - -func TestAESCBCOverhead(t *testing.T) { - aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.Overhead() != 32 { - t.Error("CBC-HMAC reports incorrect overhead value") - } -} - -func TestPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - unpadded, err := unpadBuffer(padded, 16) - if err != nil || len(unpadded) != i { - t.Error("failed to unpad slice properly", i) - return - } - } -} - -func TestInvalidKey(t *testing.T) { - key := make([]byte, 30) - _, err := NewCBCHMAC(key, aes.NewCipher) - if err == nil { - t.Error("should not be able to instantiate CBC-HMAC with invalid key") - } -} - -func TestTruncatedCiphertext(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - data := make([]byte, 32) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - ctx := aead.(*cbcAEAD) - ct := aead.Seal(nil, nonce, data, nil) - - // Truncated ciphertext, but with correct auth tag - truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], uint64(len(ct))-2) - copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes])) - - // Open should fail - _, err = aead.Open(nil, nonce, truncated, nil) - if err == nil { - t.Error("open on truncated ciphertext should fail") - } -} - -func TestInvalidPaddingOpen(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - - // Plaintext with invalid padding - plaintext := padBuffer(make([]byte, 28), aes.BlockSize) - plaintext[len(plaintext)-1] = 0xFF - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - block, _ := aes.NewCipher(key) - cbc := cipher.NewCBCEncrypter(block, nonce) - buffer := append([]byte{}, plaintext...) 
- cbc.CryptBlocks(buffer, buffer) - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ctx := aead.(*cbcAEAD) - - // Mutated ciphertext, but with correct auth tag - size := uint64(len(buffer)) - ciphertext, tail := resize(buffer, size+(uint64(len(key))/2)) - copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size])) - - // Open should fail (b/c of invalid padding, even though tag matches) - _, err := aead.Open(nil, nonce, ciphertext, nil) - if err == nil || !strings.Contains(err.Error(), "invalid padding") { - t.Error("no or unexpected error on open with invalid padding:", err) - } -} - -func TestInvalidPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - - paddingBytes := 16 - (i % 16) - - // Mutate padding for testing - for j := 1; j <= paddingBytes; j++ { - mutated := make([]byte, len(padded)) - copy(mutated, padded) - mutated[len(mutated)-j] ^= 0xFF - - _, err := unpadBuffer(mutated, 16) - if err == nil { - t.Error("unpad on invalid padding should fail", i) - return - } - } - - // Test truncated padding - _, err := unpadBuffer(padded[:len(padded)-1], 16) - if err == nil { - t.Error("unpad on truncated padding should fail", i) - return - } - } -} - -func TestZeroLengthPadding(t *testing.T) { - data := make([]byte, 16) - data, err := unpadBuffer(data, 16) - if err == nil { - t.Error("padding with 0x00 should never be valid") - } -} - -func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Seal(nil, nonce, chunk, nil) - } -} - -func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - out := aead.Seal(nil, nonce, chunk, nil) - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Open(nil, nonce, out, nil) - } -} - -func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 67108864) -} - -func 
BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1024) -} - -func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 67108864) -} - -func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1032) -} - -func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 67108864) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. 
-func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go deleted file mode 100644 index 48219b3e1..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "testing" -) - -// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt -func TestVectorConcatKDF(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77} - - ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101} - ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98} - - supPubInfo := []byte{0, 0, 0, 128} - supPrivInfo := []byte{} - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - out0 := make([]byte, 9) - out1 := make([]byte, 7) - - read0, err := ckdf.Read(out0) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - read1, err := ckdf.Read(out1) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - if read0+read1 != len(out0)+len(out1) { - t.Error("did not receive enough bytes from concat kdf reader") - return - } - - out := []byte{} - out = append(out, out0...) - out = append(out, out1...) 
- - if bytes.Compare(out, expected) != 0 { - t.Error("did not receive expected output from concat kdf reader") - return - } -} - -func TestCache(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - outputs := [][]byte{} - - // Read the same amount of data in different chunk sizes - chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512} - - for _, c := range chunkSizes { - out := make([]byte, 1024) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - for i := 0; i < 1024; i += c { - _, _ = reader.Read(out[i : i+c]) - } - - outputs = append(outputs, out) - } - - for i := range outputs { - if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 { - t.Error("not all outputs from KDF matched") - } - } -} - -func benchmarkKDF(b *testing.B, total int) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - out := make([]byte, total) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - b.ResetTimer() - b.SetBytes(int64(total)) - for i := 0; i < b.N; i++ { - _, _ = reader.Read(out) - } -} - -func BenchmarkConcatKDF_1k(b *testing.B) { - benchmarkKDF(b, 1024) -} - -func BenchmarkConcatKDF_64k(b *testing.B) { - benchmarkKDF(b, 65536) -} - -func BenchmarkConcatKDF_1MB(b *testing.B) { - benchmarkKDF(b, 1048576) -} - -func BenchmarkConcatKDF_64MB(b *testing.B) { - benchmarkKDF(b, 67108864) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go deleted file mode 100644 index f23d49e1f..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go +++ /dev/null @@ -1,62 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "crypto/ecdsa" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). 
-func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - return key -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go deleted file mode 100644 index ca2c508dd..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go +++ /dev/null @@ -1,115 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/base64" - "math/big" - "testing" -) - -// Example keys from JWA, Appendix C -var aliceKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="), - Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), -} - -var bobKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="), - Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="), - }, - D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="), -} - -// Build big int from base64-encoded string. Strips whitespace (for testing). 
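The concat_kdf.go reader removed further above is what DeriveECDHES consumes internally: it exposes the NIST Concat KDF as an io.Reader, so callers simply read as many key bytes as they need. A minimal consumption sketch, assuming the josecipher package were still importable; the inputs are placeholder values rather than properly length-prefixed JWE parameters:

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
	"fmt"
	"io"

	josecipher "gopkg.in/square/go-jose.v1/cipher"
)

func main() {
	z := make([]byte, 32)              // shared secret (placeholder)
	algID := []byte("A128GCM")         // real callers pass a length-prefixed value
	supPubInfo := []byte{0, 0, 0, 128} // desired key length in bits, big-endian

	kdf := josecipher.NewConcatKDF(crypto.SHA256, z, algID, nil, nil, supPubInfo, nil)

	key := make([]byte, 16)
	// The removed Read implementation never returns an error, so ReadFull always fills the buffer.
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte key\n", len(key))
}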
-func fromBase64Int(data string) *big.Int { - val, err := base64.URLEncoding.DecodeString(data) - if err != nil { - panic("Invalid test data") - } - return new(big.Int).SetBytes(val) -} - -func TestVectorECDHES(t *testing.T) { - apuData := []byte("Alice") - apvData := []byte("Bob") - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - - if bytes.Compare(output, expected) != 0 { - t.Error("output did not match what we expect, got", output, "wanted", expected) - } -} - -func TestInvalidECPublicKey(t *testing.T) { - defer func() { recover() }() - - // Invalid key - invalid := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("MTEx"), - Y: fromBase64Int("MTEx"), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), - } - - DeriveECDHES("A128GCM", []byte{}, []byte{}, bobKey, &invalid.PublicKey, 16) - t.Fatal("should panic if public key was invalid") -} - -func BenchmarkECDHES_128(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - } -} - -func BenchmarkECDHES_192(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24) - } -} - -func BenchmarkECDHES_256(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go deleted file mode 100644 index 1d36d5015..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. 
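KeyWrap, documented above, and its KeyUnwrap counterpart (both implemented below) provide NIST AES key wrapping for the AxxxKW algorithms. A minimal round-trip sketch, assuming the josecipher package were still importable; the KEK and CEK are all-zero placeholders used purely for illustration:

package main

import (
	"bytes"
	"crypto/aes"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v1/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key (AES-128 here)
	cek := make([]byte, 32) // content-encryption key to wrap; must be a multiple of 8 bytes

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}

	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}

	fmt.Println("round-trip ok:", bytes.Equal(unwrapped, cek)) // true
}

Both functions reject input that is not a multiple of 8 bytes, and KeyUnwrap returns an error (via a constant-time check of the default IV) instead of silently accepting a corrupted wrap.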
-func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go deleted file mode 100644 index ceecf812b..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "encoding/hex" - "testing" -) - -func TestAesKeyWrap(t *testing.T) { - // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf - kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617") - cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D") - - kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F") - cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607") - - expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1") - - block0, _ := aes.NewCipher(kek0) - block1, _ := aes.NewCipher(kek1) - block2, _ := aes.NewCipher(kek2) - - out0, _ := KeyWrap(block0, cek0) - out1, _ := KeyWrap(block1, cek1) - out2, _ := KeyWrap(block2, cek2) - - if bytes.Compare(out0, expected0) != 0 { - t.Error("output 0 not as expected, got", out0, "wanted", expected0) - } - - if bytes.Compare(out1, expected1) != 0 { - t.Error("output 1 not as expected, got", out1, "wanted", expected1) - } - - if bytes.Compare(out2, expected2) != 0 { - t.Error("output 2 not as expected, got", out2, "wanted", expected2) - } - - unwrap0, _ := KeyUnwrap(block0, out0) - unwrap1, _ := KeyUnwrap(block1, out1) - unwrap2, _ := KeyUnwrap(block2, out2) - - if bytes.Compare(unwrap0, cek0) != 0 { - t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0) - } - - if bytes.Compare(unwrap1, cek1) != 0 { - t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1) - } - - if bytes.Compare(unwrap2, cek2) != 0 { - t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2) - } -} - -func TestAesKeyWrapInvalid(t *testing.T) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - - // Invalid unwrap input (bit flipped) - input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - _, err := KeyUnwrap(block, input0) - if err == nil { - t.Error("key unwrap failed to detect invalid input") - } - - // Invalid unwrap input (truncated) - input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF") - - _, err = KeyUnwrap(block, input1) - if err == nil { - t.Error("key unwrap failed to detect truncated input") - } - - // Invalid wrap input (not multiple of 8) - input2, _ := hex.DecodeString("0123456789ABCD") - - _, err = KeyWrap(block, input2) - if err == nil { - t.Error("key wrap accepted invalid input") - } - -} - -func BenchmarkAesKeyWrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyWrap(block, key) - } -} - -func BenchmarkAesKeyUnwrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyUnwrap(block, input) - } -} diff --git 
a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go deleted file mode 100644 index b3bdaec80..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go +++ /dev/null @@ -1,416 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) -} - -// MultiEncrypter represents an encrypter which supports multiple recipients. -type MultiEncrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) - AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// SetCompression sets a compression algorithm to be applied before encryption. 
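The Encrypter interface defined earlier in this file is the package's main entry point for producing JWE objects. A minimal single-recipient round-trip, assuming the vendored gopkg.in/square/go-jose.v1 package were still importable (it mirrors the Example_jWE code in doc_test.go further down):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v1"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// RSA-OAEP key encryption with an AES-128-GCM content cipher.
	enc, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, &priv.PublicKey)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}

	// Compact serialization; FullSerialize() is also available.
	msg, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(msg)
	if err != nil {
		panic(err)
	}

	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext))
}

NewEncrypter selects the key encrypter from the key type; DIRECT and ECDH_ES are special-cased because they do not wrap a randomly generated CEK.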
-func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { - ctx.compressionAlg = compressionAlg -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := encryptionKey.(type) { - case *JsonWebKey: - keyID = encryptionKey.KeyID - rawKey = encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch alg { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.AddRecipient(alg, encryptionKey) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { - var recipient recipientKeyInfo - - switch alg { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) - } - - recipient, err = makeJWERecipient(alg, encryptionKey) - - if err == nil { - ctx.recipients = append(ctx.recipients, recipient) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case *JsonWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - if err == nil && encryptionKey.KeyID != "" { - recipient.keyID = encryptionKey.KeyID - } - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on 
the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case *JsonWebKey: - return newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{ - Enc: ctx.contentAlg, - } - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - recipient.header.Alg = string(info.keyAlg) - if info.keyID != "" { - recipient.header.Kid = info.keyID - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. - obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - obj.protected.Zip = ctx.compressionAlg - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. -func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") - } - - if len(headers.Crit) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.Enc) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. 
- plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. -func (obj JsonWebEncryption) DecryptMulti(decryptionKey interface{}) (int, JoseHeader, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - if len(globalHeaders.Crit) > 0 { - return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, JoseHeader{}, nil, err - } - - cipher := getContentCipher(globalHeaders.Enc) - if cipher == nil { - return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(globalHeaders.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, JoseHeader{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return index, headers.sanitized(), plaintext, err -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go deleted file mode 100644 index 431f65378..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go +++ /dev/null @@ -1,785 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "fmt" - "io" - "testing" -) - -// We generate only a single RSA and EC key for testing, speeds up tests. 
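DecryptMulti, removed just above, pairs with NewMultiEncrypter and AddRecipient for multi-recipient messages. A minimal sketch, again assuming the go-jose.v1 package were still importable; the RSA key and shared key are throwaway values:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v1"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	sharedKey := make([]byte, 32) // 256-bit key for A256GCMKW (zeros, illustration only)

	enc, err := jose.NewMultiEncrypter(jose.A128GCM)
	if err != nil {
		panic(err)
	}
	// One RSA-OAEP recipient and one AES-GCM key-wrap recipient.
	if err := enc.AddRecipient(jose.RSA_OAEP, &rsaKey.PublicKey); err != nil {
		panic(err)
	}
	if err := enc.AddRecipient(jose.A256GCMKW, sharedKey); err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}

	// Multi-recipient messages require the full (JSON) serialization.
	parsed, err := jose.ParseEncrypted(obj.FullSerialize())
	if err != nil {
		panic(err)
	}

	// Either recipient key decrypts; the returned index says which recipient matched.
	idx, _, plaintext, err := parsed.DecryptMulti(sharedKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recipient %d: %s\n", idx, plaintext)
}

DIRECT and ECDH_ES cannot be used with AddRecipient, since those modes derive the CEK from a single recipient key.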
-var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048) - -var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) -var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - -func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - return fmt.Errorf("error on new encrypter: %s", err) - } - - enc.SetCompression(compressionAlg) - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.EncryptWithAuthData(input, aad) - if err != nil { - return fmt.Errorf("error in encrypt: %s", err) - } - - msg, err := serializer(obj) - if err != nil { - return fmt.Errorf("error in serializer: %s", err) - } - - parsed, err := ParseEncrypted(msg) - if err != nil { - return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg) - } - - // (Maybe) mangle object - skip := corrupter(parsed) - if skip { - return fmt.Errorf("corrupter indicated message should be skipped") - } - - if bytes.Compare(parsed.GetAuthData(), aad) != 0 { - return fmt.Errorf("auth data in parsed object does not match") - } - - output, err := parsed.Decrypt(decryptionKey) - if err != nil { - return fmt.Errorf("error on decrypt: %s", err) - } - - if bytes.Compare(input, output) != 0 { - return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input) - } - - return nil -} - -func TestRoundtripsJWE(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{ - DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW, - RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err != nil { - t.Error(err, alg, enc, zip, i) - } - } - } - } - } - } -} - -func TestRoundtripsJWECorrupted(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - bitflip := func(slice []byte) bool { - if len(slice) > 0 { - slice[0] ^= 0xFF - return 
false - } - return true - } - - corrupters := []func(*JsonWebEncryption) bool{ - func(obj *JsonWebEncryption) bool { - // Set invalid ciphertext - return bitflip(obj.ciphertext) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid auth tag - return bitflip(obj.tag) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid AAD - return bitflip(obj.aad) - }, - func(obj *JsonWebEncryption) bool { - // Mess with encrypted key - return bitflip(obj.recipients[0].encryptedKey) - }, - func(obj *JsonWebEncryption) bool { - // Mess with GCM-KW auth tag - return bitflip(obj.protected.Tag.bytes()) - }, - } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - for j, corrupter := range corrupters { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err == nil { - t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j) - } - } - } - } - } - } - } -} - -func TestEncrypterWithJWKAndKeyID(t *testing.T) { - enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{ - KeyID: "test-id", - Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }) - if err != nil { - t.Error(err) - } - - ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet")) - - serialized1, _ := ciphertext.CompactSerialize() - serialized2 := ciphertext.FullSerialize() - - parsed1, _ := ParseEncrypted(serialized1) - parsed2, _ := ParseEncrypted(serialized2) - - if parsed1.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID) - } - if parsed2.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID) - } -} - -func TestEncrypterWithBrokenRand(t *testing.T) { - keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - - serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() } - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Break rand reader - readers := []func() io.Reader{ - // Totally broken - func() io.Reader { return bytes.NewReader([]byte{}) }, - // Not enough bytes - func() io.Reader { return io.LimitReader(rand.Reader, 20) }, - } - - defer resetRandReader() - - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for i, getReader := range readers { - randReader = getReader() - err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec) - if err == nil { - t.Error("encrypter should fail if rand is broken", i) - } - } - } - } - } -} - -func TestNewEncrypterErrors(t *testing.T) { - _, err := NewEncrypter("XYZ", "XYZ", nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid cipher") - } - - _, err = NewMultiEncrypter("XYZ") - if err == nil { - t.Error("was able to instantiate multi-encrypter with invalid cipher") - } - - _, err = NewEncrypter(DIRECT, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid direct key") - } - - _, err = NewEncrypter(ECDH_ES, A128GCM, nil) - if err == nil 
{ - t.Error("was able to instantiate encrypter with invalid EC key") - } -} - -func TestMultiRecipientJWE(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey) - if err != nil { - t.Fatal("error when adding RSA recipient", err) - } - - sharedKey := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - err = enc.AddRecipient(A256GCMKW, sharedKey) - if err != nil { - t.Fatal("error when adding AES recipient: ", err) - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.Encrypt(input) - if err != nil { - t.Fatal("error in encrypt: ", err) - } - - msg := obj.FullSerialize() - - parsed, err := ParseEncrypted(msg) - if err != nil { - t.Fatal("error in parse: ", err) - } - - i, _, output, err := parsed.DecryptMulti(rsaTestKey) - if err != nil { - t.Fatal("error on decrypt with RSA: ", err) - } - - if i != 0 { - t.Fatal("recipient index should be 0 for RSA key") - } - - if bytes.Compare(input, output) != 0 { - t.Fatal("Decrypted output does not match input: ", output, input) - } - - i, _, output, err = parsed.DecryptMulti(sharedKey) - if err != nil { - t.Fatal("error on decrypt with AES: ", err) - } - - if i != 1 { - t.Fatal("recipient index should be 1 for shared key") - } - - if bytes.Compare(input, output) != 0 { - t.Fatal("Decrypted output does not match input", output, input) - } -} - -func TestMultiRecipientErrors(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - input := []byte("Lorem ipsum dolor sit amet") - _, err = enc.Encrypt(input) - if err == nil { - t.Error("should fail when encrypting to zero recipients") - } - - err = enc.AddRecipient(DIRECT, nil) - if err == nil { - t.Error("should reject DIRECT mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(ECDH_ES, nil) - if err == nil { - t.Error("should reject ECDH_ES mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(RSA1_5, nil) - if err == nil { - t.Error("should reject invalid recipient key") - } -} - -type testKey struct { - enc, dec interface{} -} - -func symmetricTestKey(size int) []testKey { - key, _, _ := randomKeyGenerator{size: size}.genKey() - - return []testKey{ - testKey{ - enc: key, - dec: key, - }, - testKey{ - enc: &JsonWebKey{KeyID: "test", Key: key}, - dec: &JsonWebKey{KeyID: "test", Key: key}, - }, - } -} - -func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey { - switch keyAlg { - case DIRECT: - return symmetricTestKey(getContentCipher(encAlg).keySize()) - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return []testKey{ - testKey{ - dec: ecTestKey256, - enc: &ecTestKey256.PublicKey, - }, - testKey{ - dec: ecTestKey384, - enc: &ecTestKey384.PublicKey, - }, - testKey{ - dec: ecTestKey521, - enc: &ecTestKey521.PublicKey, - }, - testKey{ - dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256}, - enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey}, - }, - } - case A128GCMKW, A128KW: - return symmetricTestKey(16) - case A192GCMKW, A192KW: - return symmetricTestKey(24) - case A256GCMKW, A256KW: - return symmetricTestKey(32) - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return []testKey{testKey{ - dec: rsaTestKey, - enc: &rsaTestKey.PublicKey, - }} - } - - panic("Must update test case") -} - -func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, 
pub interface{}) { - serializer := func(obj *JsonWebEncryption) (string, error) { - return obj.CompactSerialize() - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv) - if err != nil { - b.Error(err) - } - } -} - -var ( - chunks = map[string][]byte{ - "1B": make([]byte, 1), - "64B": make([]byte, 64), - "1KB": make([]byte, 1024), - "64KB": make([]byte, 65536), - "1MB": make([]byte, 1048576), - "64MB": make([]byte, 67108864), - } - - symKey, _, _ = randomKeyGenerator{size: 32}.genKey() - - encrypters = map[string]Encrypter{ - "OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey), - "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey), - "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey), - "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey), - "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey), - "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey), - "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey), - "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey), - "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey), - "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey), - "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey), - "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey), - "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey), - } -) - -func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) } -func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) } -func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { benchEncrypt("1B", "PKCSAndGCM", b) } -func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) } -func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) } -func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) } -func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) } -func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) } -func BenchmarkEncrypt1KBWithPKCSAndCBC(b 
*testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) } -func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) } -func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) } -func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) } -func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) } -func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) } - -func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) } -func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { benchEncrypt("64B", "DirectCBC128", b) } -func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) } -func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) } -func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) } -func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) } - -func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) } -func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) } -func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) } -func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) } -func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) } -func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) } - -func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", b) } -func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) } -func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) } -func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) } -func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) } -func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) } - -func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", 
"AESKWAndCBC256", b) } -func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchEncrypt(chunkKey, primKey string, b *testing.B) { - data, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - enc.Encrypt(data) - } -} - -var ( - decryptionKeys = map[string]interface{}{ - "OAEPAndGCM": rsaTestKey, - "PKCSAndGCM": rsaTestKey, - "OAEPAndCBC": rsaTestKey, - "PKCSAndCBC": rsaTestKey, - - "DirectGCM128": symKey, - "DirectCBC128": symKey, - "DirectGCM256": symKey, - "DirectCBC256": symKey, - - "AESKWAndGCM128": symKey, - "AESKWAndCBC256": symKey, - - "ECDHOnP256AndGCM128": ecTestKey256, - "ECDHOnP384AndGCM128": ecTestKey384, - "ECDHOnP521AndGCM128": ecTestKey521, - } -) - -func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) } -func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) } -func 
BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) } -func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) } -func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) } -func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) } -func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) } -func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) } -func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) } -func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { benchDecrypt("64B", "DirectGCM128", b) } -func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) } -func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) } -func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) } -func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) } - -func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) } -func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) } -func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) } -func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) } -func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) } -func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) } - -func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) } -func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) } -func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { 
benchDecrypt("1KB", "DirectGCM256", b) } -func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) } -func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) } -func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) } - -func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) } -func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) } -func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) } -func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) } -func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) } -func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) } - -func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64MB", 
"ECDHOnP384AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchDecrypt(chunkKey, primKey string, b *testing.B) { - chunk, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - dec, ok := decryptionKeys[primKey] - if !ok { - b.Fatalf("unknown decryption key %s", primKey) - } - - data, err := enc.Encrypt(chunk) - if err != nil { - b.Fatal(err) - } - - b.SetBytes(int64(len(chunk))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - data.Decrypt(dec) - } -} - -func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - panic(err) - } - return enc -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go deleted file mode 100644 index b4cd1e989..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on -encryption and signing based on the JSON Web Encryption and JSON Web Signature -standards. The library supports both the compact and full serialization -formats, and has optional support for multiple recipients. - -*/ -package jose // import "gopkg.in/square/go-jose.v1" diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go deleted file mode 100644 index 50468295d..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -// Dummy encrypter for use in examples -var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{}) - -func Example_jWE() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would - // indicate that the selected algorithm(s) are not currently supported. - publicKey := &privateKey.PublicKey - encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) - if err != nil { - panic(err) - } - - // Encrypt a sample plaintext. Calling the encrypter returns an encrypted - // JWE object, which can then be serialized for output afterwards. An error - // would indicate a problem in an underlying cryptographic primitive. - var plaintext = []byte("Lorem ipsum dolor sit amet") - object, err := encrypter.Encrypt(plaintext) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, encrypted JWE object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseEncrypted(serialized) - if err != nil { - panic(err) - } - - // Now we can decrypt and get back our original plaintext. An error here - // would indicate the the message failed to decrypt, e.g. because the auth - // tag was broken or the message was tampered with. - decrypted, err := object.Decrypt(privateKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(decrypted)) - // output: Lorem ipsum dolor sit amet -} - -func Example_jWS() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. - signer, err := NewSigner(PS512, privateKey) - if err != nil { - panic(err) - } - - // Sign a sample payload. Calling the signer returns a protected JWS object, - // which can then be serialized for output afterwards. An error would - // indicate a problem in an underlying cryptographic primitive. - var payload = []byte("Lorem ipsum dolor sit amet") - object, err := signer.Sign(payload) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, protected JWS object. 
An error would indicate that - // the given input did not represent a valid message. - object, err = ParseSigned(serialized) - if err != nil { - panic(err) - } - - // Now we can verify the signature on the payload. An error here would - // indicate the the message failed to verify, e.g. because the signature was - // broken or the message was tampered with. - output, err := object.Verify(&privateKey.PublicKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(output)) - // output: Lorem ipsum dolor sit amet -} - -func ExampleNewEncrypter_publicKey() { - var publicKey *rsa.PublicKey - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. - NewEncrypter(RSA_OAEP, A128GCM, publicKey) - - // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC. - NewEncrypter(RSA1_5, A128CBC_HS256, publicKey) -} - -func ExampleNewEncrypter_symmetric() { - var sharedKey []byte - - // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap. - NewEncrypter(A128GCMKW, A128GCM, sharedKey) - - // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping. - NewEncrypter(DIRECT, A256GCM, sharedKey) -} - -func ExampleNewSigner_publicKey() { - var rsaPrivateKey *rsa.PrivateKey - var ecdsaPrivateKey *ecdsa.PrivateKey - - // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256. - NewSigner(RS256, rsaPrivateKey) - - // Instantiate a signer using ECDSA with SHA-384. - NewSigner(ES384, ecdsaPrivateKey) -} - -func ExampleNewSigner_symmetric() { - var sharedKey []byte - - // Instantiate an signer using HMAC-SHA256. - NewSigner(HS256, sharedKey) - - // Instantiate an signer using HMAC-SHA512. - NewSigner(HS512, sharedKey) -} - -func ExampleNewMultiEncrypter() { - var publicKey *rsa.PublicKey - var sharedKey []byte - - // Instantiate an encrypter using AES-GCM. - encrypter, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - // Add a recipient using a shared key with AES-GCM key wap - err = encrypter.AddRecipient(A128GCMKW, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA public key with RSA-OAEP - err = encrypter.AddRecipient(RSA_OAEP, publicKey) - if err != nil { - panic(err) - } -} - -func ExampleNewMultiSigner() { - var privateKey *rsa.PrivateKey - var sharedKey []byte - - // Instantiate a signer for multiple recipients. - signer := NewMultiSigner() - - // Add a recipient using a shared key with HMAC-SHA256 - err := signer.AddRecipient(HS256, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA private key with RSASSA-PSS with SHA384 - err = signer.AddRecipient(PS384, privateKey) - if err != nil { - panic(err) - } -} - -func ExampleEncrypter_encrypt() { - // Encrypt a plaintext in order to get an encrypted JWE object. - var plaintext = []byte("This is a secret message") - - encrypter.Encrypt(plaintext) -} - -func ExampleEncrypter_encryptWithAuthData() { - // Encrypt a plaintext in order to get an encrypted JWE object. Also attach - // some additional authenticated data (AAD) to the object. Note that objects - // with attached AAD can only be represented using full serialization. 
- var plaintext = []byte("This is a secret message") - var aad = []byte("This is authenticated, but public data") - - encrypter.EncryptWithAuthData(plaintext, aad) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go deleted file mode 100644 index dde0a42db..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go +++ /dev/null @@ -1,193 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "regexp" - "strings" - - "gopkg.in/square/go-jose.v1/json" -) - -var stripWhitespaceRegex = regexp.MustCompile("\\s") - -// Url-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// Url-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. 
- // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - return stripWhitespaceRegex.ReplaceAllString(data, "") -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. -type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64URLDecode(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64URLEncode(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go deleted file mode 100644 index e2f8d979c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go +++ /dev/null @@ -1,173 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "strings" - "testing" -) - -func TestBase64URLEncode(t *testing.T) { - // Test arrays with various sizes - if base64URLEncode([]byte{}) != "" { - t.Error("failed to encode empty array") - } - - if base64URLEncode([]byte{0}) != "AA" { - t.Error("failed to encode [0x00]") - } - - if base64URLEncode([]byte{0, 1}) != "AAE" { - t.Error("failed to encode [0x00, 0x01]") - } - - if base64URLEncode([]byte{0, 1, 2}) != "AAEC" { - t.Error("failed to encode [0x00, 0x01, 0x02]") - } - - if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" { - t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestBase64URLDecode(t *testing.T) { - // Test arrays with various sizes - val, err := base64URLDecode("") - if err != nil || !bytes.Equal(val, []byte{}) { - t.Error("failed to decode empty array") - } - - val, err = base64URLDecode("AA") - if err != nil || !bytes.Equal(val, []byte{0}) { - t.Error("failed to decode [0x00]") - } - - val, err = base64URLDecode("AAE") - if err != nil || !bytes.Equal(val, []byte{0, 1}) { - t.Error("failed to decode [0x00, 0x01]") - } - - val, err = base64URLDecode("AAEC") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) { - t.Error("failed to decode [0x00, 0x01, 0x02]") - } - - val, err = base64URLDecode("AAECAw") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) { - t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestDeflateRoundtrip(t *testing.T) { - original := []byte("Lorem ipsum dolor sit amet") - - compressed, err := deflate(original) - if err != nil { - panic(err) - } - - output, err := inflate(compressed) - if err != nil { - panic(err) - } - - if bytes.Compare(output, original) != 0 { - t.Error("Input and output do not match") - } -} - -func TestInvalidCompression(t *testing.T) { - _, err := compress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress(DEFLATE, []byte{1, 2, 3, 4}) - if err == nil { - t.Error("should not accept invalid data") - } -} - -func TestByteBufferTrim(t *testing.T) { - buf := newBufferFromInt(1) - if !bytes.Equal(buf.data, []byte{1}) { - t.Error("Byte buffer for integer '1' should contain [0x01]") - } - - buf = newBufferFromInt(65537) - if !bytes.Equal(buf.data, []byte{1, 0, 1}) { - t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]") - } -} - -func TestFixedSizeBuffer(t *testing.T) { - data0 := []byte{} - data1 := []byte{1} - data2 := []byte{1, 2} - data3 := []byte{1, 2, 3} - data4 := []byte{1, 2, 3, 4} - - buf0 := newFixedSizeBuffer(data0, 4) - buf1 := newFixedSizeBuffer(data1, 4) - buf2 := newFixedSizeBuffer(data2, 4) - buf3 := newFixedSizeBuffer(data3, 4) - buf4 := newFixedSizeBuffer(data4, 4) - - if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) { - t.Error("Invalid padded buffer for buf0") - } - if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) { - t.Error("Invalid padded buffer for buf1") - } - if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) { - 
t.Error("Invalid padded buffer for buf2") - } - if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) { - t.Error("Invalid padded buffer for buf3") - } - if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) { - t.Error("Invalid padded buffer for buf4") - } -} - -func TestSerializeJSONRejectsNil(t *testing.T) { - defer func() { - r := recover() - if r == nil || !strings.Contains(r.(string), "nil pointer") { - t.Error("serialize function should not accept nil pointer") - } - }() - - mustSerializeJSON(nil) -} - -func TestFixedSizeBufferTooLarge(t *testing.T) { - defer func() { - r := recover() - if r == nil { - t.Error("should not be able to create fixed size buffer with oversized data") - } - }() - - newFixedSizeBuffer(make([]byte, 2), 1) -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md deleted file mode 100644 index 86de5e558..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. 
Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go deleted file mode 100644 index ed89d1156..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Large data benchmark. -// The JSON data is a summary of agl's changes in the -// go, webkit, and chromium open source projects. -// We benchmark converting between the JSON form -// and in-memory data structures. - -package json - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "os" - "strings" - "testing" -) - -type codeResponse struct { - Tree *codeNode `json:"tree"` - Username string `json:"username"` -} - -type codeNode struct { - Name string `json:"name"` - Kids []*codeNode `json:"kids"` - CLWeight float64 `json:"cl_weight"` - Touches int `json:"touches"` - MinT int64 `json:"min_t"` - MaxT int64 `json:"max_t"` - MeanT int64 `json:"mean_t"` -} - -var codeJSON []byte -var codeStruct codeResponse - -func codeInit() { - f, err := os.Open("testdata/code.json.gz") - if err != nil { - panic(err) - } - defer f.Close() - gz, err := gzip.NewReader(f) - if err != nil { - panic(err) - } - data, err := ioutil.ReadAll(gz) - if err != nil { - panic(err) - } - - codeJSON = data - - if err := Unmarshal(codeJSON, &codeStruct); err != nil { - panic("unmarshal code.json: " + err.Error()) - } - - if data, err = Marshal(&codeStruct); err != nil { - panic("marshal code.json: " + err.Error()) - } - - if !bytes.Equal(data, codeJSON) { - println("different lengths", len(data), len(codeJSON)) - for i := 0; i < len(data) && i < len(codeJSON); i++ { - if data[i] != codeJSON[i] { - println("re-marshal: changed at byte", i) - println("orig: ", string(codeJSON[i-10:i+10])) - println("new: ", string(data[i-10:i+10])) - break - } - } - panic("re-marshal code.json: different result") - } -} - -func BenchmarkCodeEncoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - enc := NewEncoder(ioutil.Discard) - for i := 0; i < b.N; i++ { - if err := enc.Encode(&codeStruct); err != nil { - b.Fatal("Encode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeMarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - if _, err := Marshal(&codeStruct); err != nil { - b.Fatal("Marshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeDecoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var buf bytes.Buffer - dec := NewDecoder(&buf) - var r codeResponse - for i := 0; i < b.N; i++ { - buf.Write(codeJSON) - // hide EOF - buf.WriteByte('\n') - buf.WriteByte('\n') - buf.WriteByte('\n') - if err := dec.Decode(&r); err != nil { - b.Fatal("Decode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkDecoderStream(b *testing.B) { - b.StopTimer() - var buf bytes.Buffer - dec := NewDecoder(&buf) - buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") - var x interface{} - if err := dec.Decode(&x); err != nil { - b.Fatal("Decode:", err) - } - ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" - 
b.StartTimer() - for i := 0; i < b.N; i++ { - if i%300000 == 0 { - buf.WriteString(ones) - } - x = nil - if err := dec.Decode(&x); err != nil || x != 1.0 { - b.Fatalf("Decode: %v after %d", err, i) - } - } -} - -func BenchmarkCodeUnmarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - var r codeResponse - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeUnmarshalReuse(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var r codeResponse - for i := 0; i < b.N; i++ { - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } -} - -func BenchmarkUnmarshalString(b *testing.B) { - data := []byte(`"hello, world"`) - var s string - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &s); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalFloat64(b *testing.B) { - var f float64 - data := []byte(`3.14`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &f); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalInt64(b *testing.B) { - var x int64 - data := []byte(`3`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &x); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkIssue10335(b *testing.B) { - b.ReportAllocs() - var s struct{} - j := []byte(`{"a":{ }}`) - for n := 0; n < b.N; n++ { - if err := Unmarshal(j, &s); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go deleted file mode 100644 index 37457e5a8..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. 
-// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) 
-type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - 
d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. 
- start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. 
- rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go deleted file mode 100644 index 32394654e..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "encoding" - "fmt" - "image" - "net" - "reflect" - "strings" - "testing" - "time" -) - -type T struct { - X string - Y int - Z int `json:"-"` -} - -type U struct { - Alphabet string `json:"alpha"` -} - -type V struct { - F1 interface{} - F2 int32 - F3 Number -} - -// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and -// without UseNumber -var ifaceNumAsFloat64 = map[string]interface{}{ - "k1": float64(1), - "k2": "s", - "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, - "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, -} - -var ifaceNumAsNumber = map[string]interface{}{ - "k1": Number("1"), - "k2": "s", - "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, - "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, -} - -type tx struct { - x int -} - -// A type that can unmarshal itself. - -type unmarshaler struct { - T bool -} - -func (u *unmarshaler) UnmarshalJSON(b []byte) error { - *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. - return nil -} - -type ustruct struct { - M unmarshaler -} - -type unmarshalerText struct { - T bool -} - -// needed for re-marshaling tests -func (u *unmarshalerText) MarshalText() ([]byte, error) { - return []byte(""), nil -} - -func (u *unmarshalerText) UnmarshalText(b []byte) error { - *u = unmarshalerText{true} // All we need to see that UnmarshalText is called. - return nil -} - -var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) - -type ustructText struct { - M unmarshalerText -} - -var ( - um0, um1 unmarshaler // target2 of unmarshaling - ump = &um1 - umtrue = unmarshaler{true} - umslice = []unmarshaler{{true}} - umslicep = new([]unmarshaler) - umstruct = ustruct{unmarshaler{true}} - - um0T, um1T unmarshalerText // target2 of unmarshaling - umpT = &um1T - umtrueT = unmarshalerText{true} - umsliceT = []unmarshalerText{{true}} - umslicepT = new([]unmarshalerText) - umstructT = ustructText{unmarshalerText{true}} -) - -// Test data structures for anonymous fields. 
- -type Point struct { - Z int -} - -type Top struct { - Level0 int - Embed0 - *Embed0a - *Embed0b `json:"e,omitempty"` // treated as named - Embed0c `json:"-"` // ignored - Loop - Embed0p // has Point with X, Y, used - Embed0q // has Point with Z, used - embed // contains exported field -} - -type Embed0 struct { - Level1a int // overridden by Embed0a's Level1a with json tag - Level1b int // used because Embed0a's Level1b is renamed - Level1c int // used because Embed0a's Level1c is ignored - Level1d int // annihilated by Embed0a's Level1d - Level1e int `json:"x"` // annihilated by Embed0a.Level1e -} - -type Embed0a struct { - Level1a int `json:"Level1a,omitempty"` - Level1b int `json:"LEVEL1B,omitempty"` - Level1c int `json:"-"` - Level1d int // annihilated by Embed0's Level1d - Level1f int `json:"x"` // annihilated by Embed0's Level1e -} - -type Embed0b Embed0 - -type Embed0c Embed0 - -type Embed0p struct { - image.Point -} - -type Embed0q struct { - Point -} - -type embed struct { - Q int -} - -type Loop struct { - Loop1 int `json:",omitempty"` - Loop2 int `json:",omitempty"` - *Loop -} - -// From reflect test: -// The X in S6 and S7 annihilate, but they also block the X in S8.S9. -type S5 struct { - S6 - S7 - S8 -} - -type S6 struct { - X int -} - -type S7 S6 - -type S8 struct { - S9 -} - -type S9 struct { - X int - Y int -} - -// From reflect test: -// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. -type S10 struct { - S11 - S12 - S13 -} - -type S11 struct { - S6 -} - -type S12 struct { - S6 -} - -type S13 struct { - S8 -} - -type unmarshalTest struct { - in string - ptr interface{} - out interface{} - err error - useNumber bool -} - -type XYZ struct { - X interface{} - Y interface{} - Z interface{} -} - -func sliceAddr(x []int) *[]int { return &x } -func mapAddr(x map[string]int) *map[string]int { return &x } - -var unmarshalTests = []unmarshalTest{ - // basic types - {in: `true`, ptr: new(bool), out: true}, - {in: `1`, ptr: new(int), out: 1}, - {in: `1.2`, ptr: new(float64), out: 1.2}, - {in: `-5`, ptr: new(int16), out: int16(-5)}, - {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, - {in: `2`, ptr: new(Number), out: Number("2")}, - {in: `2`, ptr: new(interface{}), out: float64(2.0)}, - {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, - {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, - {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, - {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, - {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, - {in: "null", ptr: new(interface{}), out: nil}, - {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, - {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, - - // raw values with whitespace - {in: "\n true ", ptr: new(bool), out: true}, - {in: "\t 1 ", ptr: new(int), out: 1}, - {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, - {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, - {in: "\t 
\"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, - - // Z has a "-" tag. - {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, - - {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, - - // syntax errors - {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, - {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, - {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, - - // raw value errors - {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, - {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, - {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, - {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, - - // array tests - {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, - {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, - {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, - - // empty array to interface test - {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, - {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, - {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, - {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, - - // composite tests - {in: allValueIndent, ptr: new(All), out: allValue}, - {in: allValueCompact, ptr: new(All), out: allValue}, - {in: allValueIndent, ptr: new(*All), out: &allValue}, - {in: allValueCompact, ptr: new(*All), out: &allValue}, - {in: pallValueIndent, ptr: new(All), out: pallValue}, - {in: pallValueCompact, ptr: new(All), out: pallValue}, - {in: pallValueIndent, ptr: new(*All), out: &pallValue}, - {in: pallValueCompact, ptr: new(*All), out: &pallValue}, - - // unmarshal interface test - {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called - {in: `{"T":false}`, ptr: &ump, out: &umtrue}, - {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, - {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, - {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct}, - - // UnmarshalText interface test - {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called - {in: `"X"`, ptr: &umpT, out: &umtrueT}, - {in: `["X"]`, ptr: &umsliceT, out: umsliceT}, - {in: `["X"]`, ptr: &umslicepT, out: &umsliceT}, - {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT}, - - // Overwriting of data. - // This is different from package xml, but it's what we've always done. - // Now documented and tested. 
- {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, - {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, - - { - in: `{ - "Level0": 1, - "Level1b": 2, - "Level1c": 3, - "x": 4, - "Level1a": 5, - "LEVEL1B": 6, - "e": { - "Level1a": 8, - "Level1b": 9, - "Level1c": 10, - "Level1d": 11, - "x": 12 - }, - "Loop1": 13, - "Loop2": 14, - "X": 15, - "Y": 16, - "Z": 17, - "Q": 18 - }`, - ptr: new(Top), - out: Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - }, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S5), - out: S5{S8: S8{S9: S9{Y: 2}}}, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S10), - out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, - }, - - // invalid UTF-8 is coerced to valid UTF-8. - { - in: "\"hello\xffworld\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\xc2\xc2world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xc2\xffworld\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800world\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", - ptr: new(string), - out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", - }, - - // issue 8305 - { - in: `{"2009-11-10T23:00:00Z": "hello world"}`, - ptr: &map[time.Time]string{}, - err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1}, - }, -} - -func TestMarshal(t *testing.T) { - b, err := Marshal(allValue) - if err != nil { - t.Fatalf("Marshal allValue: %v", err) - } - if string(b) != allValueCompact { - t.Errorf("Marshal allValueCompact") - diff(t, b, []byte(allValueCompact)) - return - } - - b, err = Marshal(pallValue) - if err != nil { - t.Fatalf("Marshal pallValue: %v", err) - } - if string(b) != pallValueCompact { - t.Errorf("Marshal pallValueCompact") - diff(t, b, []byte(pallValueCompact)) - return - } -} - -var badUTF8 = []struct { - in, out string -}{ - {"hello\xffworld", `"hello\ufffdworld"`}, - {"", `""`}, - {"\xff", `"\ufffd"`}, - {"\xff\xff", `"\ufffd\ufffd"`}, - {"a\xffb", `"a\ufffdb"`}, - {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, -} - -func TestMarshalBadUTF8(t *testing.T) { - for _, tt := range badUTF8 { - b, err := Marshal(tt.in) - if string(b) != tt.out || err != nil { - t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) - } - } -} - -func TestMarshalNumberZeroVal(t *testing.T) { - var n Number - out, err := Marshal(n) - if err != nil { - t.Fatal(err) - } - outStr := string(out) - if outStr != "0" { - t.Fatalf("Invalid zero val for Number: %q", outStr) - } -} - -func TestMarshalEmbeds(t *testing.T) { - top := &Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 
16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - } - b, err := Marshal(top) - if err != nil { - t.Fatal(err) - } - want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" - if string(b) != want { - t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) - } -} - -func TestUnmarshal(t *testing.T) { - for i, tt := range unmarshalTests { - var scan scanner - in := []byte(tt.in) - if err := checkValid(in, &scan); err != nil { - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: checkValid: %#v", i, err) - continue - } - } - if tt.ptr == nil { - continue - } - - // v = new(right-type) - v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec := NewDecoder(bytes.NewReader(in)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: %v, want %v", i, err, tt.err) - continue - } else if err != nil { - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) - data, _ := Marshal(v.Elem().Interface()) - println(string(data)) - data, _ = Marshal(tt.out) - println(string(data)) - continue - } - - // Check round trip. - if tt.err == nil { - enc, err := Marshal(v.Interface()) - if err != nil { - t.Errorf("#%d: error re-marshaling: %v", i, err) - continue - } - vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec = NewDecoder(bytes.NewReader(enc)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(vv.Interface()); err != nil { - t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) - t.Errorf(" In: %q", strings.Map(noSpace, string(in))) - t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) - continue - } - } - } -} - -func TestUnmarshalMarshal(t *testing.T) { - initBig() - var v interface{} - if err := Unmarshal(jsonBig, &v); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - b, err := Marshal(v) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(jsonBig, b) { - t.Errorf("Marshal jsonBig") - diff(t, b, jsonBig) - return - } -} - -var numberTests = []struct { - in string - i int64 - intErr string - f float64 - floatErr string -}{ - {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, - {in: "-12", i: -12, f: -12.0}, - {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, -} - -// Independent of Decode, basic coverage of the accessors in Number -func TestNumberAccessors(t *testing.T) { - for _, tt := range numberTests { - n := Number(tt.in) - if s := n.String(); s != tt.in { - t.Errorf("Number(%q).String() is %q", tt.in, s) - } - if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { - t.Errorf("Number(%q).Int64() is %d", tt.in, i) - } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { - t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) - } - if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { - t.Errorf("Number(%q).Float64() is %g", tt.in, f) - } else if (err == 
nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { - t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) - } - } -} - -func TestLargeByteSlice(t *testing.T) { - s0 := make([]byte, 2000) - for i := range s0 { - s0[i] = byte(i) - } - b, err := Marshal(s0) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - var s1 []byte - if err := Unmarshal(b, &s1); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !bytes.Equal(s0, s1) { - t.Errorf("Marshal large byte slice") - diff(t, s0, s1) - } -} - -type Xint struct { - X int -} - -func TestUnmarshalInterface(t *testing.T) { - var xint Xint - var i interface{} = &xint - if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestUnmarshalPtrPtr(t *testing.T) { - var xint Xint - pxint := &xint - if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestEscape(t *testing.T) { - const input = `"foobar"` + " [\u2028 \u2029]" - const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` - b, err := Marshal(input) - if err != nil { - t.Fatalf("Marshal error: %v", err) - } - if s := string(b); s != expected { - t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) - } -} - -// WrongString is a struct that's misusing the ,string modifier. -type WrongString struct { - Message string `json:"result,string"` -} - -type wrongStringTest struct { - in, err string -} - -var wrongStringTests = []wrongStringTest{ - {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, - {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, - {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, - {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, -} - -// If people misuse the ,string modifier, the error message should be -// helpful, telling the user that they're doing it wrong. -func TestErrorMessageFromMisusedString(t *testing.T) { - for n, tt := range wrongStringTests { - r := strings.NewReader(tt.in) - var s WrongString - err := NewDecoder(r).Decode(&s) - got := fmt.Sprintf("%v", err) - if got != tt.err { - t.Errorf("%d. 
got err = %q, want %q", n, got, tt.err) - } - } -} - -func noSpace(c rune) rune { - if isSpace(byte(c)) { //only used for ascii - return -1 - } - return c -} - -type All struct { - Bool bool - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - Uintptr uintptr - Float32 float32 - Float64 float64 - - Foo string `json:"bar"` - Foo2 string `json:"bar2,dummyopt"` - - IntStr int64 `json:",string"` - - PBool *bool - PInt *int - PInt8 *int8 - PInt16 *int16 - PInt32 *int32 - PInt64 *int64 - PUint *uint - PUint8 *uint8 - PUint16 *uint16 - PUint32 *uint32 - PUint64 *uint64 - PUintptr *uintptr - PFloat32 *float32 - PFloat64 *float64 - - String string - PString *string - - Map map[string]Small - MapP map[string]*Small - PMap *map[string]Small - PMapP *map[string]*Small - - EmptyMap map[string]Small - NilMap map[string]Small - - Slice []Small - SliceP []*Small - PSlice *[]Small - PSliceP *[]*Small - - EmptySlice []Small - NilSlice []Small - - StringSlice []string - ByteSlice []byte - - Small Small - PSmall *Small - PPSmall **Small - - Interface interface{} - PInterface *interface{} - - unexported int -} - -type Small struct { - Tag string -} - -var allValue = All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Uintptr: 12, - Float32: 14.1, - Float64: 15.1, - Foo: "foo", - Foo2: "foo2", - IntStr: 42, - String: "16", - Map: map[string]Small{ - "17": {Tag: "tag17"}, - "18": {Tag: "tag18"}, - }, - MapP: map[string]*Small{ - "19": {Tag: "tag19"}, - "20": nil, - }, - EmptyMap: map[string]Small{}, - Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, - SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, - EmptySlice: []Small{}, - StringSlice: []string{"str24", "str25", "str26"}, - ByteSlice: []byte{27, 28, 29}, - Small: Small{Tag: "tag30"}, - PSmall: &Small{Tag: "tag31"}, - Interface: 5.2, -} - -var pallValue = All{ - PBool: &allValue.Bool, - PInt: &allValue.Int, - PInt8: &allValue.Int8, - PInt16: &allValue.Int16, - PInt32: &allValue.Int32, - PInt64: &allValue.Int64, - PUint: &allValue.Uint, - PUint8: &allValue.Uint8, - PUint16: &allValue.Uint16, - PUint32: &allValue.Uint32, - PUint64: &allValue.Uint64, - PUintptr: &allValue.Uintptr, - PFloat32: &allValue.Float32, - PFloat64: &allValue.Float64, - PString: &allValue.String, - PMap: &allValue.Map, - PMapP: &allValue.MapP, - PSlice: &allValue.Slice, - PSliceP: &allValue.SliceP, - PPSmall: &allValue.PSmall, - PInterface: &allValue.Interface, -} - -var allValueIndent = `{ - "Bool": true, - "Int": 2, - "Int8": 3, - "Int16": 4, - "Int32": 5, - "Int64": 6, - "Uint": 7, - "Uint8": 8, - "Uint16": 9, - "Uint32": 10, - "Uint64": 11, - "Uintptr": 12, - "Float32": 14.1, - "Float64": 15.1, - "bar": "foo", - "bar2": "foo2", - "IntStr": "42", - "PBool": null, - "PInt": null, - "PInt8": null, - "PInt16": null, - "PInt32": null, - "PInt64": null, - "PUint": null, - "PUint8": null, - "PUint16": null, - "PUint32": null, - "PUint64": null, - "PUintptr": null, - "PFloat32": null, - "PFloat64": null, - "String": "16", - "PString": null, - "Map": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "MapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "PMap": null, - "PMapP": null, - "EmptyMap": {}, - "NilMap": null, - "Slice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "SliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "PSlice": null, - 
"PSliceP": null, - "EmptySlice": [], - "NilSlice": null, - "StringSlice": [ - "str24", - "str25", - "str26" - ], - "ByteSlice": "Gxwd", - "Small": { - "Tag": "tag30" - }, - "PSmall": { - "Tag": "tag31" - }, - "PPSmall": null, - "Interface": 5.2, - "PInterface": null -}` - -var allValueCompact = strings.Map(noSpace, allValueIndent) - -var pallValueIndent = `{ - "Bool": false, - "Int": 0, - "Int8": 0, - "Int16": 0, - "Int32": 0, - "Int64": 0, - "Uint": 0, - "Uint8": 0, - "Uint16": 0, - "Uint32": 0, - "Uint64": 0, - "Uintptr": 0, - "Float32": 0, - "Float64": 0, - "bar": "", - "bar2": "", - "IntStr": "0", - "PBool": true, - "PInt": 2, - "PInt8": 3, - "PInt16": 4, - "PInt32": 5, - "PInt64": 6, - "PUint": 7, - "PUint8": 8, - "PUint16": 9, - "PUint32": 10, - "PUint64": 11, - "PUintptr": 12, - "PFloat32": 14.1, - "PFloat64": 15.1, - "String": "", - "PString": "16", - "Map": null, - "MapP": null, - "PMap": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "PMapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "EmptyMap": null, - "NilMap": null, - "Slice": null, - "SliceP": null, - "PSlice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "PSliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "EmptySlice": null, - "NilSlice": null, - "StringSlice": null, - "ByteSlice": null, - "Small": { - "Tag": "" - }, - "PSmall": null, - "PPSmall": { - "Tag": "tag31" - }, - "Interface": null, - "PInterface": 5.2 -}` - -var pallValueCompact = strings.Map(noSpace, pallValueIndent) - -func TestRefUnmarshal(t *testing.T) { - type S struct { - // Ref is defined in encode_test.go. - R0 Ref - R1 *Ref - R2 RefText - R3 *RefText - } - want := S{ - R0: 12, - R1: new(Ref), - R2: 13, - R3: new(RefText), - } - *want.R1 = 12 - *want.R3 = 13 - - var got S - if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %+v, want %+v", got, want) - } -} - -// Test that the empty string doesn't panic decoding when ,string is specified -// Issue 3450 -func TestEmptyString(t *testing.T) { - type T2 struct { - Number1 int `json:",string"` - Number2 int `json:",string"` - } - data := `{"Number1":"1", "Number2":""}` - dec := NewDecoder(strings.NewReader(data)) - var t2 T2 - err := dec.Decode(&t2) - if err == nil { - t.Fatal("Decode: did not return error") - } - if t2.Number1 != 1 { - t.Fatal("Decode: did not set Number1") - } -} - -// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). -// It should also not be an error (issue 2540, issue 8587). 
-func TestNullString(t *testing.T) { - type T struct { - A int `json:",string"` - B int `json:",string"` - C *int `json:",string"` - } - data := []byte(`{"A": "1", "B": null, "C": null}`) - var s T - s.B = 1 - s.C = new(int) - *s.C = 2 - err := Unmarshal(data, &s) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if s.B != 1 || s.C != nil { - t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) - } -} - -func intp(x int) *int { - p := new(int) - *p = x - return p -} - -func intpp(x *int) **int { - pp := new(*int) - *pp = x - return pp -} - -var interfaceSetTests = []struct { - pre interface{} - json string - post interface{} -}{ - {"foo", `"bar"`, "bar"}, - {"foo", `2`, 2.0}, - {"foo", `true`, true}, - {"foo", `null`, nil}, - - {nil, `null`, nil}, - {new(int), `null`, nil}, - {(*int)(nil), `null`, nil}, - {new(*int), `null`, new(*int)}, - {(**int)(nil), `null`, nil}, - {intp(1), `null`, nil}, - {intpp(nil), `null`, intpp(nil)}, - {intpp(intp(1)), `null`, intpp(nil)}, -} - -func TestInterfaceSet(t *testing.T) { - for _, tt := range interfaceSetTests { - b := struct{ X interface{} }{tt.pre} - blob := `{"X":` + tt.json + `}` - if err := Unmarshal([]byte(blob), &b); err != nil { - t.Errorf("Unmarshal %#q: %v", blob, err) - continue - } - if !reflect.DeepEqual(b.X, tt.post) { - t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) - } - } -} - -// JSON null values should be ignored for primitives and string values instead of resulting in an error. -// Issue 2540 -func TestUnmarshalNulls(t *testing.T) { - jsonData := []byte(`{ - "Bool" : null, - "Int" : null, - "Int8" : null, - "Int16" : null, - "Int32" : null, - "Int64" : null, - "Uint" : null, - "Uint8" : null, - "Uint16" : null, - "Uint32" : null, - "Uint64" : null, - "Float32" : null, - "Float64" : null, - "String" : null}`) - - nulls := All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Float32: 12.1, - Float64: 13.1, - String: "14"} - - err := Unmarshal(jsonData, &nulls) - if err != nil { - t.Errorf("Unmarshal of null values failed: %v", err) - } - if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || - nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || - nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { - - t.Errorf("Unmarshal of null values affected primitives") - } -} - -func TestStringKind(t *testing.T) { - type stringKind string - - var m1, m2 map[stringKind]int - m1 = map[stringKind]int{ - "foo": 42, - } - - data, err := Marshal(m1) - if err != nil { - t.Errorf("Unexpected error marshaling: %v", err) - } - - err = Unmarshal(data, &m2) - if err != nil { - t.Errorf("Unexpected error unmarshaling: %v", err) - } - - if !reflect.DeepEqual(m1, m2) { - t.Error("Items should be equal after encoding and then decoding") - } -} - -// Custom types with []byte as underlying type could not be marshalled -// and then unmarshalled. -// Issue 8962. -func TestByteKind(t *testing.T) { - type byteKind []byte - - a := byteKind("hello") - - data, err := Marshal(a) - if err != nil { - t.Error(err) - } - var b byteKind - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Errorf("expected %v == %v", a, b) - } -} - -// The fix for issue 8962 introduced a regression. -// Issue 12921. 
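TestNullString and TestUnmarshalNulls above pin down the null rule: a JSON null is a no-op for non-pointer primitives and strings but sets pointers (and maps, slices, interfaces) to nil. The standard encoding/json documents the same behaviour; a minimal sketch with illustrative field names:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type T struct {
		N int
		P *int
	}
	two := 2
	v := T{N: 1, P: &two}

	// null leaves the int untouched but clears the pointer.
	_ = json.Unmarshal([]byte(`{"N": null, "P": null}`), &v)
	fmt.Println(v.N, v.P) // 1 <nil>
}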
-func TestSliceOfCustomByte(t *testing.T) { - type Uint8 uint8 - - a := []Uint8("hello") - - data, err := Marshal(a) - if err != nil { - t.Fatal(err) - } - var b []Uint8 - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Fatalf("expected %v == %v", a, b) - } -} - -var decodeTypeErrorTests = []struct { - dest interface{} - src string -}{ - {new(string), `{"user": "name"}`}, // issue 4628. - {new(error), `{}`}, // issue 4222 - {new(error), `[]`}, - {new(error), `""`}, - {new(error), `123`}, - {new(error), `true`}, -} - -func TestUnmarshalTypeError(t *testing.T) { - for _, item := range decodeTypeErrorTests { - err := Unmarshal([]byte(item.src), item.dest) - if _, ok := err.(*UnmarshalTypeError); !ok { - t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", - item.src, item.dest, err) - } - } -} - -var unmarshalSyntaxTests = []string{ - "tru", - "fals", - "nul", - "123e", - `"hello`, - `[1,2,3`, - `{"key":1`, - `{"key":1,`, -} - -func TestUnmarshalSyntax(t *testing.T) { - var x interface{} - for _, src := range unmarshalSyntaxTests { - err := Unmarshal([]byte(src), &x) - if _, ok := err.(*SyntaxError); !ok { - t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) - } - } -} - -// Test handling of unexported fields that should be ignored. -// Issue 4660 -type unexportedFields struct { - Name string - m map[string]interface{} `json:"-"` - m2 map[string]interface{} `json:"abcd"` -} - -func TestUnmarshalUnexported(t *testing.T) { - input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` - want := &unexportedFields{Name: "Bob"} - - out := &unexportedFields{} - err := Unmarshal([]byte(input), out) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !reflect.DeepEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -// Time3339 is a time.Time which encodes to and from JSON -// as an RFC 3339 time in UTC. -type Time3339 time.Time - -func (t *Time3339) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) - } - tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) - if err != nil { - return err - } - *t = Time3339(tm) - return nil -} - -func TestUnmarshalJSONLiteralError(t *testing.T) { - var t3 Time3339 - err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) - if err == nil { - t.Fatalf("expected error; got time %v", time.Time(t3)) - } - if !strings.Contains(err.Error(), "range") { - t.Errorf("got err = %v; want out of range error", err) - } -} - -// Test that extra object elements in an array do not result in a -// "data changing underfoot" error. -// Issue 3717 -func TestSkipArrayObjects(t *testing.T) { - json := `[{}]` - var dest [0]interface{} - - err := Unmarshal([]byte(json), &dest) - if err != nil { - t.Errorf("got error %q, want nil", err) - } -} - -// Test semantics of pre-filled struct fields and pre-filled map fields. -// Issue 4900. -func TestPrefilled(t *testing.T) { - ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } - - // Values here change, cannot reuse table across runs. 
- var prefillTests = []struct { - in string - ptr interface{} - out interface{} - }{ - { - in: `{"X": 1, "Y": 2}`, - ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, - out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, - }, - { - in: `{"X": 1, "Y": 2}`, - ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), - out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), - }, - } - - for _, tt := range prefillTests { - ptrstr := fmt.Sprintf("%v", tt.ptr) - err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here - if err != nil { - t.Errorf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(tt.ptr, tt.out) { - t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) - } - } -} - -var invalidUnmarshalTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, -} - -func TestInvalidUnmarshal(t *testing.T) { - buf := []byte(`{"a":"1"}`) - for _, tt := range invalidUnmarshalTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -var invalidUnmarshalTextTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, - {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, -} - -func TestInvalidUnmarshalText(t *testing.T) { - buf := []byte(`123`) - for _, tt := range invalidUnmarshalTextTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -// Test that string option is ignored for invalid types. -// Issue 9812. -func TestInvalidStringOption(t *testing.T) { - num := 0 - item := struct { - T time.Time `json:",string"` - M map[string]string `json:",string"` - S []string `json:",string"` - A [1]string `json:",string"` - I interface{} `json:",string"` - P *int `json:",string"` - }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} - - data, err := Marshal(item) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - err = Unmarshal(data, &item) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } -} diff --git a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go deleted file mode 100644 index 1dae8bb7c..000000000 --- a/vendor/github.com/rsc/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. 
-// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. 
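The tag forms enumerated in the deleted Marshal documentation above ("-", renaming, ",omitempty", ",string") are the same ones the standard encoding/json accepts, so an illustrative struct (field names are hypothetical) encodes like this:

package main

import (
	"encoding/json"
	"fmt"
)

type user struct {
	Name   string `json:"name"`            // renamed key
	Email  string `json:"email,omitempty"` // dropped when empty
	Age    int64  `json:",string"`         // encoded inside a JSON string
	Secret string `json:"-"`               // never encoded
}

func main() {
	b, _ := json.Marshal(user{Name: "Mat", Age: 29, Secret: "hunter2"})
	fmt.Println(string(b)) // {"name":"Mat","Age":"29"}
}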
-// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML - - - diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt deleted file mode 100644 index b396900b8..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt +++ /dev/null @@ -1,286 +0,0 @@ -/* - {4} ({1} and []{1}) - -------------------------------------------------- -*/ - -// {4} gets the value as a {1}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) {4}(optionalDefault ...{1}) {1} { - if s, ok := v.data.({1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return {3} -} - -// Must{4} gets the value as a {1}. -// -// Panics if the object is not a {1}. 
-func (v *Value) Must{4}() {1} { - return v.data.({1}) -} - -// {4}Slice gets the value as a []{1}, returns the optionalDefault -// value or nil if the value is not a []{1}. -func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { - if s, ok := v.data.([]{1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// Must{4}Slice gets the value as a []{1}. -// -// Panics if the object is not a []{1}. -func (v *Value) Must{4}Slice() []{1} { - return v.data.([]{1}) -} - -// Is{4} gets whether the object contained is a {1} or not. -func (v *Value) Is{4}() bool { - _, ok := v.data.({1}) - return ok -} - -// Is{4}Slice gets whether the object contained is a []{1} or not. -func (v *Value) Is{4}Slice() bool { - _, ok := v.data.([]{1}) - return ok -} - -// Each{4} calls the specified callback for each object -// in the []{1}. -// -// Panics if the object is the wrong type. -func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { - - for index, val := range v.Must{4}Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// Where{4} uses the specified decider function to select items -// from the []{1}. The object contained in the result will contain -// only the selected items. -func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { - - var selected []{1} - - v.Each{4}(func(index int, val {1}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data:selected} - -} - -// Group{4} uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]{1}. -func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { - - groups := make(map[string][]{1}) - - v.Each{4}(func(index int, val {1}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]{1}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data:groups} - -} - -// Replace{4} uses the specified function to replace each {1}s -// by iterating each item. The data in the returned result will be a -// []{1} containing the replaced items. -func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { - - arr := v.Must{4}Slice() - replaced := make([]{1}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data:replaced} - -} - -// Collect{4} uses the specified collector function to collect a value -// for each of the {1}s in the slice. The data returned will be a -// []interface{}. 
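The deleted codegen template above is instantiated once per row of types_list.txt (removed further down); for the string row ({1}=string, {4}=Str) it yields accessors such as Str, MustStr, StrSlice, IsStrSlice and EachStr on objx.Value. A rough usage sketch, assuming the canonical github.com/stretchr/objx import path:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"tags": []string{"go", "json"}})

	// Typed accessors generated from the template for the string case.
	if m.Get("tags").IsStrSlice() {
		m.Get("tags").EachStr(func(i int, s string) bool {
			fmt.Println(i, s)
			return true // returning false aborts the iteration early
		})
	}
}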
-func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { - - arr := v.Must{4}Slice() - collected := make([]interface{}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data:collected} -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func Test{4}(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}()) - assert.Equal(t, val, New(m).Get("value").Must{4}()) - assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) - assert.Equal(t, val, New(m).Get("nothing").{4}({2})) - - assert.Panics(t, func() { - New(m).Get("age").Must{4}() - }) - -} - -func Test{4}Slice(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) - assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) - assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) - assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").Must{4}Slice() - }) - -} - -func TestIs{4}(t *testing.T) { - - var v *Value - - v = &Value{data: {1}({2})} - assert.True(t, v.Is{4}()) - - v = &Value{data: []{1}{ {1}({2}) }} - assert.True(t, v.Is{4}Slice()) - -} - -func TestEach{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - count := 0 - replacedVals := make([]{1}, 0) - assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) - assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) - assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) - -} - -func TestWhere{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - selected := v.Where{4}(func(i int, val {1}) bool { - return i%2==0 - }).Must{4}Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroup{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - grouped := v.Group{4}(func(i int, val {1}) string { - return fmt.Sprintf("%v", i%2==0) - }).data.(map[string][]{1}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplace{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - rawArr := v.Must{4}Slice() - - replaced := v.Replace{4}(func(index int, val {1}) {1} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.Must{4}Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollect{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - collected := v.Collect{4}(func(index int, val {1}) interface{} 
{ - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt deleted file mode 100644 index 069d43d8e..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt +++ /dev/null @@ -1,20 +0,0 @@ -Interface,interface{},"something",nil,Inter -Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI -ObjxMap,(Map),New(1),New(nil),ObjxMap -Bool,bool,true,false,Bool -String,string,"hello","",Str -Int,int,1,0,Int -Int8,int8,1,0,Int8 -Int16,int16,1,0,Int16 -Int32,int32,1,0,Int32 -Int64,int64,1,0,Int64 -Uint,uint,1,0,Uint -Uint8,uint8,1,0,Uint8 -Uint16,uint16,1,0,Uint16 -Uint32,uint32,1,0,Uint32 -Uint64,uint64,1,0,Uint64 -Uintptr,uintptr,1,0,Uintptr -Float32,float32,1,0,Float32 -Float64,float64,1,0,Float64 -Complex64,complex64,1,0,Complex64 -Complex128,complex128,1,0,Complex128 diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go index 9cdfa9f9f..5e020f310 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go @@ -12,15 +12,11 @@ import ( // JSON converts the contained object to a JSON string // representation func (m Map) JSON() (string, error) { - result, err := json.Marshal(m) - if err != nil { err = errors.New("objx: JSON encode failed with: " + err.Error()) } - return string(result), err - } // MustJSON converts the contained object to a JSON string @@ -36,7 +32,6 @@ func (m Map) MustJSON() string { // Base64 converts the contained object to a Base64 string // representation of the JSON string representation func (m Map) Base64() (string, error) { - var buf bytes.Buffer jsonData, err := m.JSON() @@ -45,11 +40,13 @@ func (m Map) Base64() (string, error) { } encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() + _, err = encoder.Write([]byte(jsonData)) + if err != nil { + return "", err + } + _ = encoder.Close() return buf.String(), nil - } // MustBase64 converts the contained object to a Base64 string @@ -67,16 +64,13 @@ func (m Map) MustBase64() string { // representation of the JSON string representation and signs it // using the provided key. func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() if err != nil { return "", err } sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil - } // MustSignedBase64 converts the contained object to a Base64 string @@ -98,14 +92,11 @@ func (m Map) MustSignedBase64(key string) string { // URLValues creates a url.Values object from an Obj. This // function requires that the wrapped object be a map[string]interface{} func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { //TODO: can this be done without sprintf? 
vals.Set(k, fmt.Sprintf("%v", v)) } - return vals } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go deleted file mode 100644 index e9ccd2987..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestConversionJSON(t *testing.T) { - - jsonString := `{"name":"Mat"}` - o := MustFromJSON(jsonString) - - result, err := o.JSON() - - if assert.NoError(t, err) { - assert.Equal(t, jsonString, result) - } - - assert.Equal(t, jsonString, o.MustJSON()) - -} - -func TestConversionJSONWithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { - o.MustJSON() - }) - - _, err := o.JSON() - - assert.Error(t, err) - -} - -func TestConversionBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) - - result, err := o.Base64() - - if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) - } - - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) - -} - -func TestConversionBase64WithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { - o.MustBase64() - }) - - _, err := o.Base64() - - assert.Error(t, err) - -} - -func TestConversionSignedBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) - - result, err := o.SignedBase64("key") - - if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) - } - - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) - -} - -func TestConversionSignedBase64WithError(t *testing.T) { - - o := MSI() - o["test"] = func() {} - - assert.Panics(t, func() { - o.MustSignedBase64("key") - }) - - _, err := o.SignedBase64("key") - - assert.Error(t, err) - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go index 47bf85e46..6d6af1a83 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go @@ -1,72 +1,66 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. -// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. -// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... 
*/ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... */ -// -// } +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ package objx diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go deleted file mode 100644 index 27f7d9049..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -var fixtures = []struct { - // name is the name of the fixture (used for reporting - // failures) - name string - // data is the JSON data to be worked on - data string - // get is the argument(s) to pass to Get - get interface{} - // output is the expected output - output interface{} -}{ - { - name: "Simple get", - data: `{"name": "Mat"}`, - get: "name", - output: "Mat", - }, - { - name: "Get with dot notation", - data: `{"address": {"city": "Boulder"}}`, - get: "address.city", - output: "Boulder", - }, - { - name: "Deep get with dot notation", - data: `{"one": {"two": {"three": {"four": "hello"}}}}`, - get: "one.two.three.four", - output: "hello", - }, - { - name: "Get missing with dot notation", - data: `{"one": {"two": {"three": {"four": "hello"}}}}`, - get: "one.ten", - output: nil, - }, - { - name: "Get with array notation", - data: `{"tags": ["one", "two", "three"]}`, - get: "tags[1]", - output: "two", - }, - { - name: "Get with array and dot notation", - data: `{"types": { "tags": ["one", "two", "three"]}}`, - get: "types.tags[1]", - output: "two", - }, - { - name: "Get with array and dot notation - field after array", - data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, - get: "tags[1].name", - output: "two", - }, - { - name: "Complex get with array and dot notation", - data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, - get: "tags[0].list[0].one", - output: "pizza", - }, - { - name: "Get field from within string should be nil", - data: `{"name":"Tyler"}`, - get: "name.something", - output: nil, - }, - { - name: "Get field from within string (using array accessor) should be nil", - data: `{"numbers":["one", "two", "three"]}`, - get: "numbers[0].nope", - output: nil, - }, -} - -func TestFixtures(t *testing.T) { - - for _, fixture := range fixtures { - - m := MustFromJSON(fixture.data) - - // get the value - t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) - value := m.Get(fixture.get.(string)) - - // make sure it matches - assert.Equal(t, fixture.output, value.data, - "Get fixture \"%s\" failed: %v", fixture.name, fixture, - ) - - } - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go index eb6ed8e28..7e9389a20 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go @@ -27,7 +27,7 @@ func (m Map) Value() *Value { } // Nil represents a nil Map. -var Nil Map = New(nil) +var Nil = New(nil) // New creates a new Map containing the map[string]interface{} in the data argument. // If the data argument is not a map[string]interface, New attempts to call the @@ -49,7 +49,7 @@ func New(data interface{}) Map { // // Panics // -// Panics if any key arugment is non-string or if there are an odd number of arguments. +// Panics if any key argument is non-string or if there are an odd number of arguments. 
// // Example // @@ -60,16 +60,13 @@ func New(data interface{}) Map { // // creates an Map equivalent to // m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) func MSI(keyAndValuePairs ...interface{}) Map { - newMap := make(map[string]interface{}) keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") } for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] value := keyAndValuePairs[i+1] @@ -78,11 +75,8 @@ func MSI(keyAndValuePairs ...interface{}) Map { if !keyStringOK { panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") } - newMap[keyString] = value - } - return New(newMap) } @@ -94,11 +88,9 @@ func MSI(keyAndValuePairs ...interface{}) Map { // Panics if the JSON is invalid. func MustFromJSON(jsonString string) Map { o, err := FromJSON(jsonString) - if err != nil { panic("objx: MustFromJSON failed with error: " + err.Error()) } - return o } @@ -107,16 +99,12 @@ func MustFromJSON(jsonString string) Map { // // Returns an error if the JSON is invalid. func FromJSON(jsonString string) (Map, error) { - var data interface{} err := json.Unmarshal([]byte(jsonString), &data) - if err != nil { return Nil, err } - return New(data), nil - } // FromBase64 creates a new Obj containing the data specified @@ -124,14 +112,11 @@ func FromJSON(jsonString string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) if err != nil { return nil, err } - return FromJSON(string(decoded)) } @@ -140,13 +125,10 @@ func FromBase64(base64String string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { panic("objx: MustFromBase64 failed with error: " + err.Error()) } - return result } @@ -157,14 +139,13 @@ func MustFromBase64(base64String string) Map { func FromSignedBase64(base64String, key string) (Map, error) { parts := strings.Split(base64String, SignatureSeparator) if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed.") + return nil, errors.New("objx: Signed base64 string is malformed") } sig := HashWithKey(parts[0], key) if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") + return nil, errors.New("objx: Signature for base64 data does not match") } - return FromBase64(parts[0]) } @@ -173,13 +154,10 @@ func FromSignedBase64(base64String, key string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) } - return result } @@ -188,9 +166,7 @@ func MustFromSignedBase64(base64String, key string) Map { // // For queries with multiple values, the first value is selected. 
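FromURLQuery, whose body follows, keeps only the first value of a repeated query parameter (m[k] = vals[0] in the hunk below), and MustFromURLQuery panics instead of returning an error. A short sketch of that behaviour, with a made-up query string:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Repeated parameters keep only their first value.
	m, err := objx.FromURLQuery("name=tyler&tag=go&tag=json")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Get("name").Str()) // "tyler"
	fmt.Println(m.Get("tag").Str())  // "go"; the second "tag" value is dropped

	// MustFromURLQuery panics on a malformed query instead of returning an error.
	_ = objx.MustFromURLQuery("state=UT")
}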
func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { return nil, err } @@ -199,7 +175,6 @@ func FromURLQuery(query string) (Map, error) { for k, vals := range vals { m[k] = vals[0] } - return New(m), nil } @@ -210,13 +185,9 @@ func FromURLQuery(query string) (Map, error) { // // Panics if it encounters an error func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { panic("objx: MustFromURLQuery failed with error: " + err.Error()) } - return o - } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go deleted file mode 100644 index 6beb50675..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package objx - -var TestMap map[string]interface{} = map[string]interface{}{ - "name": "Tyler", - "address": map[string]interface{}{ - "city": "Salt Lake City", - "state": "UT", - }, - "numbers": []interface{}{"one", "two", "three", "four", "five"}, -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go deleted file mode 100644 index 1f8b45c61..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -type Convertable struct { - name string -} - -func (c *Convertable) MSI() map[string]interface{} { - return map[string]interface{}{"name": c.name} -} - -type Unconvertable struct { - name string -} - -func TestMapCreation(t *testing.T) { - - o := New(nil) - assert.Nil(t, o) - - o = New("Tyler") - assert.Nil(t, o) - - unconvertable := &Unconvertable{name: "Tyler"} - o = New(unconvertable) - assert.Nil(t, o) - - convertable := &Convertable{name: "Tyler"} - o = New(convertable) - if assert.NotNil(t, convertable) { - assert.Equal(t, "Tyler", o["name"], "Tyler") - } - - o = MSI() - if assert.NotNil(t, o) { - assert.NotNil(t, o) - } - - o = MSI("name", "Tyler") - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, o["name"], "Tyler") - } - } - -} - -func TestMapMustFromJSONWithError(t *testing.T) { - - _, err := FromJSON(`"name":"Mat"}`) - assert.Error(t, err) - -} - -func TestMapFromJSON(t *testing.T) { - - o := MustFromJSON(`{"name":"Mat"}`) - - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, "Mat", o["name"]) - } - } - -} - -func TestMapFromJSONWithError(t *testing.T) { - - var m Map - - assert.Panics(t, func() { - m = MustFromJSON(`"name":"Mat"}`) - }) - - assert.Nil(t, m) - -} - -func TestMapFromBase64String(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWF0In0=" - - o, err := FromBase64(base64String) - - if assert.NoError(t, err) { - assert.Equal(t, o.Get("name").Str(), "Mat") - } - - assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") - -} - -func TestMapFromBase64StringWithError(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWFasd0In0=" - - _, err := FromBase64(base64String) - - assert.Error(t, err) - - assert.Panics(t, func() { - MustFromBase64(base64String) - }) - -} - -func TestMapFromSignedBase64String(t *testing.T) { - - base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - - o, err := FromSignedBase64(base64String, "key") - - if assert.NoError(t, err) 
{ - assert.Equal(t, o.Get("name").Str(), "Mat") - } - - assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") - -} - -func TestMapFromSignedBase64StringWithError(t *testing.T) { - - base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - - _, err := FromSignedBase64(base64String, "key") - - assert.Error(t, err) - - assert.Panics(t, func() { - MustFromSignedBase64(base64String, "key") - }) - -} - -func TestMapFromURLQuery(t *testing.T) { - - m, err := FromURLQuery("name=tyler&state=UT") - if assert.NoError(t, err) && assert.NotNil(t, m) { - assert.Equal(t, "tyler", m.Get("name").Str()) - assert.Equal(t, "UT", m.Get("state").Str()) - } - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go index b35c86392..e7b8eb794 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go @@ -2,11 +2,10 @@ package objx // Exclude returns a new Map with the keys in the specified []string // excluded. -func (d Map) Exclude(exclude []string) Map { - +func (m Map) Exclude(exclude []string) Map { excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true + for k, v := range m { + var shouldInclude = true for _, toExclude := range exclude { if k == toExclude { shouldInclude = false @@ -17,7 +16,6 @@ func (d Map) Exclude(exclude []string) Map { excluded[k] = v } } - return excluded } @@ -38,19 +36,16 @@ func (m Map) Merge(merge Map) Map { return m.Copy().MergeHere(merge) } -// Merge blends the specified map with this map and returns the current map. +// MergeHere blends the specified map with this map and returns the current map. // -// Keys that appear in both will be selected from the specified map. The original map +// Keys that appear in both will be selected from the specified map. The original map // will be modified. 
This method requires that // the wrapped object be a map[string]interface{} func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { m[k] = v } - return m - } // Transform builds a new Obj giving the transformer a chance @@ -71,11 +66,9 @@ func (m Map) Transform(transformer func(key string, value interface{}) (string, // This method requires that the wrapped object be a map[string]interface{} func (m Map) TransformKeys(mapping map[string]string) Map { return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { return newKey, value } - return key, value }) } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go deleted file mode 100644 index e20ee23bc..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestExclude(t *testing.T) { - - d := make(Map) - d["name"] = "Mat" - d["age"] = 29 - d["secret"] = "ABC" - - excluded := d.Exclude([]string{"secret"}) - - assert.Equal(t, d["name"], excluded["name"]) - assert.Equal(t, d["age"], excluded["age"]) - assert.False(t, excluded.Has("secret"), "secret should be excluded") - -} - -func TestCopy(t *testing.T) { - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - d1Obj := New(d1) - d2Obj := d1Obj.Copy() - - d2Obj["name"] = "Mat" - - assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") - assert.Equal(t, d2Obj.Get("name").Str(), "Mat") - -} - -func TestMerge(t *testing.T) { - - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) - - merged := dObj.Merge(d1Obj) - - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Empty(t, dObj.Get("location").Str()) - -} - -func TestMergeHere(t *testing.T) { - - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) - - merged := dObj.MergeHere(d1Obj) - - assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go index fdd6be9cf..e052ff890 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go @@ -9,6 +9,9 @@ import ( // key. 
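The security.go hunk that follows is the keyed SHA-1 helper that FromSignedBase64, earlier in this patch, relies on: the input is split on SignatureSeparator and the trailing part must equal HashWithKey(data, key). A hedged sketch of building an input FromSignedBase64 will accept, assuming a base64-encoded JSON document and an invented key:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	key := "secret-key" // hypothetical signing key

	// Encode some JSON, then append the keyed SHA-1 signature that
	// FromSignedBase64 checks after the SignatureSeparator.
	data := base64.StdEncoding.EncodeToString([]byte(`{"name": "Mat"}`))
	signed := data + objx.SignatureSeparator + objx.HashWithKey(data, key)

	m, err := objx.FromSignedBase64(signed, key)
	if err != nil {
		panic(err) // a tampered payload or wrong key ends up here
	}
	fmt.Println(m.Get("name").Str()) // "Mat"
}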
func HashWithKey(data, key string) string { hash := sha1.New() - hash.Write([]byte(data + ":" + key)) + _, err := hash.Write([]byte(data + ":" + key)) + if err != nil { + return "" + } return hex.EncodeToString(hash.Sum(nil)) } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go deleted file mode 100644 index 8f0898f62..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestHashWithKey(t *testing.T) { - - assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go deleted file mode 100644 index 5408c7fd3..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSimpleExample(t *testing.T) { - - // build a map from a JSON object - o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) - - // Map can be used as a straight map[string]interface{} - assert.Equal(t, o["name"], "Mat") - - // Get an Value object - v := o.Get("name") - assert.Equal(t, v, &Value{data: "Mat"}) - - // Test the contained value - assert.False(t, v.IsInt()) - assert.False(t, v.IsBool()) - assert.True(t, v.IsStr()) - - // Get the contained value - assert.Equal(t, v.Str(), "Mat") - - // Get a default value if the contained value is not of the expected type or does not exist - assert.Equal(t, 1, v.Int(1)) - - // Get a value by using array notation - assert.Equal(t, "indian", o.Get("foods[0]").Data()) - - // Set a value by using array notation - o.Set("foods[0]", "italian") - assert.Equal(t, "italian", o.Get("foods[0]").Str()) - - // Get a value by using dot notation - assert.Equal(t, "hobbiton", o.Get("location.county").Str()) - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go deleted file mode 100644 index bcc1eb03d..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package objx - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestHas(t *testing.T) { - - m := New(TestMap) - - assert.True(t, m.Has("name")) - assert.True(t, m.Has("address.state")) - assert.True(t, m.Has("numbers[4]")) - - assert.False(t, m.Has("address.state.nope")) - assert.False(t, m.Has("address.nope")) - assert.False(t, m.Has("nope")) - assert.False(t, m.Has("numbers[5]")) - - m = nil - assert.False(t, m.Has("nothing")) - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go index f3ecb29b9..202a91f8c 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -2,7 +2,6 @@ package objx /* 
Inter (interface{} and []interface{}) - -------------------------------------------------- */ // Inter gets the value as a interface{}, returns the optionalDefault @@ -60,44 +59,35 @@ func (v *Value) IsInterSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInter uses the specified decider function to select items // from the []interface{}. The object contained in the result will contain // only the selected items. func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInter uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]interface{}. func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -106,47 +96,37 @@ func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInter uses the specified function to replace each interface{}s // by iterating each item. The data in the returned result will be a // []interface{} containing the replaced items. func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInter uses the specified collector function to collect a value // for each of the interface{}s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- */ // MSI gets the value as a map[string]interface{}, returns the optionalDefault @@ -204,44 +184,35 @@ func (v *Value) IsMSISlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - for index, val := range v.MustMSISlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereMSI uses the specified decider function to select items // from the []map[string]interface{}. The object contained in the result will contain // only the selected items. 
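The Inter helpers above establish the shape that the rest of this generated file repeats for every element type: Each* iterates until the callback returns false, Where* applies a decider, Group* buckets items under a string key, and Collect* maps each item to a new value. A small sketch of the Each/Group/Collect trio over an interface slice, with invented data:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MSI("tags", []interface{}{"go", "json", "maps", "go"})
	v := m.Get("tags")

	// EachInter stops as soon as the callback returns false, so only the
	// first two elements are visited here.
	v.EachInter(func(i int, val interface{}) bool {
		fmt.Println(i, val)
		return i < 1
	})

	// GroupInter buckets elements under the string key the grouper returns.
	groups := v.GroupInter(func(i int, val interface{}) string {
		return val.(string)
	})

	// CollectInter builds a []interface{} from the collector's results.
	lengths := v.CollectInter(func(i int, val interface{}) interface{} {
		return len(val.(string))
	})

	fmt.Println(groups.Data(), lengths.MustInterSlice())
}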
func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - var selected []map[string]interface{} - v.EachMSI(func(index int, val map[string]interface{}) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupMSI uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]map[string]interface{}. func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - groups := make(map[string][]map[string]interface{}) - v.EachMSI(func(index int, val map[string]interface{}) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -250,47 +221,37 @@ func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Valu groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceMSI uses the specified function to replace each map[string]interface{}s // by iterating each item. The data in the returned result will be a // []map[string]interface{} containing the replaced items. func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - arr := v.MustMSISlice() replaced := make([]map[string]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectMSI uses the specified collector function to collect a value // for each of the map[string]interface{}s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - arr := v.MustMSISlice() collected := make([]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* ObjxMap ((Map) and [](Map)) - -------------------------------------------------- */ // ObjxMap gets the value as a (Map), returns the optionalDefault @@ -348,44 +309,35 @@ func (v *Value) IsObjxMapSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - for index, val := range v.MustObjxMapSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereObjxMap uses the specified decider function to select items // from the [](Map). The object contained in the result will contain // only the selected items. func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - var selected [](Map) - v.EachObjxMap(func(index int, val Map) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupObjxMap uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][](Map). 
func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - groups := make(map[string][](Map)) - v.EachObjxMap(func(index int, val Map) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -394,47 +346,37 @@ func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceObjxMap uses the specified function to replace each (Map)s // by iterating each item. The data in the returned result will be a // [](Map) containing the replaced items. func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - arr := v.MustObjxMapSlice() replaced := make([](Map), len(arr)) - v.EachObjxMap(func(index int, val Map) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectObjxMap uses the specified collector function to collect a value // for each of the (Map)s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - arr := v.MustObjxMapSlice() collected := make([]interface{}, len(arr)) - v.EachObjxMap(func(index int, val Map) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Bool (bool and []bool) - -------------------------------------------------- */ // Bool gets the value as a bool, returns the optionalDefault @@ -492,44 +434,35 @@ func (v *Value) IsBoolSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachBool(callback func(int, bool) bool) *Value { - for index, val := range v.MustBoolSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereBool uses the specified decider function to select items // from the []bool. The object contained in the result will contain // only the selected items. func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - var selected []bool - v.EachBool(func(index int, val bool) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupBool uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]bool. func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - groups := make(map[string][]bool) - v.EachBool(func(index int, val bool) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -538,47 +471,37 @@ func (v *Value) GroupBool(grouper func(int, bool) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceBool uses the specified function to replace each bools // by iterating each item. The data in the returned result will be a // []bool containing the replaced items. func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - arr := v.MustBoolSlice() replaced := make([]bool, len(arr)) - v.EachBool(func(index int, val bool) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectBool uses the specified collector function to collect a value // for each of the bools in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - arr := v.MustBoolSlice() collected := make([]interface{}, len(arr)) - v.EachBool(func(index int, val bool) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Str (string and []string) - -------------------------------------------------- */ // Str gets the value as a string, returns the optionalDefault @@ -636,44 +559,35 @@ func (v *Value) IsStrSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachStr(callback func(int, string) bool) *Value { - for index, val := range v.MustStrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereStr uses the specified decider function to select items // from the []string. The object contained in the result will contain // only the selected items. func (v *Value) WhereStr(decider func(int, string) bool) *Value { - var selected []string - v.EachStr(func(index int, val string) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupStr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]string. func (v *Value) GroupStr(grouper func(int, string) string) *Value { - groups := make(map[string][]string) - v.EachStr(func(index int, val string) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -682,47 +596,37 @@ func (v *Value) GroupStr(grouper func(int, string) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceStr uses the specified function to replace each strings // by iterating each item. The data in the returned result will be a // []string containing the replaced items. func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - arr := v.MustStrSlice() replaced := make([]string, len(arr)) - v.EachStr(func(index int, val string) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectStr uses the specified collector function to collect a value // for each of the strings in the slice. The data returned will be a // []interface{}. func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - arr := v.MustStrSlice() collected := make([]interface{}, len(arr)) - v.EachStr(func(index int, val string) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int (int and []int) - -------------------------------------------------- */ // Int gets the value as a int, returns the optionalDefault @@ -780,44 +684,35 @@ func (v *Value) IsIntSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt(callback func(int, int) bool) *Value { - for index, val := range v.MustIntSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt uses the specified decider function to select items // from the []int. The object contained in the result will contain // only the selected items. 
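The strongly typed getters in this stretch of the file (Int and friends, continued below) mirror the doc.go promise: a missing or wrongly typed value yields the zero value or an explicit default, while the Must* forms panic. A brief sketch with invented keys and values:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MSI("retries", 3, "ports", []int{8065, 8067})

	// Typed getters return the zero value, or an explicit default,
	// when the underlying data is missing or of another type.
	fmt.Println(m.Get("retries").Int())   // 3
	fmt.Println(m.Get("timeout").Int(30)) // 30: the key is absent

	// The *Slice variants work the same way for []int values.
	if m.Get("ports").IsIntSlice() {
		fmt.Println(m.Get("ports").MustIntSlice()[0]) // 8065
	}

	// Must* accessors panic instead of falling back:
	// m.Get("timeout").MustInt() // would panic
}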
func (v *Value) WhereInt(decider func(int, int) bool) *Value { - var selected []int - v.EachInt(func(index int, val int) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int. func (v *Value) GroupInt(grouper func(int, int) string) *Value { - groups := make(map[string][]int) - v.EachInt(func(index int, val int) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -826,47 +721,37 @@ func (v *Value) GroupInt(grouper func(int, int) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt uses the specified function to replace each ints // by iterating each item. The data in the returned result will be a // []int containing the replaced items. func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - arr := v.MustIntSlice() replaced := make([]int, len(arr)) - v.EachInt(func(index int, val int) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt uses the specified collector function to collect a value // for each of the ints in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - arr := v.MustIntSlice() collected := make([]interface{}, len(arr)) - v.EachInt(func(index int, val int) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int8 (int8 and []int8) - -------------------------------------------------- */ // Int8 gets the value as a int8, returns the optionalDefault @@ -924,44 +809,35 @@ func (v *Value) IsInt8Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - for index, val := range v.MustInt8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt8 uses the specified decider function to select items // from the []int8. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - var selected []int8 - v.EachInt8(func(index int, val int8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int8. func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - groups := make(map[string][]int8) - v.EachInt8(func(index int, val int8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -970,47 +846,37 @@ func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt8 uses the specified function to replace each int8s // by iterating each item. The data in the returned result will be a // []int8 containing the replaced items. 
func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - arr := v.MustInt8Slice() replaced := make([]int8, len(arr)) - v.EachInt8(func(index int, val int8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt8 uses the specified collector function to collect a value // for each of the int8s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - arr := v.MustInt8Slice() collected := make([]interface{}, len(arr)) - v.EachInt8(func(index int, val int8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int16 (int16 and []int16) - -------------------------------------------------- */ // Int16 gets the value as a int16, returns the optionalDefault @@ -1068,44 +934,35 @@ func (v *Value) IsInt16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - for index, val := range v.MustInt16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt16 uses the specified decider function to select items // from the []int16. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - var selected []int16 - v.EachInt16(func(index int, val int16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int16. func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - groups := make(map[string][]int16) - v.EachInt16(func(index int, val int16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1114,47 +971,37 @@ func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt16 uses the specified function to replace each int16s // by iterating each item. The data in the returned result will be a // []int16 containing the replaced items. func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - arr := v.MustInt16Slice() replaced := make([]int16, len(arr)) - v.EachInt16(func(index int, val int16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt16 uses the specified collector function to collect a value // for each of the int16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - arr := v.MustInt16Slice() collected := make([]interface{}, len(arr)) - v.EachInt16(func(index int, val int16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int32 (int32 and []int32) - -------------------------------------------------- */ // Int32 gets the value as a int32, returns the optionalDefault @@ -1212,44 +1059,35 @@ func (v *Value) IsInt32Slice() bool { // // Panics if the object is the wrong type. 
func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - for index, val := range v.MustInt32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt32 uses the specified decider function to select items // from the []int32. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - var selected []int32 - v.EachInt32(func(index int, val int32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int32. func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - groups := make(map[string][]int32) - v.EachInt32(func(index int, val int32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1258,47 +1096,37 @@ func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt32 uses the specified function to replace each int32s // by iterating each item. The data in the returned result will be a // []int32 containing the replaced items. func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - arr := v.MustInt32Slice() replaced := make([]int32, len(arr)) - v.EachInt32(func(index int, val int32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt32 uses the specified collector function to collect a value // for each of the int32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - arr := v.MustInt32Slice() collected := make([]interface{}, len(arr)) - v.EachInt32(func(index int, val int32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int64 (int64 and []int64) - -------------------------------------------------- */ // Int64 gets the value as a int64, returns the optionalDefault @@ -1356,44 +1184,35 @@ func (v *Value) IsInt64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - for index, val := range v.MustInt64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt64 uses the specified decider function to select items // from the []int64. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - var selected []int64 - v.EachInt64(func(index int, val int64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int64. 
func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - groups := make(map[string][]int64) - v.EachInt64(func(index int, val int64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1402,47 +1221,37 @@ func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt64 uses the specified function to replace each int64s // by iterating each item. The data in the returned result will be a // []int64 containing the replaced items. func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - arr := v.MustInt64Slice() replaced := make([]int64, len(arr)) - v.EachInt64(func(index int, val int64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt64 uses the specified collector function to collect a value // for each of the int64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - arr := v.MustInt64Slice() collected := make([]interface{}, len(arr)) - v.EachInt64(func(index int, val int64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint (uint and []uint) - -------------------------------------------------- */ // Uint gets the value as a uint, returns the optionalDefault @@ -1500,44 +1309,35 @@ func (v *Value) IsUintSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint(callback func(int, uint) bool) *Value { - for index, val := range v.MustUintSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint uses the specified decider function to select items // from the []uint. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - var selected []uint - v.EachUint(func(index int, val uint) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint. func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - groups := make(map[string][]uint) - v.EachUint(func(index int, val uint) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1546,47 +1346,37 @@ func (v *Value) GroupUint(grouper func(int, uint) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint uses the specified function to replace each uints // by iterating each item. The data in the returned result will be a // []uint containing the replaced items. func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - arr := v.MustUintSlice() replaced := make([]uint, len(arr)) - v.EachUint(func(index int, val uint) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint uses the specified collector function to collect a value // for each of the uints in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - arr := v.MustUintSlice() collected := make([]interface{}, len(arr)) - v.EachUint(func(index int, val uint) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint8 (uint8 and []uint8) - -------------------------------------------------- */ // Uint8 gets the value as a uint8, returns the optionalDefault @@ -1644,44 +1434,35 @@ func (v *Value) IsUint8Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - for index, val := range v.MustUint8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint8 uses the specified decider function to select items // from the []uint8. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - var selected []uint8 - v.EachUint8(func(index int, val uint8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint8. func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - groups := make(map[string][]uint8) - v.EachUint8(func(index int, val uint8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1690,47 +1471,37 @@ func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint8 uses the specified function to replace each uint8s // by iterating each item. The data in the returned result will be a // []uint8 containing the replaced items. func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - arr := v.MustUint8Slice() replaced := make([]uint8, len(arr)) - v.EachUint8(func(index int, val uint8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint8 uses the specified collector function to collect a value // for each of the uint8s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - arr := v.MustUint8Slice() collected := make([]interface{}, len(arr)) - v.EachUint8(func(index int, val uint8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint16 (uint16 and []uint16) - -------------------------------------------------- */ // Uint16 gets the value as a uint16, returns the optionalDefault @@ -1788,44 +1559,35 @@ func (v *Value) IsUint16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - for index, val := range v.MustUint16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint16 uses the specified decider function to select items // from the []uint16. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - var selected []uint16 - v.EachUint16(func(index int, val uint16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint16. func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - groups := make(map[string][]uint16) - v.EachUint16(func(index int, val uint16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1834,47 +1596,37 @@ func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint16 uses the specified function to replace each uint16s // by iterating each item. The data in the returned result will be a // []uint16 containing the replaced items. func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - arr := v.MustUint16Slice() replaced := make([]uint16, len(arr)) - v.EachUint16(func(index int, val uint16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint16 uses the specified collector function to collect a value // for each of the uint16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - arr := v.MustUint16Slice() collected := make([]interface{}, len(arr)) - v.EachUint16(func(index int, val uint16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint32 (uint32 and []uint32) - -------------------------------------------------- */ // Uint32 gets the value as a uint32, returns the optionalDefault @@ -1932,44 +1684,35 @@ func (v *Value) IsUint32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - for index, val := range v.MustUint32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint32 uses the specified decider function to select items // from the []uint32. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - var selected []uint32 - v.EachUint32(func(index int, val uint32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint32. func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - groups := make(map[string][]uint32) - v.EachUint32(func(index int, val uint32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1978,47 +1721,37 @@ func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint32 uses the specified function to replace each uint32s // by iterating each item. The data in the returned result will be a // []uint32 containing the replaced items. 
func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - arr := v.MustUint32Slice() replaced := make([]uint32, len(arr)) - v.EachUint32(func(index int, val uint32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint32 uses the specified collector function to collect a value // for each of the uint32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - arr := v.MustUint32Slice() collected := make([]interface{}, len(arr)) - v.EachUint32(func(index int, val uint32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint64 (uint64 and []uint64) - -------------------------------------------------- */ // Uint64 gets the value as a uint64, returns the optionalDefault @@ -2076,44 +1809,35 @@ func (v *Value) IsUint64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - for index, val := range v.MustUint64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint64 uses the specified decider function to select items // from the []uint64. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - var selected []uint64 - v.EachUint64(func(index int, val uint64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint64. func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - groups := make(map[string][]uint64) - v.EachUint64(func(index int, val uint64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2122,47 +1846,37 @@ func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint64 uses the specified function to replace each uint64s // by iterating each item. The data in the returned result will be a // []uint64 containing the replaced items. func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - arr := v.MustUint64Slice() replaced := make([]uint64, len(arr)) - v.EachUint64(func(index int, val uint64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint64 uses the specified collector function to collect a value // for each of the uint64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - arr := v.MustUint64Slice() collected := make([]interface{}, len(arr)) - v.EachUint64(func(index int, val uint64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uintptr (uintptr and []uintptr) - -------------------------------------------------- */ // Uintptr gets the value as a uintptr, returns the optionalDefault @@ -2220,44 +1934,35 @@ func (v *Value) IsUintptrSlice() bool { // // Panics if the object is the wrong type. 
func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - for index, val := range v.MustUintptrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUintptr uses the specified decider function to select items // from the []uintptr. The object contained in the result will contain // only the selected items. func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - var selected []uintptr - v.EachUintptr(func(index int, val uintptr) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUintptr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uintptr. func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - groups := make(map[string][]uintptr) - v.EachUintptr(func(index int, val uintptr) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2266,47 +1971,37 @@ func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUintptr uses the specified function to replace each uintptrs // by iterating each item. The data in the returned result will be a // []uintptr containing the replaced items. func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - arr := v.MustUintptrSlice() replaced := make([]uintptr, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUintptr uses the specified collector function to collect a value // for each of the uintptrs in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - arr := v.MustUintptrSlice() collected := make([]interface{}, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Float32 (float32 and []float32) - -------------------------------------------------- */ // Float32 gets the value as a float32, returns the optionalDefault @@ -2364,44 +2059,35 @@ func (v *Value) IsFloat32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - for index, val := range v.MustFloat32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat32 uses the specified decider function to select items // from the []float32. The object contained in the result will contain // only the selected items. func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - var selected []float32 - v.EachFloat32(func(index int, val float32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]float32. 
func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - groups := make(map[string][]float32) - v.EachFloat32(func(index int, val float32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2410,47 +2096,37 @@ func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat32 uses the specified function to replace each float32s // by iterating each item. The data in the returned result will be a // []float32 containing the replaced items. func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - arr := v.MustFloat32Slice() replaced := make([]float32, len(arr)) - v.EachFloat32(func(index int, val float32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat32 uses the specified collector function to collect a value // for each of the float32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - arr := v.MustFloat32Slice() collected := make([]interface{}, len(arr)) - v.EachFloat32(func(index int, val float32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Float64 (float64 and []float64) - -------------------------------------------------- */ // Float64 gets the value as a float64, returns the optionalDefault @@ -2508,44 +2184,35 @@ func (v *Value) IsFloat64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - for index, val := range v.MustFloat64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat64 uses the specified decider function to select items // from the []float64. The object contained in the result will contain // only the selected items. func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - var selected []float64 - v.EachFloat64(func(index int, val float64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]float64. func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - groups := make(map[string][]float64) - v.EachFloat64(func(index int, val float64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2554,47 +2221,37 @@ func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat64 uses the specified function to replace each float64s // by iterating each item. The data in the returned result will be a // []float64 containing the replaced items. func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - arr := v.MustFloat64Slice() replaced := make([]float64, len(arr)) - v.EachFloat64(func(index int, val float64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat64 uses the specified collector function to collect a value // for each of the float64s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - arr := v.MustFloat64Slice() collected := make([]interface{}, len(arr)) - v.EachFloat64(func(index int, val float64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Complex64 (complex64 and []complex64) - -------------------------------------------------- */ // Complex64 gets the value as a complex64, returns the optionalDefault @@ -2652,44 +2309,35 @@ func (v *Value) IsComplex64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - for index, val := range v.MustComplex64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex64 uses the specified decider function to select items // from the []complex64. The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - var selected []complex64 - v.EachComplex64(func(index int, val complex64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex64. func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - groups := make(map[string][]complex64) - v.EachComplex64(func(index int, val complex64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2698,47 +2346,37 @@ func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex64 uses the specified function to replace each complex64s // by iterating each item. The data in the returned result will be a // []complex64 containing the replaced items. func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - arr := v.MustComplex64Slice() replaced := make([]complex64, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex64 uses the specified collector function to collect a value // for each of the complex64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - arr := v.MustComplex64Slice() collected := make([]interface{}, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Complex128 (complex128 and []complex128) - -------------------------------------------------- */ // Complex128 gets the value as a complex128, returns the optionalDefault @@ -2796,44 +2434,35 @@ func (v *Value) IsComplex128Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - for index, val := range v.MustComplex128Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex128 uses the specified decider function to select items // from the []complex128. 
The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - var selected []complex128 - v.EachComplex128(func(index int, val complex128) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex128 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex128. func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - groups := make(map[string][]complex128) - v.EachComplex128(func(index int, val complex128) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2842,40 +2471,31 @@ func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex128 uses the specified function to replace each complex128s // by iterating each item. The data in the returned result will be a // []complex128 containing the replaced items. func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - arr := v.MustComplex128Slice() replaced := make([]complex128, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex128 uses the specified collector function to collect a value // for each of the complex128s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - arr := v.MustComplex128Slice() collected := make([]interface{}, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go deleted file mode 100644 index f7a4fceea..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go +++ /dev/null @@ -1,2867 +0,0 @@ -package objx - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "testing" -) - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInter(t *testing.T) { - - val := interface{}("something") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Inter()) - assert.Equal(t, val, New(m).Get("value").MustInter()) - assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) - assert.Equal(t, val, New(m).Get("nothing").Inter("something")) - - assert.Panics(t, func() { - New(m).Get("age").MustInter() - }) - -} - -func TestInterSlice(t *testing.T) { - - val := interface{}("something") - m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) - assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) - assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) - - 
assert.Panics(t, func() { - New(m).Get("nothing").MustInterSlice() - }) - -} - -func TestIsInter(t *testing.T) { - - var v *Value - - v = &Value{data: interface{}("something")} - assert.True(t, v.IsInter()) - - v = &Value{data: []interface{}{interface{}("something")}} - assert.True(t, v.IsInterSlice()) - -} - -func TestEachInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - count := 0 - replacedVals := make([]interface{}, 0) - assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) - -} - -func TestWhereInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - selected := v.WhereInter(func(i int, val interface{}) bool { - return i%2 == 0 - }).MustInterSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - grouped := v.GroupInter(func(i int, val interface{}) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]interface{}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - rawArr := v.MustInterSlice() - - replaced := v.ReplaceInter(func(index int, val interface{}) interface{} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInterSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - collected := v.CollectInter(func(index int, val interface{}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestMSI(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": 
"Tyler"}) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSI()) - assert.Equal(t, val, New(m).Get("value").MustMSI()) - assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) - assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) - - assert.Panics(t, func() { - New(m).Get("age").MustMSI() - }) - -} - -func TestMSISlice(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) - m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) - assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) - assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustMSISlice() - }) - -} - -func TestIsMSI(t *testing.T) { - - var v *Value - - v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} - assert.True(t, v.IsMSI()) - - v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - assert.True(t, v.IsMSISlice()) - -} - -func TestEachMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - count := 0 - replacedVals := make([]map[string]interface{}, 0) - assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) - assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) - assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) - -} - -func TestWhereMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { - return i%2 == 0 - }).MustMSISlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]map[string]interface{}) - - assert.Equal(t, 2, len(grouped)) - 
assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - rawArr := v.MustMSISlice() - - replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustMSISlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestObjxMap(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMap()) - assert.Equal(t, val, New(m).Get("value").MustObjxMap()) - assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) - assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) - - assert.Panics(t, func() { - New(m).Get("age").MustObjxMap() - }) - -} - -func TestObjxMapSlice(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) - assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) - assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustObjxMapSlice() - }) - -} - -func TestIsObjxMap(t *testing.T) { - - var v *Value - - v = &Value{data: (Map)(New(1))} - assert.True(t, v.IsObjxMap()) - - v = &Value{data: [](Map){(Map)(New(1))}} - assert.True(t, v.IsObjxMapSlice()) - -} - -func TestEachObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - count := 0 - replacedVals := make([](Map), 0) - 
assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) - -} - -func TestWhereObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - selected := v.WhereObjxMap(func(i int, val Map) bool { - return i%2 == 0 - }).MustObjxMapSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - grouped := v.GroupObjxMap(func(i int, val Map) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][](Map)) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - rawArr := v.MustObjxMapSlice() - - replaced := v.ReplaceObjxMap(func(index int, val Map) Map { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustObjxMapSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - collected := v.CollectObjxMap(func(index int, val Map) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestBool(t *testing.T) { - - val := bool(true) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Bool()) - assert.Equal(t, val, New(m).Get("value").MustBool()) - assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) - assert.Equal(t, val, New(m).Get("nothing").Bool(true)) - - assert.Panics(t, func() { - New(m).Get("age").MustBool() - }) - -} - -func TestBoolSlice(t *testing.T) { - - val := bool(true) - m := map[string]interface{}{"value": []bool{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) - assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) - assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustBoolSlice() - }) - -} - -func TestIsBool(t *testing.T) { - - var v *Value - - v = &Value{data: bool(true)} - 
assert.True(t, v.IsBool()) - - v = &Value{data: []bool{bool(true)}} - assert.True(t, v.IsBoolSlice()) - -} - -func TestEachBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} - count := 0 - replacedVals := make([]bool, 0) - assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) - -} - -func TestWhereBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - selected := v.WhereBool(func(i int, val bool) bool { - return i%2 == 0 - }).MustBoolSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - grouped := v.GroupBool(func(i int, val bool) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]bool) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - rawArr := v.MustBoolSlice() - - replaced := v.ReplaceBool(func(index int, val bool) bool { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustBoolSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - collected := v.CollectBool(func(index int, val bool) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestStr(t *testing.T) { - - val := string("hello") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Str()) - assert.Equal(t, val, New(m).Get("value").MustStr()) - assert.Equal(t, string(""), New(m).Get("nothing").Str()) - assert.Equal(t, val, New(m).Get("nothing").Str("hello")) - - assert.Panics(t, func() { - New(m).Get("age").MustStr() - }) - -} - -func TestStrSlice(t *testing.T) { - - val := string("hello") - m := map[string]interface{}{"value": []string{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) - assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) - assert.Equal(t, val, 
New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustStrSlice() - }) - -} - -func TestIsStr(t *testing.T) { - - var v *Value - - v = &Value{data: string("hello")} - assert.True(t, v.IsStr()) - - v = &Value{data: []string{string("hello")}} - assert.True(t, v.IsStrSlice()) - -} - -func TestEachStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - count := 0 - replacedVals := make([]string, 0) - assert.Equal(t, v, v.EachStr(func(i int, val string) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) - -} - -func TestWhereStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - selected := v.WhereStr(func(i int, val string) bool { - return i%2 == 0 - }).MustStrSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - grouped := v.GroupStr(func(i int, val string) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]string) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - rawArr := v.MustStrSlice() - - replaced := v.ReplaceStr(func(index int, val string) string { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustStrSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - collected := v.CollectStr(func(index int, val string) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt(t *testing.T) { - - val := int(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int()) - assert.Equal(t, val, New(m).Get("value").MustInt()) - assert.Equal(t, int(0), New(m).Get("nothing").Int()) - assert.Equal(t, val, New(m).Get("nothing").Int(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt() - }) - -} - -func TestIntSlice(t 
*testing.T) { - - val := int(1) - m := map[string]interface{}{"value": []int{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) - assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) - assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustIntSlice() - }) - -} - -func TestIsInt(t *testing.T) { - - var v *Value - - v = &Value{data: int(1)} - assert.True(t, v.IsInt()) - - v = &Value{data: []int{int(1)}} - assert.True(t, v.IsIntSlice()) - -} - -func TestEachInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} - count := 0 - replacedVals := make([]int, 0) - assert.Equal(t, v, v.EachInt(func(i int, val int) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) - -} - -func TestWhereInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - selected := v.WhereInt(func(i int, val int) bool { - return i%2 == 0 - }).MustIntSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - grouped := v.GroupInt(func(i int, val int) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - rawArr := v.MustIntSlice() - - replaced := v.ReplaceInt(func(index int, val int) int { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustIntSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - collected := v.CollectInt(func(index int, val int) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt8(t *testing.T) { - - val := int8(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8()) - assert.Equal(t, val, New(m).Get("value").MustInt8()) - assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) - assert.Equal(t, val, New(m).Get("nothing").Int8(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt8() - }) - -} - -func TestInt8Slice(t *testing.T) { - - val 
:= int8(1) - m := map[string]interface{}{"value": []int8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) - assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt8Slice() - }) - -} - -func TestIsInt8(t *testing.T) { - - var v *Value - - v = &Value{data: int8(1)} - assert.True(t, v.IsInt8()) - - v = &Value{data: []int8{int8(1)}} - assert.True(t, v.IsInt8Slice()) - -} - -func TestEachInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} - count := 0 - replacedVals := make([]int8, 0) - assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) - -} - -func TestWhereInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - selected := v.WhereInt8(func(i int, val int8) bool { - return i%2 == 0 - }).MustInt8Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - grouped := v.GroupInt8(func(i int, val int8) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int8) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - rawArr := v.MustInt8Slice() - - replaced := v.ReplaceInt8(func(index int, val int8) int8 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt8Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - collected := v.CollectInt8(func(index int, val int8) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt16(t *testing.T) { - - val := int16(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16()) - assert.Equal(t, val, New(m).Get("value").MustInt16()) - assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) - assert.Equal(t, val, New(m).Get("nothing").Int16(1)) - - assert.Panics(t, func() { - 
New(m).Get("age").MustInt16() - }) - -} - -func TestInt16Slice(t *testing.T) { - - val := int16(1) - m := map[string]interface{}{"value": []int16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0]) - assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt16Slice() - }) - -} - -func TestIsInt16(t *testing.T) { - - var v *Value - - v = &Value{data: int16(1)} - assert.True(t, v.IsInt16()) - - v = &Value{data: []int16{int16(1)}} - assert.True(t, v.IsInt16Slice()) - -} - -func TestEachInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} - count := 0 - replacedVals := make([]int16, 0) - assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) - -} - -func TestWhereInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - selected := v.WhereInt16(func(i int, val int16) bool { - return i%2 == 0 - }).MustInt16Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - grouped := v.GroupInt16(func(i int, val int16) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int16) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - rawArr := v.MustInt16Slice() - - replaced := v.ReplaceInt16(func(index int, val int16) int16 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt16Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - collected := v.CollectInt16(func(index int, val int16) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt32(t *testing.T) { - - val := int32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32()) - assert.Equal(t, val, New(m).Get("value").MustInt32()) - 
assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) - assert.Equal(t, val, New(m).Get("nothing").Int32(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt32() - }) - -} - -func TestInt32Slice(t *testing.T) { - - val := int32(1) - m := map[string]interface{}{"value": []int32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) - assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt32Slice() - }) - -} - -func TestIsInt32(t *testing.T) { - - var v *Value - - v = &Value{data: int32(1)} - assert.True(t, v.IsInt32()) - - v = &Value{data: []int32{int32(1)}} - assert.True(t, v.IsInt32Slice()) - -} - -func TestEachInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} - count := 0 - replacedVals := make([]int32, 0) - assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) - -} - -func TestWhereInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - selected := v.WhereInt32(func(i int, val int32) bool { - return i%2 == 0 - }).MustInt32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - grouped := v.GroupInt32(func(i int, val int32) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - rawArr := v.MustInt32Slice() - - replaced := v.ReplaceInt32(func(index int, val int32) int32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - collected := v.CollectInt32(func(index int, val int32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestInt64(t *testing.T) { - - val := int64(1) - m := 
map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64()) - assert.Equal(t, val, New(m).Get("value").MustInt64()) - assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) - assert.Equal(t, val, New(m).Get("nothing").Int64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustInt64() - }) - -} - -func TestInt64Slice(t *testing.T) { - - val := int64(1) - m := map[string]interface{}{"value": []int64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) - assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustInt64Slice() - }) - -} - -func TestIsInt64(t *testing.T) { - - var v *Value - - v = &Value{data: int64(1)} - assert.True(t, v.IsInt64()) - - v = &Value{data: []int64{int64(1)}} - assert.True(t, v.IsInt64Slice()) - -} - -func TestEachInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} - count := 0 - replacedVals := make([]int64, 0) - assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) - -} - -func TestWhereInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - selected := v.WhereInt64(func(i int, val int64) bool { - return i%2 == 0 - }).MustInt64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - grouped := v.GroupInt64(func(i int, val int64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - rawArr := v.MustInt64Slice() - - replaced := v.ReplaceInt64(func(index int, val int64) int64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustInt64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - collected := v.CollectInt64(func(index int, val int64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ 
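Note for reviewers: every deleted test in this file follows the same template per element type: the plain accessor falls back to the zero value (or to an optional default argument), while the Must* accessor panics on a missing or mistyped key. A small hypothetical sketch of that behavior, with invented keys, assuming the objx API exactly as exercised by these tests:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"count":   uint(7),
		"nothing": nil,
	})

	fmt.Println(m.Get("count").Uint())     // 7: key present with the right type
	fmt.Println(m.Get("nothing").Uint())   // 0: zero value when the data is nil
	fmt.Println(m.Get("nothing").Uint(42)) // 42: the optional default wins over the zero value

	// MustUint panics instead of falling back, mirroring the assert.Panics checks.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("MustUint panicked:", r)
		}
	}()
	_ = m.Get("absent").MustUint()
}
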
-// TESTS -// ************************************************************ - -func TestUint(t *testing.T) { - - val := uint(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint()) - assert.Equal(t, val, New(m).Get("value").MustUint()) - assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) - assert.Equal(t, val, New(m).Get("nothing").Uint(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint() - }) - -} - -func TestUintSlice(t *testing.T) { - - val := uint(1) - m := map[string]interface{}{"value": []uint{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) - assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUintSlice() - }) - -} - -func TestIsUint(t *testing.T) { - - var v *Value - - v = &Value{data: uint(1)} - assert.True(t, v.IsUint()) - - v = &Value{data: []uint{uint(1)}} - assert.True(t, v.IsUintSlice()) - -} - -func TestEachUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} - count := 0 - replacedVals := make([]uint, 0) - assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) - -} - -func TestWhereUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - selected := v.WhereUint(func(i int, val uint) bool { - return i%2 == 0 - }).MustUintSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - grouped := v.GroupUint(func(i int, val uint) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - rawArr := v.MustUintSlice() - - replaced := v.ReplaceUint(func(index int, val uint) uint { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUintSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - collected := v.CollectUint(func(index int, val uint) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// 
************************************************************ -// TESTS -// ************************************************************ - -func TestUint8(t *testing.T) { - - val := uint8(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint8()) - assert.Equal(t, val, New(m).Get("value").MustUint8()) - assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) - assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint8() - }) - -} - -func TestUint8Slice(t *testing.T) { - - val := uint8(1) - m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) - assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint8Slice() - }) - -} - -func TestIsUint8(t *testing.T) { - - var v *Value - - v = &Value{data: uint8(1)} - assert.True(t, v.IsUint8()) - - v = &Value{data: []uint8{uint8(1)}} - assert.True(t, v.IsUint8Slice()) - -} - -func TestEachUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - count := 0 - replacedVals := make([]uint8, 0) - assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) - -} - -func TestWhereUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - selected := v.WhereUint8(func(i int, val uint8) bool { - return i%2 == 0 - }).MustUint8Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - grouped := v.GroupUint8(func(i int, val uint8) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint8) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - rawArr := v.MustUint8Slice() - - replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint8Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - collected := v.CollectUint8(func(index int, val uint8) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, 
collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint16(t *testing.T) { - - val := uint16(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16()) - assert.Equal(t, val, New(m).Get("value").MustUint16()) - assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) - assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint16() - }) - -} - -func TestUint16Slice(t *testing.T) { - - val := uint16(1) - m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) - assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint16Slice() - }) - -} - -func TestIsUint16(t *testing.T) { - - var v *Value - - v = &Value{data: uint16(1)} - assert.True(t, v.IsUint16()) - - v = &Value{data: []uint16{uint16(1)}} - assert.True(t, v.IsUint16Slice()) - -} - -func TestEachUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - count := 0 - replacedVals := make([]uint16, 0) - assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) - -} - -func TestWhereUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - selected := v.WhereUint16(func(i int, val uint16) bool { - return i%2 == 0 - }).MustUint16Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - grouped := v.GroupUint16(func(i int, val uint16) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint16) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - rawArr := v.MustUint16Slice() - - replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint16Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - collected := v.CollectUint16(func(index int, val uint16) 
interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint32(t *testing.T) { - - val := uint32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint32()) - assert.Equal(t, val, New(m).Get("value").MustUint32()) - assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) - assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint32() - }) - -} - -func TestUint32Slice(t *testing.T) { - - val := uint32(1) - m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) - assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint32Slice() - }) - -} - -func TestIsUint32(t *testing.T) { - - var v *Value - - v = &Value{data: uint32(1)} - assert.True(t, v.IsUint32()) - - v = &Value{data: []uint32{uint32(1)}} - assert.True(t, v.IsUint32Slice()) - -} - -func TestEachUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - count := 0 - replacedVals := make([]uint32, 0) - assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) - -} - -func TestWhereUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - selected := v.WhereUint32(func(i int, val uint32) bool { - return i%2 == 0 - }).MustUint32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - grouped := v.GroupUint32(func(i int, val uint32) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - rawArr := v.MustUint32Slice() - - replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], 
rawArr[0]) - } - -} - -func TestCollectUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - collected := v.CollectUint32(func(index int, val uint32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUint64(t *testing.T) { - - val := uint64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64()) - assert.Equal(t, val, New(m).Get("value").MustUint64()) - assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) - assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUint64() - }) - -} - -func TestUint64Slice(t *testing.T) { - - val := uint64(1) - m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) - assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUint64Slice() - }) - -} - -func TestIsUint64(t *testing.T) { - - var v *Value - - v = &Value{data: uint64(1)} - assert.True(t, v.IsUint64()) - - v = &Value{data: []uint64{uint64(1)}} - assert.True(t, v.IsUint64Slice()) - -} - -func TestEachUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - count := 0 - replacedVals := make([]uint64, 0) - assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) - -} - -func TestWhereUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - selected := v.WhereUint64(func(i int, val uint64) bool { - return i%2 == 0 - }).MustUint64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - grouped := v.GroupUint64(func(i int, val uint64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - rawArr := v.MustUint64Slice() - - replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUint64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], 
rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - collected := v.CollectUint64(func(index int, val uint64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestUintptr(t *testing.T) { - - val := uintptr(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uintptr()) - assert.Equal(t, val, New(m).Get("value").MustUintptr()) - assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr()) - assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustUintptr() - }) - -} - -func TestUintptrSlice(t *testing.T) { - - val := uintptr(1) - m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) - assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustUintptrSlice() - }) - -} - -func TestIsUintptr(t *testing.T) { - - var v *Value - - v = &Value{data: uintptr(1)} - assert.True(t, v.IsUintptr()) - - v = &Value{data: []uintptr{uintptr(1)}} - assert.True(t, v.IsUintptrSlice()) - -} - -func TestEachUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - count := 0 - replacedVals := make([]uintptr, 0) - assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) - -} - -func TestWhereUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - selected := v.WhereUintptr(func(i int, val uintptr) bool { - return i%2 == 0 - }).MustUintptrSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - grouped := v.GroupUintptr(func(i int, val uintptr) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uintptr) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - rawArr := 
v.MustUintptrSlice() - - replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustUintptrSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - collected := v.CollectUintptr(func(index int, val uintptr) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestFloat32(t *testing.T) { - - val := float32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32()) - assert.Equal(t, val, New(m).Get("value").MustFloat32()) - assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) - assert.Equal(t, val, New(m).Get("nothing").Float32(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustFloat32() - }) - -} - -func TestFloat32Slice(t *testing.T) { - - val := float32(1) - m := map[string]interface{}{"value": []float32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) - assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustFloat32Slice() - }) - -} - -func TestIsFloat32(t *testing.T) { - - var v *Value - - v = &Value{data: float32(1)} - assert.True(t, v.IsFloat32()) - - v = &Value{data: []float32{float32(1)}} - assert.True(t, v.IsFloat32Slice()) - -} - -func TestEachFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} - count := 0 - replacedVals := make([]float32, 0) - assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) - -} - -func TestWhereFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - selected := v.WhereFloat32(func(i int, val float32) bool { - return i%2 == 0 - }).MustFloat32Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - grouped := v.GroupFloat32(func(i int, val float32) string { - return fmt.Sprintf("%v", i%2 == 0) - 
}).data.(map[string][]float32) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - rawArr := v.MustFloat32Slice() - - replaced := v.ReplaceFloat32(func(index int, val float32) float32 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustFloat32Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - collected := v.CollectFloat32(func(index int, val float32) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestFloat64(t *testing.T) { - - val := float64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64()) - assert.Equal(t, val, New(m).Get("value").MustFloat64()) - assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) - assert.Equal(t, val, New(m).Get("nothing").Float64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustFloat64() - }) - -} - -func TestFloat64Slice(t *testing.T) { - - val := float64(1) - m := map[string]interface{}{"value": []float64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) - assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustFloat64Slice() - }) - -} - -func TestIsFloat64(t *testing.T) { - - var v *Value - - v = &Value{data: float64(1)} - assert.True(t, v.IsFloat64()) - - v = &Value{data: []float64{float64(1)}} - assert.True(t, v.IsFloat64Slice()) - -} - -func TestEachFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} - count := 0 - replacedVals := make([]float64, 0) - assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) - -} - -func TestWhereFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - selected := v.WhereFloat64(func(i int, val float64) bool { - return i%2 == 0 - 
}).MustFloat64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - grouped := v.GroupFloat64(func(i int, val float64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]float64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - rawArr := v.MustFloat64Slice() - - replaced := v.ReplaceFloat64(func(index int, val float64) float64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustFloat64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - collected := v.CollectFloat64(func(index int, val float64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestComplex64(t *testing.T) { - - val := complex64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64()) - assert.Equal(t, val, New(m).Get("value").MustComplex64()) - assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) - assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustComplex64() - }) - -} - -func TestComplex64Slice(t *testing.T) { - - val := complex64(1) - m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0]) - assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustComplex64Slice() - }) - -} - -func TestIsComplex64(t *testing.T) { - - var v *Value - - v = &Value{data: complex64(1)} - assert.True(t, v.IsComplex64()) - - v = &Value{data: []complex64{complex64(1)}} - assert.True(t, v.IsComplex64Slice()) - -} - -func TestEachComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - count := 0 - replacedVals := make([]complex64, 0) - assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], 
v.MustComplex64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) - -} - -func TestWhereComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - selected := v.WhereComplex64(func(i int, val complex64) bool { - return i%2 == 0 - }).MustComplex64Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - grouped := v.GroupComplex64(func(i int, val complex64) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex64) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - rawArr := v.MustComplex64Slice() - - replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustComplex64Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - collected := v.CollectComplex64(func(index int, val complex64) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestComplex128(t *testing.T) { - - val := complex128(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex128()) - assert.Equal(t, val, New(m).Get("value").MustComplex128()) - assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) - assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) - - assert.Panics(t, func() { - New(m).Get("age").MustComplex128() - }) - -} - -func TestComplex128Slice(t *testing.T) { - - val := complex128(1) - m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) - assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustComplex128Slice() - }) - -} - -func TestIsComplex128(t *testing.T) { - - var v *Value - - v = &Value{data: complex128(1)} - assert.True(t, v.IsComplex128()) - - v = &Value{data: []complex128{complex128(1)}} - assert.True(t, 
v.IsComplex128Slice()) - -} - -func TestEachComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - count := 0 - replacedVals := make([]complex128, 0) - assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) - -} - -func TestWhereComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - selected := v.WhereComplex128(func(i int, val complex128) bool { - return i%2 == 0 - }).MustComplex128Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - grouped := v.GroupComplex128(func(i int, val complex128) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex128) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - rawArr := v.MustComplex128Slice() - - replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustComplex128Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - collected := v.CollectComplex128(func(index int, val complex128) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go index 7aaef06b1..956a2211d 100644 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go @@ -1,5 +1,10 @@ package objx +import ( + "fmt" + "strconv" +) + // Value provides methods for extracting interface{} data in various // types. 
type Value struct { @@ -11,3 +16,39 @@ type Value struct { func (v *Value) Data() interface{} { return v.data } + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go deleted file mode 100644 index 0bc65d92c..000000000 --- a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go +++ /dev/null @@ -1 +0,0 @@ -package objx diff --git a/vendor/github.com/xenolf/lego/.gitcookies.enc b/vendor/github.com/xenolf/lego/.gitcookies.enc deleted file mode 100644 index 09c303c94..000000000 Binary files a/vendor/github.com/xenolf/lego/.gitcookies.enc and /dev/null differ diff --git a/vendor/github.com/xenolf/lego/.gitignore b/vendor/github.com/xenolf/lego/.gitignore deleted file mode 100644 index 74d32f0ab..000000000 --- a/vendor/github.com/xenolf/lego/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -lego.exe -lego -.lego -.idea diff --git a/vendor/github.com/xenolf/lego/.travis.yml b/vendor/github.com/xenolf/lego/.travis.yml deleted file mode 100644 index d7a626e23..000000000 --- a/vendor/github.com/xenolf/lego/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go: -- 1.8.x -- 1.9.x -- tip -services: - - memcached -env: - - MEMCACHED_HOSTS=localhost:11211 -install: -- go get -t ./... -script: -- go vet ./... -- go test -v ./... -before_install: -- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_26c593b079d9_key -iv $encrypted_26c593b079d9_iv -in .gitcookies.enc -out .gitcookies -d || true' diff --git a/vendor/github.com/xenolf/lego/CHANGELOG.md b/vendor/github.com/xenolf/lego/CHANGELOG.md deleted file mode 100644 index d71cc23dc..000000000 --- a/vendor/github.com/xenolf/lego/CHANGELOG.md +++ /dev/null @@ -1,154 +0,0 @@ -# Changelog - -## [0.4.1] - 2017-09-26 - -### Added: -- lib: A new DNS provider for OTC. -- lib: The `AWS_HOSTED_ZONE_ID` environment variable for the Route53 DNS provider to directly specify the zone. -- lib: The `RFC2136_TIMEOUT` environment variable to make the timeout for the RFC2136 provider configurable. -- lib: The `GCE_SERVICE_ACCOUNT_FILE` environment variable to specify a service account file for the Google Cloud DNS provider. - -### Fixed: -- lib: Fixed an authentication issue with the latest Azure SDK. - -## [0.4.0] - 2017-07-13 - -### Added: -- CLI: The `--http-timeout` switch.
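
For orientation, a minimal sketch (not part of the patch) of how the `String()` accessor added to objx's `value.go` above behaves when driven through the `New(...).Get(...)` pattern used in the deleted tests. The import path is the canonical `github.com/stretchr/objx` rather than the vendored copy, and the map contents are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx" // assumed canonical import path; the patch touches a vendored copy
)

func main() {
	// Mirror the deleted tests above: build a map, then read values back out.
	m := objx.New(map[string]interface{}{
		"name":    "lego",
		"port":    8080,
		"ratio":   0.5,
		"admin":   true,
		"nothing": nil,
	})

	fmt.Println(m.Get("name").String())    // lego
	fmt.Println(m.Get("port").String())    // 8080
	fmt.Println(m.Get("ratio").String())   // 0.5
	fmt.Println(m.Get("admin").String())   // true
	fmt.Println(m.Get("nothing").String()) // <nil> (falls through to fmt.Sprintf("%#v", ...))
}
```

Scalar types route through `strconv`; anything without an explicit case falls back to `fmt.Sprintf("%#v", ...)`.
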
This allows for an override of the default client HTTP timeout. -- lib: The `HTTPClient` field. This allows for an override of the default HTTP timeout for library HTTP requests. -- CLI: The `--dns-timeout` switch. This allows for an override of the default DNS timeout for library DNS requests. -- lib: The `DNSTimeout` switch. This allows for an override of the default client DNS timeout. -- lib: The `QueryRegistration` function on `acme.Client`. This performs a POST on the client registration's URI and gets the updated registration info. -- lib: The `DeleteRegistration` function on `acme.Client`. This deletes the registration as currently configured in the client. -- lib: The `ObtainCertificateForCSR` function on `acme.Client`. The function allows to request a certificate for an already existing CSR. -- CLI: The `--csr` switch. Allows to use already existing CSRs for certificate requests on the command line. -- CLI: The `--pem` flag. This will change the certificate output so it outputs a .pem file concatanating the .key and .crt files together. -- CLI: The `--dns-resolvers` flag. Allows for users to override the default DNS servers used for recursive lookup. -- lib: Added a memcached provider for the HTTP challenge. -- CLI: The `--memcached-host` flag. This allows to use memcached for challenge storage. -- CLI: The `--must-staple` flag. This enables OCSP must staple in the generated CSR. -- lib: The library will now honor entries in your resolv.conf. -- lib: Added a field `IssuerCertificate` to the `CertificateResource` struct. -- lib: A new DNS provider for OVH. -- lib: A new DNS provider for DNSMadeEasy. -- lib: A new DNS provider for Linode. -- lib: A new DNS provider for AuroraDNS. -- lib: A new DNS provider for NS1. -- lib: A new DNS provider for Azure DNS. -- lib: A new DNS provider for Rackspace DNS. -- lib: A new DNS provider for Exoscale DNS. -- lib: A new DNS provider for DNSPod. - -### Changed: -- lib: Exported the `PreCheckDNS` field so library users can manage the DNS check in tests. -- lib: The library will now skip challenge solving if a valid Authz already exists. - -### Removed: -- lib: The library will no longer check for auto renewed certificates. This has been removed from the spec and is not supported in Boulder. - -### Fixed: -- lib: Fix a problem with the Route53 provider where it was possible the verification was published to a private zone. -- lib: Loading an account from file should fail if a integral part is nil -- lib: Fix a potential issue where the Dyn provider could resolve to an incorrect zone. -- lib: If a registration encounteres a conflict, the old registration is now recovered. -- CLI: The account.json file no longer has the executable flag set. -- lib: Made the client registration more robust in case of a 403 HTTP response. -- lib: Fixed an issue with zone lookups when they have a CNAME in another zone. -- lib: Fixed the lookup for the authoritative zone for Google Cloud. -- lib: Fixed a race condition in the nonce store. -- lib: The Google Cloud provider now removes old entries before trying to add new ones. -- lib: Fixed a condition where we could stall due to an early error condition. -- lib: Fixed an issue where Authz object could end up in an active state after an error condition. - -## [0.3.1] - 2016-04-19 - -### Added: -- lib: A new DNS provider for Vultr. - -### Fixed: -- lib: DNS Provider for DigitalOcean could not handle subdomains properly. -- lib: handleHTTPError should only try to JSON decode error messages with the right content type. 
-- lib: The propagation checker for the DNS challenge would not retry on send errors. - - -## [0.3.0] - 2016-03-19 - -### Added: -- CLI: The `--dns` switch. To include the DNS challenge for consideration. When using this switch, all other solvers are disabled. Supported are the following solvers: cloudflare, digitalocean, dnsimple, dyn, gandi, googlecloud, namecheap, route53, rfc2136 and manual. -- CLI: The `--accept-tos` switch. Indicates your acceptance of the Let's Encrypt terms of service without prompting you. -- CLI: The `--webroot` switch. The HTTP-01 challenge may now be completed by dropping a file into a webroot. When using this switch, all other solvers are disabled. -- CLI: The `--key-type` switch. This replaces the `--rsa-key-size` switch and supports the following key types: EC256, EC384, RSA2048, RSA4096 and RSA8192. -- CLI: The `--dnshelp` switch. This displays a more in-depth help topic for DNS solvers. -- CLI: The `--no-bundle` sub switch for the `run` and `renew` commands. When this switch is set, the CLI will not bundle the issuer certificate with your certificate. -- lib: A new type for challenge identifiers `Challenge` -- lib: A new interface for custom challenge providers `acme.ChallengeProvider` -- lib: A new interface for DNS-01 providers to allow for custom timeouts for the validation function `acme.ChallengeProviderTimeout` -- lib: SetChallengeProvider function. Pass a challenge identifier and a Provider to replace the default behaviour of a challenge. -- lib: The DNS-01 challenge has been implemented with modular solvers using the `ChallengeProvider` interface. Included solvers are: cloudflare, digitalocean, dnsimple, gandi, namecheap, route53, rfc2136 and manual. -- lib: The `acme.KeyType` type was added and is used for the configuration of crypto parameters for RSA and EC keys. Valid KeyTypes are: EC256, EC384, RSA2048, RSA4096 and RSA8192. - -### Changed -- lib: ExcludeChallenges now expects to be passed an array of `Challenge` types. -- lib: HTTP-01 now supports custom solvers using the `ChallengeProvider` interface. -- lib: TLS-SNI-01 now supports custom solvers using the `ChallengeProvider` interface. -- lib: The `GetPrivateKey` function in the `acme.User` interface is now expected to return a `crypto.PrivateKey` instead of an `rsa.PrivateKey` for EC compat. -- lib: The `acme.NewClient` function now expects an `acme.KeyType` instead of the keyBits parameter. - -### Removed -- CLI: The `rsa-key-size` switch was removed in favor of `key-type` to support EC keys. - -### Fixed -- lib: Fixed a race condition in HTTP-01 -- lib: Fixed an issue where status codes on ACME challenge responses could lead to no action being taken. -- lib: Fixed a regression when calling the Renew function with a SAN certificate. - -## [0.2.0] - 2016-01-09 - -### Added: -- CLI: The `--exclude` or `-x` switch. To exclude a challenge from being solved. -- CLI: The `--http` switch. To set the listen address and port of HTTP based challenges. Supports `host:port` and `:port` for any interface. -- CLI: The `--tls` switch. To set the listen address and port of TLS based challenges. Supports `host:port` and `:port` for any interface. -- CLI: The `--reuse-key` switch for the `renew` operation. This lets you reuse an existing private key for renewals. -- lib: ExcludeChallenges function. Pass an array of challenge identifiers to exclude them from solving. -- lib: SetHTTPAddress function. Pass a port to set the listen port for HTTP based challenges. -- lib: SetTLSAddress function. 
Pass a port to set the listen port of TLS based challenges. -- lib: acme.UserAgent variable. Use this to customize the user agent on all requests sent by lego. - -### Changed: -- lib: NewClient does no longer accept the optPort parameter -- lib: ObtainCertificate now returns a SAN certificate if you pass more then one domain. -- lib: GetOCSPForCert now returns the parsed OCSP response instead of just the status. -- lib: ObtainCertificate has a new parameter `privKey crypto.PrivateKey` which lets you reuse an existing private key for new certificates. -- lib: RenewCertificate now expects the PrivateKey property of the CertificateResource to be set only if you want to reuse the key. - -### Removed: -- CLI: The `--port` switch was removed. -- lib: RenewCertificate does no longer offer to also revoke your old certificate. - -### Fixed: -- CLI: Fix logic using the `--days` parameter for renew - -## [0.1.1] - 2015-12-18 - -### Added: -- CLI: Added a way to automate renewal through a cronjob using the --days parameter to renew - -### Changed: -- lib: Improved log output on challenge failures. - -### Fixed: -- CLI: The short parameter for domains would not get accepted -- CLI: The cli did not return proper exit codes on error library errors. -- lib: RenewCertificate did not properly renew SAN certificates. - -### Security -- lib: Fix possible DOS on GetOCSPForCert - -## [0.1.0] - 2015-12-03 -- Initial release - -[0.3.1]: https://github.com/xenolf/lego/compare/v0.3.0...v0.3.1 -[0.3.0]: https://github.com/xenolf/lego/compare/v0.2.0...v0.3.0 -[0.2.0]: https://github.com/xenolf/lego/compare/v0.1.1...v0.2.0 -[0.1.1]: https://github.com/xenolf/lego/compare/v0.1.0...v0.1.1 -[0.1.0]: https://github.com/xenolf/lego/tree/v0.1.0 diff --git a/vendor/github.com/xenolf/lego/CONTRIBUTING.md b/vendor/github.com/xenolf/lego/CONTRIBUTING.md deleted file mode 100644 index 9939a5ab3..000000000 --- a/vendor/github.com/xenolf/lego/CONTRIBUTING.md +++ /dev/null @@ -1,32 +0,0 @@ -# How to contribute to lego - -Contributions in the form of patches and proposals are essential to keep lego great and to make it even better. -To ensure a great and easy experience for everyone, please review the few guidelines in this document. - -## Bug reports - -- Use the issue search to see if the issue has already been reported. -- Also look for closed issues to see if your issue has already been fixed. -- If both of the above do not apply create a new issue and include as much information as possible. - -Bug reports should include all information a person could need to reproduce your problem without the need to -follow up for more information. If possible, provide detailed steps for us to reproduce it, the expected behaviour and the actual behaviour. - -## Feature proposals and requests - -Feature requests are welcome and should be discussed in an issue. -Please keep proposals focused on one thing at a time and be as detailed as possible. -It is up to you to make a strong point about your proposal and convince us of the merits and the added complexity of this feature. - -## Pull requests - -Patches, new features and improvements are a great way to help the project. -Please keep them focused on one thing and do not include unrelated commits. - -All pull requests which alter the behaviour of the program, add new behaviour or somehow alter code in a non-trivial way should **always** include tests. - -If you want to contribute a significant pull request (with a non-trivial workload for you) please **ask first**. 
We do not want you to spend -a lot of time on something the project's developers might not want to merge into the project. - -**IMPORTANT**: By submitting a patch, you agree to allow the project -owners to license your work under the terms of the [MIT License](LICENSE). diff --git a/vendor/github.com/xenolf/lego/Dockerfile b/vendor/github.com/xenolf/lego/Dockerfile deleted file mode 100644 index 511e403ce..000000000 --- a/vendor/github.com/xenolf/lego/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM alpine:3.6 - -ENV GOPATH /go -ENV LEGO_VERSION tags/v0.4.1 - -RUN apk update && apk add --no-cache --virtual run-dependencies ca-certificates && \ - apk add --no-cache --virtual build-dependencies go git musl-dev && \ - go get -u github.com/xenolf/lego && \ - cd ${GOPATH}/src/github.com/xenolf/lego && \ - git checkout ${LEGO_VERSION} && \ - go build -o /usr/bin/lego . && \ - apk del build-dependencies && \ - rm -rf ${GOPATH} - -ENTRYPOINT [ "/usr/bin/lego" ] diff --git a/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/xenolf/lego/LICENSE deleted file mode 100644 index 270cba089..000000000 --- a/vendor/github.com/xenolf/lego/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015-2017 Sebastian Erhart - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md deleted file mode 100644 index 9ec7e1f38..000000000 --- a/vendor/github.com/xenolf/lego/README.md +++ /dev/null @@ -1,273 +0,0 @@ -# lego -Let's Encrypt client and ACME library written in Go - -[![GoDoc](https://godoc.org/github.com/xenolf/lego/acme?status.svg)](https://godoc.org/github.com/xenolf/lego/acme) -[![Build Status](https://travis-ci.org/xenolf/lego.svg?branch=master)](https://travis-ci.org/xenolf/lego) -[![Dev Chat](https://img.shields.io/badge/dev%20chat-gitter-blue.svg?label=dev+chat)](https://gitter.im/xenolf/lego) -[![Beerpay](https://beerpay.io/xenolf/lego/badge.svg)](https://beerpay.io/xenolf/lego) - -#### General -This is a work in progress. Please do *NOT* run this on a production server and please report any bugs you find! - -#### Installation -lego supports both binary installs and install from source. - -To get the binary just download the latest release for your OS/Arch from [the release page](https://github.com/xenolf/lego/releases) -and put the binary somewhere convenient. lego does not assume anything about the location you run it from. 
- -To install from source, just run -``` -go get -u github.com/xenolf/lego -``` - -To build lego inside a Docker container, just run -``` -docker build -t lego . -``` -##### From the package manager -- [ArchLinux (AUR)](https://aur.archlinux.org/packages/lego-git): -``` -yaourt -S lego-git -``` -#### Features - -- Register with CA -- Obtain certificates, both from scratch or with an existing CSR -- Renew certificates -- Revoke certificates -- Robust implementation of all ACME challenges - - HTTP (http-01) - - TLS with Server Name Indication (tls-sni-01) - - DNS (dns-01) -- SAN certificate support -- Comes with multiple optional [DNS providers](https://github.com/xenolf/lego/tree/master/providers/dns) -- [Custom challenge solvers](https://github.com/xenolf/lego/wiki/Writing-a-Challenge-Solver) -- Certificate bundling -- OCSP helper function - -Please keep in mind that CLI switches and APIs are still subject to change. - -When using the standard `--path` option, all certificates and account configurations are saved to a folder *.lego* in the current working directory. - -#### Sudo -The CLI does not require root permissions but needs to bind to port 80 and 443 for certain challenges. -To run the CLI without sudo, you have four options: - -- Use setcap 'cap_net_bind_service=+ep' /path/to/program -- Pass the `--http` or/and the `--tls` option and specify a custom port to bind to. In this case you have to forward port 80/443 to these custom ports (see [Port Usage](#port-usage)). -- Pass the `--webroot` option and specify the path to your webroot folder. In this case the challenge will be written in a file in `.well-known/acme-challenge/` inside your webroot. -- Pass the `--dns` option and specify a DNS provider. - -#### Port Usage -By default lego assumes it is able to bind to ports 80 and 443 to solve challenges. -If this is not possible in your environment, you can use the `--http` and `--tls` options to instruct -lego to listen on that interface:port for any incoming challenges. - -If you are using this option, make sure you proxy all of the following traffic to these ports. - -HTTP Port: -- All plaintext HTTP requests to port 80 which begin with a request path of `/.well-known/acme-challenge/` for the HTTP challenge. - -TLS Port: -- All TLS handshakes on port 443 for the TLS-SNI challenge. - -This traffic redirection is only needed as long as lego solves challenges. As soon as you have received your certificates you can deactivate the forwarding. - -#### Usage - -``` -NAME: - lego - Let's Encrypt client written in Go - -USAGE: - lego [global options] command [command options] [arguments...] - -VERSION: - 0.4.1 - -COMMANDS: - run Register an account, then create and install a certificate - revoke Revoke a certificate - renew Renew a certificate - dnshelp Shows additional help for the --dns global option - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --domains value, -d value Add a domain to the process. Can be specified multiple times. - --csr value, -c value Certificate signing request filename, if an external CSR is to be used - --server value, -s value CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client. (default: "https://acme-v01.api.letsencrypt.org/directory") - --email value, -m value Email used for registration and recovery contact. - --accept-tos, -a By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service. 
- --key-type value, -k value Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384 (default: "rsa2048") - --path value Directory to use for storing the data (default: "/.lego") - --exclude value, -x value Explicitly disallow solvers by name from being used. Solvers: "http-01", "tls-sni-01". - --webroot value Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge - --memcached-host value Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts. - --http value Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port - --tls value Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port - --dns value Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage. - --http-timeout value Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds. (default: 0) - --dns-timeout value Set the DNS timeout value to a specific value in seconds. The default is 10 seconds. (default: 0) - --dns-resolvers value Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers. - --pem Generate a .pem file by concatanating the .key and .crt files together. - --help, -h show help - --version, -v print the version -``` - -##### CLI Example - -Assumes the `lego` binary has permission to bind to ports 80 and 443. You can get a pre-built binary from the [releases](https://github.com/xenolf/lego/releases) page. -If your environment does not allow you to bind to these ports, please read [Port Usage](#port-usage). - -Obtain a certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" run -``` - -(Find your certificate in the `.lego` folder of current working directory.) - -To renew the certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" renew -``` - -To renew the certificate only if it's older than 30 days - -```bash -$ lego --email="foo@bar.com" --domains="example.com" renew --days 30 -``` - -Obtain a certificate using the DNS challenge and AWS Route 53: - -```bash -$ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=my_id AWS_SECRET_ACCESS_KEY=my_key lego --email="foo@bar.com" --domains="example.com" --dns="route53" run -``` - -Note that `--dns=foo` implies `--exclude=http-01` and `--exclude=tls-sni-01`. lego will not attempt other challenges if you've told it to use DNS instead. - -Obtain a certificate given a certificate signing request (CSR) generated by something else: - -```bash -$ lego --email="foo@bar.com" --csr=/path/to/csr.pem run -``` - -(lego will infer the domains to be validated based on the contents of the CSR, so make sure the CSR's Common Name and optional SubjectAltNames are set correctly.) - -lego defaults to communicating with the production Let's Encrypt ACME server. If you'd like to test something without issuing real certificates, consider using the staging endpoint instead: - -```bash -$ lego --server=https://acme-staging.api.letsencrypt.org/directory … -``` - -#### DNS Challenge API Details - -##### AWS Route 53 - -The following AWS IAM policy document describes the permissions required for lego to complete the DNS challenge. -Replace `` with the Route 53 zone ID of the domain you are authorizing. 
- -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "route53:GetChange", - "route53:ListHostedZonesByName" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:ChangeResourceRecordSets" - ], - "Resource": [ - "arn:aws:route53:::hostedzone/" - ] - } - ] -} -``` - -#### ACME Library Usage - -A valid, but bare-bones example use of the acme package: - -```go -// You'll need a user or account type that implements acme.User -type MyUser struct { - Email string - Registration *acme.RegistrationResource - key crypto.PrivateKey -} -func (u MyUser) GetEmail() string { - return u.Email -} -func (u MyUser) GetRegistration() *acme.RegistrationResource { - return u.Registration -} -func (u MyUser) GetPrivateKey() crypto.PrivateKey { - return u.key -} - -// Create a user. New accounts need an email and private key to start. -const rsaKeySize = 2048 -privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize) -if err != nil { - log.Fatal(err) -} -myUser := MyUser{ - Email: "you@yours.com", - key: privateKey, -} - -// A client facilitates communication with the CA server. This CA URL is -// configured for a local dev instance of Boulder running in Docker in a VM. -client, err := acme.NewClient("http://192.168.99.100:4000/directory", &myUser, acme.RSA2048) -if err != nil { - log.Fatal(err) -} - -// We specify an http port of 5002 and an tls port of 5001 on all interfaces -// because we aren't running as root and can't bind a listener to port 80 and 443 -// (used later when we attempt to pass challenges). Keep in mind that we still -// need to proxy challenge traffic to port 5002 and 5001. -client.SetHTTPAddress(":5002") -client.SetTLSAddress(":5001") - -// New users will need to register -reg, err := client.Register() -if err != nil { - log.Fatal(err) -} -myUser.Registration = reg - -// SAVE THE USER. - -// The client has a URL to the current Let's Encrypt Subscriber -// Agreement. The user will need to agree to it. -err = client.AgreeToTOS() -if err != nil { - log.Fatal(err) -} - -// The acme library takes care of completing the challenges to obtain the certificate(s). -// The domains must resolve to this machine or you have to use the DNS challenge. -bundle := false -certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil, false) -if len(failures) > 0 { - log.Fatal(failures) -} - -// Each certificate comes back with the cert bytes, the bytes of the client's -// private key, and a certificate URL. SAVE THESE TO DISK. -fmt.Printf("%#v\n", certificates) - -// ... all done. -``` diff --git a/vendor/github.com/xenolf/lego/account.go b/vendor/github.com/xenolf/lego/account.go deleted file mode 100644 index 34856e16f..000000000 --- a/vendor/github.com/xenolf/lego/account.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "crypto" - "encoding/json" - "io/ioutil" - "os" - "path" - - "github.com/xenolf/lego/acme" -) - -// Account represents a users local saved credentials -type Account struct { - Email string `json:"email"` - key crypto.PrivateKey - Registration *acme.RegistrationResource `json:"registration"` - - conf *Configuration -} - -// NewAccount creates a new account for an email address -func NewAccount(email string, conf *Configuration) *Account { - accKeysPath := conf.AccountKeysPath(email) - // TODO: move to function in configuration? 
- accKeyPath := accKeysPath + string(os.PathSeparator) + email + ".key" - if err := checkFolder(accKeysPath); err != nil { - logger().Fatalf("Could not check/create directory for account %s: %v", email, err) - } - - var privKey crypto.PrivateKey - if _, err := os.Stat(accKeyPath); os.IsNotExist(err) { - - logger().Printf("No key found for account %s. Generating a curve P384 EC key.", email) - privKey, err = generatePrivateKey(accKeyPath) - if err != nil { - logger().Fatalf("Could not generate RSA private account key for account %s: %v", email, err) - } - - logger().Printf("Saved key to %s", accKeyPath) - } else { - privKey, err = loadPrivateKey(accKeyPath) - if err != nil { - logger().Fatalf("Could not load RSA private key from file %s: %v", accKeyPath, err) - } - } - - accountFile := path.Join(conf.AccountPath(email), "account.json") - if _, err := os.Stat(accountFile); os.IsNotExist(err) { - return &Account{Email: email, key: privKey, conf: conf} - } - - fileBytes, err := ioutil.ReadFile(accountFile) - if err != nil { - logger().Fatalf("Could not load file for account %s -> %v", email, err) - } - - var acc Account - err = json.Unmarshal(fileBytes, &acc) - if err != nil { - logger().Fatalf("Could not parse file for account %s -> %v", email, err) - } - - acc.key = privKey - acc.conf = conf - - if acc.Registration == nil { - logger().Fatalf("Could not load account for %s. Registration is nil.", email) - } - - if acc.conf == nil { - logger().Fatalf("Could not load account for %s. Configuration is nil.", email) - } - - return &acc -} - -/** Implementation of the acme.User interface **/ - -// GetEmail returns the email address for the account -func (a *Account) GetEmail() string { - return a.Email -} - -// GetPrivateKey returns the private RSA account key. -func (a *Account) GetPrivateKey() crypto.PrivateKey { - return a.key -} - -// GetRegistration returns the server registration -func (a *Account) GetRegistration() *acme.RegistrationResource { - return a.Registration -} - -/** End **/ - -// Save the account to disk -func (a *Account) Save() error { - jsonBytes, err := json.MarshalIndent(a, "", "\t") - if err != nil { - return err - } - - return ioutil.WriteFile( - path.Join(a.conf.AccountPath(a.Email), "account.json"), - jsonBytes, - 0600, - ) -} diff --git a/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/xenolf/lego/acme/challenges.go deleted file mode 100644 index 857900507..000000000 --- a/vendor/github.com/xenolf/lego/acme/challenges.go +++ /dev/null @@ -1,16 +0,0 @@ -package acme - -// Challenge is a string that identifies a particular type and version of ACME challenge. 
-type Challenge string - -const ( - // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http - // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge - HTTP01 = Challenge("http-01") - // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni - // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge - TLSSNI01 = Challenge("tls-sni-01") - // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns - // Note: DNS01Record returns a DNS record which will fulfill this challenge - DNS01 = Challenge("dns-01") -) diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go deleted file mode 100644 index bcb844371..000000000 --- a/vendor/github.com/xenolf/lego/acme/client.go +++ /dev/null @@ -1,825 +0,0 @@ -// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers. -package acme - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "regexp" - "strconv" - "strings" - "time" -) - -var ( - // Logger is an optional custom logger. - Logger *log.Logger -) - -const ( - // maxBodySize is the maximum size of body that we will read. - maxBodySize = 1024 * 1024 - - // overallRequestLimit is the overall number of requests per second limited on the - // “new-reg”, “new-authz” and “new-cert” endpoints. From the documentation the - // limitation is 20 requests per second, but using 20 as value doesn't work but 18 do - overallRequestLimit = 18 -) - -// logf writes a log entry. It uses Logger if not -// nil, otherwise it uses the default log.Logger. -func logf(format string, args ...interface{}) { - if Logger != nil { - Logger.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// User interface is to be implemented by users of this library. -// It is used by the client type to get user specific information. -type User interface { - GetEmail() string - GetRegistration() *RegistrationResource - GetPrivateKey() crypto.PrivateKey -} - -// Interface for all challenge solvers to implement. -type solver interface { - Solve(challenge challenge, domain string) error -} - -type validateFunc func(j *jws, domain, uri string, chlng challenge) error - -// Client is the user-friendly way to ACME -type Client struct { - directory directory - user User - jws *jws - keyType KeyType - solvers map[Challenge]solver -} - -// NewClient creates a new ACME client on behalf of the user. The client will depend on -// the ACME directory located at caDirURL for the rest of its actions. A private -// key of type keyType (see KeyType constants) will be generated when requesting a new -// certificate if one isn't provided.
-func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) { - privKey := user.GetPrivateKey() - if privKey == nil { - return nil, errors.New("private key was nil") - } - - var dir directory - if _, err := getJSON(caDirURL, &dir); err != nil { - return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err) - } - - if dir.NewRegURL == "" { - return nil, errors.New("directory missing new registration URL") - } - if dir.NewAuthzURL == "" { - return nil, errors.New("directory missing new authz URL") - } - if dir.NewCertURL == "" { - return nil, errors.New("directory missing new certificate URL") - } - if dir.RevokeCertURL == "" { - return nil, errors.New("directory missing revoke certificate URL") - } - - jws := &jws{privKey: privKey, directoryURL: caDirURL} - - // REVIEW: best possibility? - // Add all available solvers with the right index as per ACME - // spec to this map. Otherwise they won`t be found. - solvers := make(map[Challenge]solver) - solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}} - solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}} - - return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil -} - -// SetChallengeProvider specifies a custom provider p that can solve the given challenge type. -func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error { - switch challenge { - case HTTP01: - c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} - case TLSSNI01: - c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} - case DNS01: - c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p} - default: - return fmt.Errorf("Unknown challenge %v", challenge) - } - return nil -} - -// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges. -// If this option is not used, the default port 80 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -// -// NOTE: This REPLACES any custom HTTP provider previously set by calling -// c.SetChallengeProvider with the default HTTP challenge provider. -func (c *Client) SetHTTPAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[HTTP01]; ok { - chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port) - } - - return nil -} - -// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges. -// If this option is not used, the default port 443 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -// -// NOTE: This REPLACES any custom TLS-SNI provider previously set by calling -// c.SetChallengeProvider with the default TLS-SNI challenge provider. -func (c *Client) SetTLSAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[TLSSNI01]; ok { - chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port) - } - return nil -} - -// ExcludeChallenges explicitly removes challenges from the pool for solving. -func (c *Client) ExcludeChallenges(challenges []Challenge) { - // Loop through all challenges and delete the requested one if found. 
- for _, challenge := range challenges { - delete(c.solvers, challenge) - } -} - -// Register the current account to the ACME server. -func (c *Client) Register() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot register a nil client or user") - } - logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "new-reg", - } - if c.user.GetEmail() != "" { - regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} - } else { - regMsg.Contact = []string{} - } - - var serverReg Registration - var regURI string - hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) - if err != nil { - remoteErr, ok := err.(RemoteError) - if ok && remoteErr.StatusCode == 409 { - regURI = hdr.Get("Location") - regMsg = registrationMessage{ - Resource: "reg", - } - if hdr, err = postJSON(c.jws, regURI, regMsg, &serverReg); err != nil { - return nil, err - } - } else { - return nil, err - } - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - - if regURI == "" { - regURI = hdr.Get("Location") - } - reg.URI = regURI - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: The server did not return 'next' link to proceed") - } - - return reg, nil -} - -// DeleteRegistration deletes the client's user registration from the ACME -// server. -func (c *Client) DeleteRegistration() error { - if c == nil || c.user == nil { - return errors.New("acme: cannot unregister a nil client or user") - } - logf("[INFO] acme: Deleting account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "reg", - Delete: true, - } - - _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil) - if err != nil { - return err - } - - return nil -} - -// QueryRegistration runs a POST request on the client's registration and -// returns the result. -// -// This is similar to the Register function, but acting on an existing -// registration link and resource. -func (c *Client) QueryRegistration() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot query the registration of a nil client or user") - } - // Log the URL here instead of the email as the email may not be set - logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI) - - regMsg := registrationMessage{ - Resource: "reg", - } - - var serverReg Registration - hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg) - if err != nil { - return nil, err - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - // Location: header is not returned so this needs to be populated off of - // existing URI - reg.URI = c.user.GetRegistration().URI - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: No new-authz link in response to registration query") - } - - return reg, nil -} - -// AgreeToTOS updates the Client registration and sends the agreement to -// the server. 
-func (c *Client) AgreeToTOS() error { - reg := c.user.GetRegistration() - - reg.Body.Agreement = c.user.GetRegistration().TosURL - reg.Body.Resource = "reg" - _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) - return err -} - -// ObtainCertificateForCSR tries to obtain a certificate matching the CSR passed into it. -// The domains are inferred from the CommonName and SubjectAltNames, if any. The private key -// for this CSR is not required. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// This function will never return a partial certificate. If one domain in the list fails, -// the whole certificate will fail. -func (c *Client) ObtainCertificateForCSR(csr x509.CertificateRequest, bundle bool) (CertificateResource, map[string]error) { - // figure out what domains it concerns - // start with the common name - domains := []string{csr.Subject.CommonName} - - // loop over the SubjectAltName DNS names -DNSNames: - for _, sanName := range csr.DNSNames { - for _, existingName := range domains { - if existingName == sanName { - // duplicate; skip this name - continue DNSNames - } - } - - // name is unique - domains = append(domains, sanName) - } - - if bundle { - logf("[INFO][%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", ")) - } else { - logf("[INFO][%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", ")) - } - - challenges, failures := c.getChallenges(domains) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(failures) > 0 { - for _, auth := range challenges { - c.disableAuthz(auth) - } - - return CertificateResource{}, failures - } - - errs := c.solveChallenges(challenges) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(errs) > 0 { - return CertificateResource{}, errs - } - - logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - cert, err := c.requestCertificateForCsr(challenges, bundle, csr.Raw, nil) - if err != nil { - for _, chln := range challenges { - failures[chln.Domain] = err - } - } - - // Add the CSR to the certificate so that it can be used for renewals. - cert.CSR = pemEncode(&csr) - - return cert, failures -} - -// ObtainCertificate tries to obtain a single certificate using all domains passed into it. -// The first domain in domains is used for the CommonName field of the certificate, all other -// domains are added using the Subject Alternate Names extension. A new private key is generated -// for every invocation of this function. If you do not want that you can supply your own private key -// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// This function will never return a partial certificate. If one domain in the list fails, -// the whole certificate will fail. -func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, map[string]error) { - if bundle { - logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) - } else { - logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) - } - - challenges, failures := c.getChallenges(domains) - // If any challenge fails - return. 
Do not generate partial SAN certificates. - if len(failures) > 0 { - for _, auth := range challenges { - c.disableAuthz(auth) - } - - return CertificateResource{}, failures - } - - errs := c.solveChallenges(challenges) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(errs) > 0 { - return CertificateResource{}, errs - } - - logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - cert, err := c.requestCertificate(challenges, bundle, privKey, mustStaple) - if err != nil { - for _, chln := range challenges { - failures[chln.Domain] = err - } - } - - return cert, failures -} - -// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA. -func (c *Client) RevokeCertificate(certificate []byte) error { - certificates, err := parsePEMBundle(certificate) - if err != nil { - return err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return fmt.Errorf("Certificate bundle starts with a CA certificate") - } - - encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw) - - _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil) - return err -} - -// RenewCertificate takes a CertificateResource and tries to renew the certificate. -// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. -// Please be aware that this function will return a new certificate in ANY case that is not an error. -// If the server does not provide us with a new cert on a GET request to the CertURL -// this function will start a new-cert flow where a new certificate gets generated. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil. -func (c *Client) RenewCertificate(cert CertificateResource, bundle, mustStaple bool) (CertificateResource, error) { - // Input certificate is PEM encoded. Decode it here as we may need the decoded - // cert later on in the renewal process. The input may be a bundle or a single certificate. - certificates, err := parsePEMBundle(cert.Certificate) - if err != nil { - return CertificateResource{}, err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain) - } - - // This is just meant to be informal for the user. - timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) - logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours())) - - // We always need to request a new certificate to renew. - // Start by checking to see if the certificate was based off a CSR, and - // use that if it's defined. 
- if len(cert.CSR) > 0 { - csr, err := pemDecodeTox509CSR(cert.CSR) - if err != nil { - return CertificateResource{}, err - } - newCert, failures := c.ObtainCertificateForCSR(*csr, bundle) - return newCert, failures[cert.Domain] - } - - var privKey crypto.PrivateKey - if cert.PrivateKey != nil { - privKey, err = parsePEMPrivateKey(cert.PrivateKey) - if err != nil { - return CertificateResource{}, err - } - } - - var domains []string - var failures map[string]error - // check for SAN certificate - if len(x509Cert.DNSNames) > 1 { - domains = append(domains, x509Cert.Subject.CommonName) - for _, sanDomain := range x509Cert.DNSNames { - if sanDomain == x509Cert.Subject.CommonName { - continue - } - domains = append(domains, sanDomain) - } - } else { - domains = append(domains, x509Cert.Subject.CommonName) - } - - newCert, failures := c.ObtainCertificate(domains, bundle, privKey, mustStaple) - return newCert, failures[cert.Domain] -} - -// Looks through the challenge combinations to find a solvable match. -// Then solves the challenges in series and returns. -func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { - // loop through the resources, basically through the domains. - failures := make(map[string]error) - for _, authz := range challenges { - if authz.Body.Status == "valid" { - // Boulder might recycle recent validated authz (see issue #267) - logf("[INFO][%s] acme: Authorization already valid; skipping challenge", authz.Domain) - continue - } - // no solvers - no solving - if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { - for i, solver := range solvers { - // TODO: do not immediately fail if one domain fails to validate. - err := solver.Solve(authz.Body.Challenges[i], authz.Domain) - if err != nil { - c.disableAuthz(authz) - failures[authz.Domain] = err - } - } - } else { - c.disableAuthz(authz) - failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) - } - } - - return failures -} - -// Checks all combinations from the server and returns an array of -// solvers which should get executed in series. -func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver { - for _, combination := range auth.Combinations { - solvers := make(map[int]solver) - for _, idx := range combination { - if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok { - solvers[idx] = solver - } else { - logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type) - } - } - - // If we can solve the whole combination, return the solvers - if len(solvers) == len(combination) { - return solvers - } - } - return nil -} - -// Get the challenges needed to proof our identifier to the ACME server. 
-func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) { - resc, errc := make(chan authorizationResource), make(chan domainError) - - delay := time.Second / overallRequestLimit - - for _, domain := range domains { - time.Sleep(delay) - - go func(domain string) { - authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}} - var authz authorization - hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz) - if err != nil { - errc <- domainError{Domain: domain, Error: err} - return - } - - links := parseLinks(hdr["Link"]) - if links["next"] == "" { - logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain) - errc <- domainError{Domain: domain, Error: errors.New("Server did not provide next link to proceed")} - return - } - - resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain} - }(domain) - } - - responses := make(map[string]authorizationResource) - failures := make(map[string]error) - for i := 0; i < len(domains); i++ { - select { - case res := <-resc: - responses[res.Domain] = res - case err := <-errc: - failures[err.Domain] = err.Error - } - } - - challenges := make([]authorizationResource, 0, len(responses)) - for _, domain := range domains { - if challenge, ok := responses[domain]; ok { - challenges = append(challenges, challenge) - } - } - - logAuthz(challenges) - - close(resc) - close(errc) - - return challenges, failures -} - -func logAuthz(authz []authorizationResource) { - for _, auth := range authz { - logf("[INFO][%s] AuthURL: %s", auth.Domain, auth.AuthURL) - } -} - -// cleanAuthz loops through the passed in slice and disables any auths which are not "valid" -func (c *Client) disableAuthz(auth authorizationResource) error { - var disabledAuth authorization - _, err := postJSON(c.jws, auth.AuthURL, deactivateAuthMessage{Resource: "authz", Status: "deactivated"}, &disabledAuth) - return err -} - -func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, error) { - if len(authz) == 0 { - return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") - } - - var err error - if privKey == nil { - privKey, err = generatePrivateKey(c.keyType) - if err != nil { - return CertificateResource{}, err - } - } - - // determine certificate name(s) based on the authorization resources - commonName := authz[0] - var san []string - for _, auth := range authz[1:] { - san = append(san, auth.Domain) - } - - // TODO: should the CSR be customizable? 
- csr, err := generateCsr(privKey, commonName.Domain, san, mustStaple) - if err != nil { - return CertificateResource{}, err - } - - return c.requestCertificateForCsr(authz, bundle, csr, pemEncode(privKey)) -} - -func (c *Client) requestCertificateForCsr(authz []authorizationResource, bundle bool, csr []byte, privateKeyPem []byte) (CertificateResource, error) { - commonName := authz[0] - - var authURLs []string - for _, auth := range authz[1:] { - authURLs = append(authURLs, auth.AuthURL) - } - - csrString := base64.URLEncoding.EncodeToString(csr) - jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) - if err != nil { - return CertificateResource{}, err - } - - resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) - if err != nil { - return CertificateResource{}, err - } - - certRes := CertificateResource{ - Domain: commonName.Domain, - CertURL: resp.Header.Get("Location"), - PrivateKey: privateKeyPem, - } - - maxChecks := 1000 - for i := 0; i < maxChecks; i++ { - done, err := c.checkCertResponse(resp, &certRes, bundle) - resp.Body.Close() - if err != nil { - return CertificateResource{}, err - } - if done { - break - } - if i == maxChecks-1 { - return CertificateResource{}, fmt.Errorf("polled for certificate %d times; giving up", i) - } - resp, err = httpGet(certRes.CertURL) - if err != nil { - return CertificateResource{}, err - } - } - - return certRes, nil -} - -// checkCertResponse checks resp to see if a certificate is contained in the -// response, and if so, loads it into certRes and returns true. If the cert -// is not yet ready, it returns false. This function honors the waiting period -// required by the Retry-After header of the response, if specified. This -// function may read from resp.Body but does NOT close it. The certRes input -// should already have the Domain (common name) field populated. If bundle is -// true, the certificate will be bundled with the issuer's cert. -func (c *Client) checkCertResponse(resp *http.Response, certRes *CertificateResource, bundle bool) (bool, error) { - switch resp.StatusCode { - case 201, 202: - cert, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return false, err - } - - // The server returns a body with a length of zero if the - // certificate was not ready at the time this request completed. - // Otherwise the body is the certificate. - if len(cert) > 0 { - certRes.CertStableURL = resp.Header.Get("Content-Location") - certRes.AccountRef = c.user.GetRegistration().URI - - issuedCert := pemEncode(derCertificateBytes(cert)) - - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", certRes.Domain, err) - } else { - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - - // If bundle is true, we want to return a certificate bundle. - // To do this, we append the issuer cert to the issued cert. - if bundle { - issuedCert = append(issuedCert, issuerCert...) - } - } - - certRes.Certificate = issuedCert - certRes.IssuerCertificate = issuerCert - logf("[INFO][%s] Server responded with a certificate.", certRes.Domain) - return true, nil - } - - // The certificate was granted but is not yet issued. 
- // Check retry-after and loop. - ra := resp.Header.Get("Retry-After") - retryAfter, err := strconv.Atoi(ra) - if err != nil { - return false, err - } - - logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", certRes.Domain, retryAfter) - time.Sleep(time.Duration(retryAfter) * time.Second) - - return false, nil - default: - return false, handleHTTPError(resp) - } -} - -// getIssuerCertificate requests the issuer certificate -func (c *Client) getIssuerCertificate(url string) ([]byte, error) { - logf("[INFO] acme: Requesting issuer cert from %s", url) - resp, err := httpGet(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return nil, err - } - - _, err = x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, err - } - - return issuerBytes, err -} - -func parseLinks(links []string) map[string]string { - aBrkt := regexp.MustCompile("[<>]") - slver := regexp.MustCompile("(.+) *= *\"(.+)\"") - linkMap := make(map[string]string) - - for _, link := range links { - - link = aBrkt.ReplaceAllString(link, "") - parts := strings.Split(link, ";") - - matches := slver.FindStringSubmatch(parts[1]) - if len(matches) > 0 { - linkMap[matches[2]] = parts[0] - } - } - - return linkMap -} - -// validate makes the ACME server start validating a -// challenge response, only returning once it is done. -func validate(j *jws, domain, uri string, chlng challenge) error { - var challengeResponse challenge - - hdr, err := postJSON(j, uri, chlng, &challengeResponse) - if err != nil { - return err - } - - // After the path is sent, the ACME server will access our server. - // Repeatedly check the server for an updated status on our request. - for { - switch challengeResponse.Status { - case "valid": - logf("[INFO][%s] The server validated our request", domain) - return nil - case "pending": - break - case "invalid": - return handleChallengeError(challengeResponse) - default: - return errors.New("The server returned an unexpected state.") - } - - ra, err := strconv.Atoi(hdr.Get("Retry-After")) - if err != nil { - // The ACME server MUST return a Retry-After. - // If it doesn't, we'll just poll hard. 
- ra = 1 - } - time.Sleep(time.Duration(ra) * time.Second) - - hdr, err = getJSON(uri, &challengeResponse) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/xenolf/lego/acme/client_test.go deleted file mode 100644 index b18334c8a..000000000 --- a/vendor/github.com/xenolf/lego/acme/client_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package acme - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -func TestNewClient(t *testing.T) { - keyBits := 32 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - if client.jws == nil { - t.Fatalf("Expected client.jws to not be nil") - } - if expected, actual := key, client.jws.privKey; actual != expected { - t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual) - } - - if client.keyType != keyType { - t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType) - } - - if expected, actual := 2, len(client.solvers); actual != expected { - t.Fatalf("Expected %d solver(s), got %d", expected, actual) - } -} - -func TestClientOptPort(t *testing.T) { - keyBits := 32 // small value keeps test fast - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - optPort := "1234" - optHost := "" - client, err := NewClient(ts.URL, user, RSA2048) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - httpSolver, ok := client.solvers[HTTP01].(*httpChallenge) - if !ok { - t.Fatal("Expected http-01 solver to be httpChallenge type") - } - if httpSolver.jws != client.jws { - t.Error("Expected http-01 to have same jws as client") - } - if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort { - t.Errorf("Expected http-01 to have port %s but was %s", optPort, got) - } - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - - httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge) - if !ok { - t.Fatal("Expected tls-sni-01 solver to be httpChallenge type") - } - if httpsSolver.jws != client.jws { - t.Error("Expected tls-sni-01 to have same jws as client") - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", 
optPort, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got) - } - - // test setting different host - optHost = "127.0.0.1" - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } -} - -func TestNotHoldingLockWhileMakingHTTPRequests(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(250 * time.Millisecond) - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - writeJSONResponse(w, &challenge{Type: "http-01", Status: "Valid", URI: "http://example.com/", Token: "token"}) - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - ch := make(chan bool) - resultCh := make(chan bool) - go func() { - j.Nonce() - ch <- true - }() - go func() { - j.Nonce() - ch <- true - }() - go func() { - <-ch - <-ch - resultCh <- true - }() - select { - case <-resultCh: - case <-time.After(400 * time.Millisecond): - t.Fatal("JWS is probably holding a lock while making HTTP request") - } -} - -func TestValidate(t *testing.T) { - var statuses []string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Minimal stub ACME server for validation. - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - switch r.Method { - case "HEAD": - case "POST": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - case "GET": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - default: - http.Error(w, r.Method, http.StatusMethodNotAllowed) - } - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - - tsts := []struct { - name string - statuses []string - want string - }{ - {"POST-unexpected", []string{"weird"}, "unexpected"}, - {"POST-valid", []string{"valid"}, ""}, - {"POST-invalid", []string{"invalid"}, "Error Detail"}, - {"GET-unexpected", []string{"pending", "weird"}, "unexpected"}, - {"GET-valid", []string{"pending", "valid"}, ""}, - {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"}, - } - - for _, tst := range tsts { - statuses = tst.statuses - if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } else if err != nil && !strings.Contains(err.Error(), tst.want) { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } - } -} - -func TestGetChallenges(t *testing.T) { - var ts *httptest.Server - ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET", "HEAD": - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - writeJSONResponse(w, 
directory{NewAuthzURL: ts.URL, NewCertURL: ts.URL, NewRegURL: ts.URL, RevokeCertURL: ts.URL}) - case "POST": - writeJSONResponse(w, authorization{}) - } - })) - defer ts.Close() - - keyBits := 512 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: &RegistrationResource{NewAuthzURL: ts.URL}, - privatekey: key, - } - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - _, failures := client.getChallenges([]string{"example.com"}) - if failures["example.com"] == nil { - t.Fatal("Expecting \"Server did not provide next link to proceed\" error, got nil") - } -} - -// writeJSONResponse marshals the body as JSON and writes it to the response. -func writeJSONResponse(w http.ResponseWriter, body interface{}) { - bs, err := json.Marshal(body) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(bs); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -// stubValidate is like validate, except it does nothing. -func stubValidate(j *jws, domain, uri string, chlng challenge) error { - return nil -} - -type mockUser struct { - email string - regres *RegistrationResource - privatekey *rsa.PrivateKey -} - -func (u mockUser) GetEmail() string { return u.email } -func (u mockUser) GetRegistration() *RegistrationResource { return u.regres } -func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey } diff --git a/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go deleted file mode 100644 index fa868a90d..000000000 --- a/vendor/github.com/xenolf/lego/acme/crypto.go +++ /dev/null @@ -1,347 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "time" - - "encoding/asn1" - - "golang.org/x/crypto/ocsp" -) - -// KeyType represents the key algo as well as the key size or curve to use. -type KeyType string -type derCertificateBytes []byte - -// Constants for all key types we support. -const ( - EC256 = KeyType("P256") - EC384 = KeyType("P384") - RSA2048 = KeyType("2048") - RSA4096 = KeyType("4096") - RSA8192 = KeyType("8192") -) - -const ( - // OCSPGood means that the certificate is valid. - OCSPGood = ocsp.Good - // OCSPRevoked means that the certificate has been deliberately revoked. - OCSPRevoked = ocsp.Revoked - // OCSPUnknown means that the OCSP responder doesn't know about the certificate. - OCSPUnknown = ocsp.Unknown - // OCSPServerFailed means that the OCSP responder failed to process the request. - OCSPServerFailed = ocsp.ServerFailed -) - -// Constants for OCSP must staple -var ( - tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} - ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05} -) - -// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. The returned []byte can be passed directly -// into the OCSPStaple property of a tls.Certificate. 
If the bundle only contains the -// issued certificate, this function will try to get the issuer certificate from the -// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return -// values are nil, the OCSP status may be assumed OCSPUnknown. -func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { - certificates, err := parsePEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. - // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. If there's no - // OCSP server listed in the leaf cert, there's nothing to do. And if - // we have only one certificate so far, we need to get the issuer cert. - issuedCert := certificates[0] - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, errors.New("no OCSP server specified in cert") - } - if len(certificates) == 1 { - // TODO: build fallback. If this fails, check the remaining array entries. - if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, errors.New("no issuing certificate URL") - } - - resp, err := httpGet(issuedCert.IssuingCertificateURL[0]) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, nil, err - } - - issuerCert, err := x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, nil, err - } - - // Insert it into the slice on position 0 - // We want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - issuerCert := certificates[1] - - // Finally kick off the OCSP request. - ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, err - } - - reader := bytes.NewReader(ocspReq) - req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader) - if err != nil { - return nil, nil, err - } - defer req.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024)) - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, err - } - - return ocspResBytes, ocspRes, nil -} - -func getKeyAuthorization(token string, key interface{}) (string, error) { - var publicKey crypto.PublicKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - publicKey = k.Public() - case *rsa.PrivateKey: - publicKey = k.Public() - } - - // Generate the Key Authorization for the challenge - jwk := keyAsJWK(publicKey) - if jwk == nil { - return "", errors.New("Could not generate JWK from key.") - } - thumbBytes, err := jwk.Thumbprint(crypto.SHA256) - if err != nil { - return "", err - } - - // unpad the base64URL - keyThumb := base64.URLEncoding.EncodeToString(thumbBytes) - index := strings.Index(keyThumb, "=") - if index != -1 { - keyThumb = keyThumb[:index] - } - - return token + "." + keyThumb, nil -} - -// parsePEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. 
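The GetOCSPForCert doc comment above notes that the returned raw bytes can go straight into the OCSPStaple field of a tls.Certificate; a small sketch of that use, where the stapleOCSP helper name and the bundlePEM input are illustrative assumptions:

package ocspexample

import (
	"crypto/tls"

	"github.com/xenolf/lego/acme"
)

// stapleOCSP attaches a fresh OCSP response to an already-loaded certificate.
// bundlePEM is assumed to hold the issued certificate, optionally followed by
// its issuer; GetOCSPForCert fetches the issuer itself when it is missing.
func stapleOCSP(cert *tls.Certificate, bundlePEM []byte) error {
	ocspBytes, _, err := acme.GetOCSPForCert(bundlePEM)
	if err != nil {
		return err
	}
	cert.OCSPStaple = ocspBytes // raw DER response, as described in the doc comment above
	return nil
}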
-func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - - if len(certificates) == 0 { - return nil, errors.New("No certificates were found while parsing the bundle.") - } - - return certificates, nil -} - -func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(key) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - default: - return nil, errors.New("Unknown PEM header value") - } -} - -func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { - - switch keyType { - case EC256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case EC384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - - return nil, fmt.Errorf("Invalid KeyType: %s", keyType) -} - -func generateCsr(privateKey crypto.PrivateKey, domain string, san []string, mustStaple bool) ([]byte, error) { - template := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: domain, - }, - } - - if len(san) > 0 { - template.DNSNames = san - } - - if mustStaple { - template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ - Id: tlsFeatureExtensionOID, - Value: ocspMustStapleFeature, - }) - } - - return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) -} - -func pemEncode(data interface{}) []byte { - var pemBlock *pem.Block - switch key := data.(type) { - case *ecdsa.PrivateKey: - keyBytes, _ := x509.MarshalECPrivateKey(key) - pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - case *rsa.PrivateKey: - pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} - break - case *x509.CertificateRequest: - pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw} - break - case derCertificateBytes: - pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} - } - - return pem.EncodeToMemory(pemBlock) -} - -func pemDecode(data []byte) (*pem.Block, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") - } - - return pemBlock, nil -} - -func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - return x509.ParseCertificate(pemBlock.Bytes) -} - -func pemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - if pemBlock.Type != "CERTIFICATE REQUEST" { - return nil, fmt.Errorf("PEM block is not a certificate request") - } - - return x509.ParseCertificateRequest(pemBlock.Bytes) -} - -// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. -// The certificate has to be PEM encoded. Any other encodings like DER will fail. 
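GetPEMCertExpiration is the only expiry helper exported here; a short sketch of how a caller might use it to decide on renewal. The needsRenewal helper, the certPEM input, and the 30-day threshold are illustrative assumptions:

package renewcheck

import (
	"time"

	"github.com/xenolf/lego/acme"
)

// needsRenewal reports whether fewer than 30 days of validity remain.
// certPEM must be PEM encoded; DER input makes GetPEMCertExpiration fail,
// as noted in its doc comment above.
func needsRenewal(certPEM []byte) (bool, error) {
	notAfter, err := acme.GetPEMCertExpiration(certPEM)
	if err != nil {
		return false, err
	}
	return time.Until(notAfter) < 30*24*time.Hour, nil
}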
-func GetPEMCertExpiration(cert []byte) (time.Time, error) { - pemBlock, err := pemDecode(cert) - if pemBlock == nil { - return time.Time{}, err - } - - return getCertExpiration(pemBlock.Bytes) -} - -// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. -func getCertExpiration(cert []byte) (time.Time, error) { - pCert, err := x509.ParseCertificate(cert) - if err != nil { - return time.Time{}, err - } - - return pCert.NotAfter, nil -} - -func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) { - derBytes, err := generateDerCert(privKey, time.Time{}, domain) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil -} - -func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - if expiration.IsZero() { - expiration = time.Now().Add(365) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: "ACME Challenge TEMP", - }, - NotBefore: time.Now(), - NotAfter: expiration, - - KeyUsage: x509.KeyUsageKeyEncipherment, - BasicConstraintsValid: true, - DNSNames: []string{domain}, - } - - return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) -} - -func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser { - return http.MaxBytesReader(nil, rd, numBytes) -} diff --git a/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/xenolf/lego/acme/crypto_test.go deleted file mode 100644 index 6f43835fb..000000000 --- a/vendor/github.com/xenolf/lego/acme/crypto_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package acme - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "testing" - "time" -) - -func TestGeneratePrivateKey(t *testing.T) { - key, err := generatePrivateKey(RSA2048) - if err != nil { - t.Error("Error generating private key:", err) - } - if key == nil { - t.Error("Expected key to not be nil, but it was") - } -} - -func TestGenerateCSR(t *testing.T) { - key, err := rsa.GenerateKey(rand.Reader, 512) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - csr, err := generateCsr(key, "fizz.buzz", nil, true) - if err != nil { - t.Error("Error generating CSR:", err) - } - if csr == nil || len(csr) == 0 { - t.Error("Expected CSR with data, but it was nil or length 0") - } -} - -func TestPEMEncode(t *testing.T) { - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - reader := MockRandReader{b: buf} - key, err := rsa.GenerateKey(reader, 32) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - data := pemEncode(key) - - if data == nil { - t.Fatal("Expected result to not be nil, but it was") - } - if len(data) != 127 { - t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data)) - } -} - -func TestPEMCertExpiration(t *testing.T) { - privKey, err := generatePrivateKey(RSA2048) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - expiration := time.Now().Add(365) - expiration = expiration.Round(time.Second) - certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com") - if err != nil { - t.Fatal("Error generating cert:", err) - } - - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - // Some random string should return an error. 
- if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil { - t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime) - } - - // A DER encoded certificate should return an error. - if _, err := GetPEMCertExpiration(certBytes); err == nil { - t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.") - } - - // A PEM encoded certificate should work ok. - pemCert := pemEncode(derCertificateBytes(certBytes)) - if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) { - t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err) - } -} - -type MockRandReader struct { - b *bytes.Buffer -} - -func (r MockRandReader) Read(p []byte) (int, error) { - return r.b.Read(p) -} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go deleted file mode 100644 index d6844dcd4..000000000 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge.go +++ /dev/null @@ -1,309 +0,0 @@ -package acme - -import ( - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "log" - "net" - "strings" - "time" - - "github.com/miekg/dns" -) - -type preCheckDNSFunc func(fqdn, value string) (bool, error) - -var ( - // PreCheckDNS checks DNS propagation before notifying ACME that - // the DNS challenge is ready. - PreCheckDNS preCheckDNSFunc = checkDNSPropagation - fqdnToZone = map[string]string{} -) - -const defaultResolvConf = "/etc/resolv.conf" - -var defaultNameservers = []string{ - "google-public-dns-a.google.com:53", - "google-public-dns-b.google.com:53", -} - -var RecursiveNameservers = getNameservers(defaultResolvConf, defaultNameservers) - -// DNSTimeout is used to override the default DNS timeout of 10 seconds. 
-var DNSTimeout = 10 * time.Second - -// getNameservers attempts to get systems nameservers before falling back to the defaults -func getNameservers(path string, defaults []string) []string { - config, err := dns.ClientConfigFromFile(path) - if err != nil || len(config.Servers) == 0 { - return defaults - } - - systemNameservers := []string{} - for _, server := range config.Servers { - // ensure all servers have a port number - if _, _, err := net.SplitHostPort(server); err != nil { - systemNameservers = append(systemNameservers, net.JoinHostPort(server, "53")) - } else { - systemNameservers = append(systemNameservers, server) - } - } - return systemNameservers -} - -// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge -func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { - keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) - // base64URL encoding without padding - keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) - value = strings.TrimRight(keyAuthSha, "=") - ttl = 120 - fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) - return -} - -// dnsChallenge implements the dns-01 challenge according to ACME 7.5 -type dnsChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (s *dnsChallenge) Solve(chlng challenge, domain string) error { - logf("[INFO][%s] acme: Trying to solve DNS-01", domain) - - if s.provider == nil { - return errors.New("No DNS Provider configured") - } - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("Error presenting token: %s", err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("Error cleaning up %s: %v ", domain, err) - } - }() - - fqdn, value, _ := DNS01Record(domain, keyAuth) - - logf("[INFO][%s] Checking DNS record propagation using %+v", domain, RecursiveNameservers) - - var timeout, interval time.Duration - switch provider := s.provider.(type) { - case ChallengeProviderTimeout: - timeout, interval = provider.Timeout() - default: - timeout, interval = 60*time.Second, 2*time.Second - } - - err = WaitFor(timeout, interval, func() (bool, error) { - return PreCheckDNS(fqdn, value) - }) - if err != nil { - return err - } - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. -func checkDNSPropagation(fqdn, value string) (bool, error) { - // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true) - if err != nil { - return false, err - } - if r.Rcode == dns.RcodeSuccess { - // If we see a CNAME here then use the alias - for _, rr := range r.Answer { - if cn, ok := rr.(*dns.CNAME); ok { - if cn.Hdr.Name == fqdn { - fqdn = cn.Target - break - } - } - } - } - - authoritativeNss, err := lookupNameservers(fqdn) - if err != nil { - return false, err - } - - return checkAuthoritativeNss(fqdn, value, authoritativeNss) -} - -// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
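DNS01Record, shown above, is the piece a custom DNS challenge provider builds on: it derives the _acme-challenge FQDN, the TXT value, and a TTL from the key authorization. A sketch of how a provider's Present step might use it; the presentRecord helper and the publishTXT callback are placeholders for whatever DNS API the provider actually talks to:

package dnsexample

import (
	"fmt"

	"github.com/xenolf/lego/acme"
)

// presentRecord publishes the TXT record that fulfills a dns-01 challenge.
// publishTXT is a hypothetical hook for the provider's DNS API.
func presentRecord(domain, keyAuth string, publishTXT func(fqdn, value string, ttl int) error) error {
	// fqdn is "_acme-challenge.<domain>.", value is the unpadded base64url
	// SHA-256 of keyAuth, per DNS01Record above.
	fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
	if err := publishTXT(fqdn, value, ttl); err != nil {
		return fmt.Errorf("publishing TXT record for %s: %v", fqdn, err)
	}
	return nil
}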
-func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { - for _, ns := range nameservers { - r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) - if err != nil { - return false, err - } - - if r.Rcode != dns.RcodeSuccess { - return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) - } - - var found bool - for _, rr := range r.Answer { - if txt, ok := rr.(*dns.TXT); ok { - if strings.Join(txt.Txt, "") == value { - found = true - break - } - } - } - - if !found { - return false, fmt.Errorf("NS %s did not return the expected TXT record", ns) - } - } - - return true, nil -} - -// dnsQuery will query a nameserver, iterating through the supplied servers as it retries -// The nameserver should include a port, to facilitate testing where we talk to a mock dns server. -func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) { - m := new(dns.Msg) - m.SetQuestion(fqdn, rtype) - m.SetEdns0(4096, false) - - if !recursive { - m.RecursionDesired = false - } - - // Will retry the request based on the number of servers (n+1) - for i := 1; i <= len(nameservers)+1; i++ { - ns := nameservers[i%len(nameservers)] - udp := &dns.Client{Net: "udp", Timeout: DNSTimeout} - in, _, err = udp.Exchange(m, ns) - - if err == dns.ErrTruncated { - tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout} - // If the TCP request succeeds, the err will reset to nil - in, _, err = tcp.Exchange(m, ns) - } - - if err == nil { - break - } - } - return -} - -// lookupNameservers returns the authoritative nameservers for the given fqdn. -func lookupNameservers(fqdn string) ([]string, error) { - var authoritativeNss []string - - zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return nil, fmt.Errorf("Could not determine the zone: %v", err) - } - - r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true) - if err != nil { - return nil, err - } - - for _, rr := range r.Answer { - if ns, ok := rr.(*dns.NS); ok { - authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) - } - } - - if len(authoritativeNss) > 0 { - return authoritativeNss, nil - } - return nil, fmt.Errorf("Could not determine authoritative nameservers") -} - -// FindZoneByFqdn determines the zone apex for the given fqdn by recursing up the -// domain labels until the nameserver returns a SOA record in the answer section. -func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) { - // Do we have it cached? - if zone, ok := fqdnToZone[fqdn]; ok { - return zone, nil - } - - labelIndexes := dns.Split(fqdn) - for _, index := range labelIndexes { - domain := fqdn[index:] - - in, err := dnsQuery(domain, dns.TypeSOA, nameservers, true) - if err != nil { - return "", err - } - - // Any response code other than NOERROR and NXDOMAIN is treated as error - if in.Rcode != dns.RcodeNameError && in.Rcode != dns.RcodeSuccess { - return "", fmt.Errorf("Unexpected response code '%s' for %s", - dns.RcodeToString[in.Rcode], domain) - } - - // Check if we got a SOA RR in the answer section - if in.Rcode == dns.RcodeSuccess { - - // CNAME records cannot/should not exist at the root of a zone. - // So we skip a domain when a CNAME is found. 
- if dnsMsgContainsCNAME(in) { - continue - } - - for _, ans := range in.Answer { - if soa, ok := ans.(*dns.SOA); ok { - zone := soa.Hdr.Name - fqdnToZone[fqdn] = zone - return zone, nil - } - } - } - } - - return "", fmt.Errorf("Could not find the start of authority") -} - -// dnsMsgContainsCNAME checks for a CNAME answer in msg -func dnsMsgContainsCNAME(msg *dns.Msg) bool { - for _, ans := range msg.Answer { - if _, ok := ans.(*dns.CNAME); ok { - return true - } - } - return false -} - -// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. -func ClearFqdnCache() { - fqdnToZone = map[string]string{} -} - -// ToFqdn converts the name into a fqdn appending a trailing dot. -func ToFqdn(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } - return name + "." -} - -// UnFqdn converts the fqdn into a name removing the trailing dot. -func UnFqdn(name string) string { - n := len(name) - if n != 0 && name[n-1] == '.' { - return name[:n-1] - } - return name -} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go deleted file mode 100644 index 240384e60..000000000 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go +++ /dev/null @@ -1,53 +0,0 @@ -package acme - -import ( - "bufio" - "fmt" - "os" -) - -const ( - dnsTemplate = "%s %d IN TXT \"%s\"" -) - -// DNSProviderManual is an implementation of the ChallengeProvider interface -type DNSProviderManual struct{} - -// NewDNSProviderManual returns a DNSProviderManual instance. -func NewDNSProviderManual() (*DNSProviderManual, error) { - return &DNSProviderManual{}, nil -} - -// Present prints instructions for manually creating the TXT record -func (*DNSProviderManual) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - logf("[INFO] acme: Press 'Enter' when you are done") - - reader := bufio.NewReader(os.Stdin) - _, _ = reader.ReadString('\n') - return nil -} - -// CleanUp prints instructions for manually removing the TXT record -func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { - fqdn, _, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - return nil -} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go deleted file mode 100644 index 117ac3038..000000000 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package acme - -import ( - "bufio" - "crypto/rand" - "crypto/rsa" - "net/http" - "net/http/httptest" - "os" - "reflect" - "sort" - "strings" - "testing" - "time" -) - -var lookupNameserversTestsOK = []struct { - fqdn string - nss []string -}{ - {"books.google.com.ng.", - []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", "ns4.google.com."}, - }, - {"www.google.com.", - []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", 
"ns4.google.com."}, - }, - {"physics.georgetown.edu.", - []string{"ns1.georgetown.edu.", "ns2.georgetown.edu.", "ns3.georgetown.edu."}, - }, -} - -var lookupNameserversTestsErr = []struct { - fqdn string - error string -}{ - // invalid tld - {"_null.n0n0.", - "Could not determine the zone", - }, -} - -var findZoneByFqdnTests = []struct { - fqdn string - zone string -}{ - {"mail.google.com.", "google.com."}, // domain is a CNAME - {"foo.google.com.", "google.com."}, // domain is a non-existent subdomain - {"example.com.ac.", "ac."}, // domain is a eTLD - {"cross-zone-example.assets.sh.", "assets.sh."}, // domain is a cross-zone CNAME -} - -var checkAuthoritativeNssTests = []struct { - fqdn, value string - ns []string - ok bool -}{ - // TXT RR w/ expected value - {"8.8.8.8.asn.routeviews.org.", "151698.8.8.024", []string{"asnums.routeviews.org."}, - true, - }, - // No TXT RR - {"ns1.google.com.", "", []string{"ns2.google.com."}, - false, - }, -} - -var checkAuthoritativeNssTestsErr = []struct { - fqdn, value string - ns []string - error string -}{ - // TXT RR /w unexpected value - {"8.8.8.8.asn.routeviews.org.", "fe01=", []string{"asnums.routeviews.org."}, - "did not return the expected TXT record", - }, - // No TXT RR - {"ns1.google.com.", "fe01=", []string{"ns2.google.com."}, - "did not return the expected TXT record", - }, -} - -var checkResolvConfServersTests = []struct { - fixture string - expected []string - defaults []string -}{ - {"testdata/resolv.conf.1", []string{"10.200.3.249:53", "10.200.3.250:5353", "[2001:4860:4860::8844]:53", "[10.0.0.1]:5353"}, []string{"127.0.0.1:53"}}, - {"testdata/resolv.conf.nonexistant", []string{"127.0.0.1:53"}, []string{"127.0.0.1:53"}}, -} - -func TestDNSValidServerResponse(t *testing.T) { - PreCheckDNS = func(fqdn, value string) (bool, error) { - return true, nil - } - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Replay-Nonce", "12345") - w.Write([]byte("{\"type\":\"dns01\",\"status\":\"valid\",\"uri\":\"http://some.url\",\"token\":\"http8\"}")) - })) - - manualProvider, _ := NewDNSProviderManual() - jws := &jws{privKey: privKey, directoryURL: ts.URL} - solver := &dnsChallenge{jws: jws, validate: validate, provider: manualProvider} - clientChallenge := challenge{Type: "dns01", Status: "pending", URI: ts.URL, Token: "http8"} - - go func() { - time.Sleep(time.Second * 2) - f := bufio.NewWriter(os.Stdout) - defer f.Flush() - f.WriteString("\n") - }() - - if err := solver.Solve(clientChallenge, "example.com"); err != nil { - t.Errorf("VALID: Expected Solve to return no error but the error was -> %v", err) - } -} - -func TestPreCheckDNS(t *testing.T) { - ok, err := PreCheckDNS("acme-staging.api.letsencrypt.org", "fe01=") - if err != nil || !ok { - t.Errorf("preCheckDNS failed for acme-staging.api.letsencrypt.org") - } -} - -func TestLookupNameserversOK(t *testing.T) { - for _, tt := range lookupNameserversTestsOK { - nss, err := lookupNameservers(tt.fqdn) - if err != nil { - t.Fatalf("#%s: got %q; want nil", tt.fqdn, err) - } - - sort.Strings(nss) - sort.Strings(tt.nss) - - if !reflect.DeepEqual(nss, tt.nss) { - t.Errorf("#%s: got %v; want %v", tt.fqdn, nss, tt.nss) - } - } -} - -func TestLookupNameserversErr(t *testing.T) { - for _, tt := range lookupNameserversTestsErr { - _, err := lookupNameservers(tt.fqdn) - if err == nil { - t.Fatalf("#%s: expected %q (error); got ", tt.fqdn, tt.error) - } - - if !strings.Contains(err.Error(), tt.error) 
{ - t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err) - continue - } - } -} - -func TestFindZoneByFqdn(t *testing.T) { - for _, tt := range findZoneByFqdnTests { - res, err := FindZoneByFqdn(tt.fqdn, RecursiveNameservers) - if err != nil { - t.Errorf("FindZoneByFqdn failed for %s: %v", tt.fqdn, err) - } - if res != tt.zone { - t.Errorf("%s: got %s; want %s", tt.fqdn, res, tt.zone) - } - } -} - -func TestCheckAuthoritativeNss(t *testing.T) { - for _, tt := range checkAuthoritativeNssTests { - ok, _ := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns) - if ok != tt.ok { - t.Errorf("%s: got %t; want %t", tt.fqdn, ok, tt.ok) - } - } -} - -func TestCheckAuthoritativeNssErr(t *testing.T) { - for _, tt := range checkAuthoritativeNssTestsErr { - _, err := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns) - if err == nil { - t.Fatalf("#%s: expected %q (error); got ", tt.fqdn, tt.error) - } - if !strings.Contains(err.Error(), tt.error) { - t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err) - continue - } - } -} - -func TestResolveConfServers(t *testing.T) { - for _, tt := range checkResolvConfServersTests { - result := getNameservers(tt.fixture, tt.defaults) - - sort.Strings(result) - sort.Strings(tt.expected) - if !reflect.DeepEqual(result, tt.expected) { - t.Errorf("#%s: expected %q; got %q", tt.fixture, tt.expected, result) - } - } -} diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go deleted file mode 100644 index e4bc934c2..000000000 --- a/vendor/github.com/xenolf/lego/acme/error.go +++ /dev/null @@ -1,94 +0,0 @@ -package acme - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -const ( - tosAgreementError = "Must agree to subscriber agreement before any further actions" - invalidNonceError = "JWS has invalid anti-replay nonce" -) - -// RemoteError is the base type for all errors specific to the ACME protocol. -type RemoteError struct { - StatusCode int `json:"status,omitempty"` - Type string `json:"type"` - Detail string `json:"detail"` -} - -func (e RemoteError) Error() string { - return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) -} - -// TOSError represents the error which is returned if the user needs to -// accept the TOS. -// TODO: include the new TOS url if we can somehow obtain it. -type TOSError struct { - RemoteError -} - -// NonceError represents the error which is returned if the -// nonce sent by the client was not accepted by the server. 
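RemoteError above is the concrete type behind most failures this package reports, and TOSError and NonceError (declared next) wrap it so callers can react to two specific conditions. A hedged sketch, not taken from the removed code, of how calling code might branch on these types; the sample error value and its fields are constructed by hand for illustration:

package main

import (
	"fmt"
	"net/http"

	"github.com/xenolf/lego/acme"
)

// report distinguishes the error types declared in error.go.
func report(err error) {
	switch e := err.(type) {
	case acme.TOSError:
		fmt.Println("terms of service changed, re-agree and retry:", e.Detail)
	case acme.NonceError:
		fmt.Println("anti-replay nonce rejected, safe to retry:", e.Detail)
	case acme.RemoteError:
		fmt.Printf("ACME error %d (%s): %s\n", e.StatusCode, e.Type, e.Detail)
	default:
		fmt.Println("other error:", err)
	}
}

func main() {
	report(acme.RemoteError{
		StatusCode: http.StatusBadRequest,
		Type:       "urn:acme:error:malformed", // illustrative problem type
		Detail:     "example detail",
	})
}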
-type NonceError struct { - RemoteError -} - -type domainError struct { - Domain string - Error error -} - -type challengeError struct { - RemoteError - records []validationRecord -} - -func (c challengeError) Error() string { - - var errStr string - for _, validation := range c.records { - errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n", - validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress) - } - - return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr) -} - -func handleHTTPError(resp *http.Response) error { - var errorDetail RemoteError - - contentType := resp.Header.Get("Content-Type") - if contentType == "application/json" || contentType == "application/problem+json" { - err := json.NewDecoder(resp.Body).Decode(&errorDetail) - if err != nil { - return err - } - } else { - detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize)) - if err != nil { - return err - } - errorDetail.Detail = string(detailBytes) - } - - errorDetail.StatusCode = resp.StatusCode - - // Check for errors we handle specifically - if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError { - return TOSError{errorDetail} - } - - if errorDetail.StatusCode == http.StatusBadRequest && strings.HasPrefix(errorDetail.Detail, invalidNonceError) { - return NonceError{errorDetail} - } - - return errorDetail -} - -func handleChallengeError(chlng challenge) error { - return challengeError{chlng.Error, chlng.ValidationRecords} -} diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go deleted file mode 100644 index e469e0de2..000000000 --- a/vendor/github.com/xenolf/lego/acme/http.go +++ /dev/null @@ -1,160 +0,0 @@ -package acme - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "runtime" - "strings" - "time" -) - -// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. -var UserAgent string - -// HTTPClient is an HTTP client with a reasonable timeout value. -var HTTPClient = http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 15 * time.Second, - ResponseHeaderTimeout: 15 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, -} - -const ( - // defaultGoUserAgent is the Go HTTP package user agent string. Too - // bad it isn't exported. If it changes, we should update it here, too. - defaultGoUserAgent = "Go-http-client/1.1" - - // ourUserAgent is the User-Agent of this underlying library package. - ourUserAgent = "xenolf-acme" -) - -// httpHead performs a HEAD request with a proper User-Agent string. -// The response body (resp.Body) is already closed when this function returns. -func httpHead(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, fmt.Errorf("failed to head %q: %v", url, err) - } - - req.Header.Set("User-Agent", userAgent()) - - resp, err = HTTPClient.Do(req) - if err != nil { - return resp, fmt.Errorf("failed to do head %q: %v", url, err) - } - resp.Body.Close() - return resp, err -} - -// httpPost performs a POST request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. 
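HTTPClient above sets per-phase transport timeouts but no overall request deadline; the CLI's --http-timeout flag, handled later in cli_handlers.go, swaps the whole client out to add one. A minimal sketch of that override (the 30-second value is illustrative, not a library default):

package main

import (
	"net/http"
	"time"

	"github.com/xenolf/lego/acme"
)

func main() {
	// Replace the package-level client so every HEAD/GET/POST issued by the
	// acme package is bounded end to end (connect, TLS, headers and body).
	acme.HTTPClient = http.Client{Timeout: 30 * time.Second}
}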
-func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, fmt.Errorf("failed to post %q: %v", url, err) - } - req.Header.Set("Content-Type", bodyType) - req.Header.Set("User-Agent", userAgent()) - - return HTTPClient.Do(req) -} - -// httpGet performs a GET request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. -func httpGet(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, fmt.Errorf("failed to get %q: %v", url, err) - } - req.Header.Set("User-Agent", userAgent()) - - return HTTPClient.Do(req) -} - -// getJSON performs an HTTP GET request and parses the response body -// as JSON, into the provided respBody object. -func getJSON(uri string, respBody interface{}) (http.Header, error) { - resp, err := httpGet(uri) - if err != nil { - return nil, fmt.Errorf("failed to get json %q: %v", uri, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// postJSON performs an HTTP POST request and parses the response body -// as JSON, into the provided respBody object. -func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { - jsonBytes, err := json.Marshal(reqBody) - if err != nil { - return nil, errors.New("Failed to marshal network message...") - } - - resp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) - } - - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - - err := handleHTTPError(resp) - - switch err.(type) { - - case NonceError: - - // Retry once if the nonce was invalidated - - retryResp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) - } - - defer retryResp.Body.Close() - - if retryResp.StatusCode >= http.StatusBadRequest { - return retryResp.Header, handleHTTPError(retryResp) - } - - if respBody == nil { - return retryResp.Header, nil - } - - return retryResp.Header, json.NewDecoder(retryResp.Body).Decode(respBody) - - default: - return resp.Header, err - - } - - } - - if respBody == nil { - return resp.Header, nil - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// userAgent builds and returns the User-Agent string to use in requests. 
-func userAgent() string { - ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) - return strings.TrimSpace(ua) -} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/xenolf/lego/acme/http_challenge.go deleted file mode 100644 index 95cb1fd81..000000000 --- a/vendor/github.com/xenolf/lego/acme/http_challenge.go +++ /dev/null @@ -1,41 +0,0 @@ -package acme - -import ( - "fmt" - "log" -) - -type httpChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -// HTTP01ChallengePath returns the URL path for the `http-01` challenge -func HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -func (s *httpChallenge) Solve(chlng challenge, domain string) error { - - logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go deleted file mode 100644 index 64c6a8280..000000000 --- a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go +++ /dev/null @@ -1,79 +0,0 @@ -package acme - -import ( - "fmt" - "net" - "net/http" - "strings" -) - -// HTTPProviderServer implements ChallengeProvider for `http-01` challenge -// It may be instantiated without using the NewHTTPProviderServer function if -// you want only to use the default values. -type HTTPProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 80 respectively. -func NewHTTPProviderServer(iface, port string) *HTTPProviderServer { - return &HTTPProviderServer{iface: iface, port: port} -} - -// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests. -func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "80" - } - - var err error - s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port)) - if err != nil { - return fmt.Errorf("Could not start HTTP server for challenge -> %v", err) - } - - s.done = make(chan bool) - go s.serve(domain, token, keyAuth) - return nil -} - -// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)` -func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} - -func (s *HTTPProviderServer) serve(domain, token, keyAuth string) { - path := HTTP01ChallengePath(token) - - // The handler validates the HOST header and request type. 
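HTTP01ChallengePath above is the only URL logic in the http-01 solver: the CA fetches that well-known path over plain HTTP on port 80 and expects the key authorization back as text/plain, which is what the serve handler below writes. A small sketch of the resulting URL; the token value is made up:

package main

import (
	"fmt"

	"github.com/xenolf/lego/acme"
)

func main() {
	token := "LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDf" // hypothetical challenge token
	fmt.Println("http://example.com" + acme.HTTP01ChallengePath(token))
	// http://example.com/.well-known/acme-challenge/LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDf
}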
- // For validation it then writes the token the server returned with the challenge - mux := http.NewServeMux() - mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.Host, domain) && r.Method == "GET" { - w.Header().Add("Content-Type", "text/plain") - w.Write([]byte(keyAuth)) - logf("[INFO][%s] Served key authentication", domain) - } else { - logf("[WARN] Received request for domain %s with method %s but the domain did not match any challenge. Please ensure your are passing the HOST header properly.", r.Host, r.Method) - w.Write([]byte("TEST")) - } - }) - - httpServer := &http.Server{ - Handler: mux, - } - // Once httpServer is shut down we don't want any lingering - // connections, so disable KeepAlives. - httpServer.SetKeepAlivesEnabled(false) - httpServer.Serve(s.listener) - s.done <- true -} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/xenolf/lego/acme/http_challenge_test.go deleted file mode 100644 index 7400f56d4..000000000 --- a/vendor/github.com/xenolf/lego/acme/http_challenge_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "io/ioutil" - "strings" - "testing" -) - -func TestHTTPChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token - resp, err := httpGet(uri) - if err != nil { - return err - } - defer resp.Body.Close() - - if want := "text/plain"; resp.Header.Get("Content-Type") != want { - t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - bodyStr := string(body) - - if bodyStr != chlng.KeyAuthorization { - t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization) - } - - return nil - } - solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestHTTPChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http2"} - solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want, want18 := "invalid port 123456", "123456: invalid port"; !strings.HasSuffix(err.Error(), want) && !strings.HasSuffix(err.Error(), want18) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/xenolf/lego/acme/http_test.go deleted file mode 100644 index 33a48a331..000000000 --- a/vendor/github.com/xenolf/lego/acme/http_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package acme - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestHTTPHeadUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - _, err 
:= httpHead(ts.URL) - if err != nil { - t.Fatal(err) - } - - if method != "HEAD" { - t.Errorf("Expected method to be HEAD, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPGetUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpGet(ts.URL) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "GET" { - t.Errorf("Expected method to be GET, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPPostUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala")) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "POST" { - t.Errorf("Expected method to be POST, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestUserAgent(t *testing.T) { - ua := userAgent() - - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if strings.HasSuffix(ua, " ") { - t.Errorf("UA should not have trailing spaces; got '%s'", ua) - } - - // customize the UA by appending a value - UserAgent = "MyApp/1.2.3" - ua = userAgent() - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if !strings.Contains(ua, UserAgent) { - t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua) - } -} diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go deleted file mode 100644 index a39434342..000000000 --- a/vendor/github.com/xenolf/lego/acme/jws.go +++ /dev/null @@ -1,131 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "fmt" - "net/http" - "sync" - - "gopkg.in/square/go-jose.v1" -) - -type jws struct { - directoryURL string - privKey crypto.PrivateKey - nonces nonceManager -} - -func keyAsJWK(key interface{}) *jose.JsonWebKey { - switch k := key.(type) { - case *ecdsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "EC"} - case *rsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "RSA"} - - default: - return nil - } -} - -// Posts a JWS signed message to the specified URL. -// It does NOT close the response body, so the caller must -// do that if no error was returned. 
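TestUserAgent above pins down the shape of the User-Agent header built by userAgent() in http.go: the Go default agent, OS and architecture, this library's token, and an optional caller-supplied UserAgent. A standalone sketch of the same composition, not part of the removed tests; the MyApp token mirrors the value used in the test:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	const defaultGoUserAgent = "Go-http-client/1.1" // as in http.go above
	const ourUserAgent = "xenolf-acme"
	userAgent := "MyApp/1.2.3" // stands in for the package-level UserAgent variable

	ua := strings.TrimSpace(fmt.Sprintf("%s (%s; %s) %s %s",
		defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, userAgent))
	fmt.Println(ua) // e.g. Go-http-client/1.1 (linux; amd64) xenolf-acme MyApp/1.2.3
}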
-func (j *jws) post(url string, content []byte) (*http.Response, error) { - signedContent, err := j.signContent(content) - if err != nil { - return nil, fmt.Errorf("Failed to sign content -> %s", err.Error()) - } - - resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize()))) - if err != nil { - return nil, fmt.Errorf("Failed to HTTP POST to %s -> %s", url, err.Error()) - } - - nonce, nonceErr := getNonceFromResponse(resp) - if nonceErr == nil { - j.nonces.Push(nonce) - } - - return resp, nil -} - -func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) { - - var alg jose.SignatureAlgorithm - switch k := j.privKey.(type) { - case *rsa.PrivateKey: - alg = jose.RS256 - case *ecdsa.PrivateKey: - if k.Curve == elliptic.P256() { - alg = jose.ES256 - } else if k.Curve == elliptic.P384() { - alg = jose.ES384 - } - } - - signer, err := jose.NewSigner(alg, j.privKey) - if err != nil { - return nil, fmt.Errorf("Failed to create jose signer -> %s", err.Error()) - } - signer.SetNonceSource(j) - - signed, err := signer.Sign(content) - if err != nil { - return nil, fmt.Errorf("Failed to sign content -> %s", err.Error()) - } - return signed, nil -} - -func (j *jws) Nonce() (string, error) { - if nonce, ok := j.nonces.Pop(); ok { - return nonce, nil - } - - return getNonce(j.directoryURL) -} - -type nonceManager struct { - nonces []string - sync.Mutex -} - -func (n *nonceManager) Pop() (string, bool) { - n.Lock() - defer n.Unlock() - - if len(n.nonces) == 0 { - return "", false - } - - nonce := n.nonces[len(n.nonces)-1] - n.nonces = n.nonces[:len(n.nonces)-1] - return nonce, true -} - -func (n *nonceManager) Push(nonce string) { - n.Lock() - defer n.Unlock() - n.nonces = append(n.nonces, nonce) -} - -func getNonce(url string) (string, error) { - resp, err := httpHead(url) - if err != nil { - return "", fmt.Errorf("Failed to get nonce from HTTP HEAD -> %s", err.Error()) - } - - return getNonceFromResponse(resp) -} - -func getNonceFromResponse(resp *http.Response) (string, error) { - nonce := resp.Header.Get("Replay-Nonce") - if nonce == "" { - return "", fmt.Errorf("Server did not respond with a proper nonce header.") - } - - return nonce, nil -} diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go deleted file mode 100644 index 79ccf154e..000000000 --- a/vendor/github.com/xenolf/lego/acme/messages.go +++ /dev/null @@ -1,115 +0,0 @@ -package acme - -import ( - "time" - - "gopkg.in/square/go-jose.v1" -) - -type directory struct { - NewAuthzURL string `json:"new-authz"` - NewCertURL string `json:"new-cert"` - NewRegURL string `json:"new-reg"` - RevokeCertURL string `json:"revoke-cert"` -} - -type registrationMessage struct { - Resource string `json:"resource"` - Contact []string `json:"contact"` - Delete bool `json:"delete,omitempty"` -} - -// Registration is returned by the ACME server after the registration -// The client implementation should save this registration somewhere. -type Registration struct { - Resource string `json:"resource,omitempty"` - ID int `json:"id"` - Key jose.JsonWebKey `json:"key"` - Contact []string `json:"contact"` - Agreement string `json:"agreement,omitempty"` - Authorizations string `json:"authorizations,omitempty"` - Certificates string `json:"certificates,omitempty"` -} - -// RegistrationResource represents all important informations about a registration -// of which the client needs to keep track itself. 
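The unexported directory struct above maps the four endpoint URLs advertised by an ACME v1 directory document; inside the package it is filled via getJSON from http.go. Because both names are unexported, the sketch below uses a local mirror of the struct and a hand-written document instead; the URLs are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Local mirror of the directory struct above, with the same json tags.
	var dir struct {
		NewAuthzURL   string `json:"new-authz"`
		NewCertURL    string `json:"new-cert"`
		NewRegURL     string `json:"new-reg"`
		RevokeCertURL string `json:"revoke-cert"`
	}
	doc := `{
	  "new-authz":   "https://acme-v01.api.letsencrypt.org/acme/new-authz",
	  "new-cert":    "https://acme-v01.api.letsencrypt.org/acme/new-cert",
	  "new-reg":     "https://acme-v01.api.letsencrypt.org/acme/new-reg",
	  "revoke-cert": "https://acme-v01.api.letsencrypt.org/acme/revoke-cert"
	}`
	if err := json.Unmarshal([]byte(doc), &dir); err != nil {
		panic(err)
	}
	fmt.Println(dir.NewRegURL) // where new registrations are POSTed
}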
-type RegistrationResource struct { - Body Registration `json:"body,omitempty"` - URI string `json:"uri,omitempty"` - NewAuthzURL string `json:"new_authzr_uri,omitempty"` - TosURL string `json:"terms_of_service,omitempty"` -} - -type authorizationResource struct { - Body authorization - Domain string - NewCertURL string - AuthURL string -} - -type authorization struct { - Resource string `json:"resource,omitempty"` - Identifier identifier `json:"identifier"` - Status string `json:"status,omitempty"` - Expires time.Time `json:"expires,omitempty"` - Challenges []challenge `json:"challenges,omitempty"` - Combinations [][]int `json:"combinations,omitempty"` -} - -type identifier struct { - Type string `json:"type"` - Value string `json:"value"` -} - -type validationRecord struct { - URI string `json:"url,omitempty"` - Hostname string `json:"hostname,omitempty"` - Port string `json:"port,omitempty"` - ResolvedAddresses []string `json:"addressesResolved,omitempty"` - UsedAddress string `json:"addressUsed,omitempty"` -} - -type challenge struct { - Resource string `json:"resource,omitempty"` - Type Challenge `json:"type,omitempty"` - Status string `json:"status,omitempty"` - URI string `json:"uri,omitempty"` - Token string `json:"token,omitempty"` - KeyAuthorization string `json:"keyAuthorization,omitempty"` - TLS bool `json:"tls,omitempty"` - Iterations int `json:"n,omitempty"` - Error RemoteError `json:"error,omitempty"` - ValidationRecords []validationRecord `json:"validationRecord,omitempty"` -} - -type csrMessage struct { - Resource string `json:"resource,omitempty"` - Csr string `json:"csr"` - Authorizations []string `json:"authorizations"` -} - -type revokeCertMessage struct { - Resource string `json:"resource"` - Certificate string `json:"certificate"` -} - -type deactivateAuthMessage struct { - Resource string `json:"resource,omitempty"` - Status string `jsom:"status"` -} - -// CertificateResource represents a CA issued certificate. -// PrivateKey, Certificate and IssuerCertificate are all -// already PEM encoded and can be directly written to disk. -// Certificate may be a certificate bundle, depending on the -// options supplied to create it. -type CertificateResource struct { - Domain string `json:"domain"` - CertURL string `json:"certUrl"` - CertStableURL string `json:"certStableUrl"` - AccountRef string `json:"accountRef,omitempty"` - PrivateKey []byte `json:"-"` - Certificate []byte `json:"-"` - IssuerCertificate []byte `json:"-"` - CSR []byte `json:"-"` -} diff --git a/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/xenolf/lego/acme/pop_challenge.go deleted file mode 100644 index 8d2a213b0..000000000 --- a/vendor/github.com/xenolf/lego/acme/pop_challenge.go +++ /dev/null @@ -1 +0,0 @@ -package acme diff --git a/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/xenolf/lego/acme/provider.go deleted file mode 100644 index d177ff07a..000000000 --- a/vendor/github.com/xenolf/lego/acme/provider.go +++ /dev/null @@ -1,28 +0,0 @@ -package acme - -import "time" - -// ChallengeProvider enables implementing a custom challenge -// provider. Present presents the solution to a challenge available to -// be solved. CleanUp will be called by the challenge if Present ends -// in a non-error state. 
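CertificateResource above keeps the PEM material in fields tagged json:"-", so only the metadata survives JSON marshalling; that is why saveCertRes in cli_handlers.go (further down) writes the key and certificate to separate .key and .crt files next to the .json file. A short sketch, with all values illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/xenolf/lego/acme"
)

func main() {
	res := acme.CertificateResource{
		Domain:      "example.com",
		CertURL:     "https://acme-v01.api.letsencrypt.org/acme/cert/deadbeef",
		PrivateKey:  []byte("-----BEGIN RSA PRIVATE KEY-----\n..."),
		Certificate: []byte("-----BEGIN CERTIFICATE-----\n..."),
	}

	out, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(out))
	// Only domain, certUrl and certStableUrl appear; PrivateKey, Certificate,
	// IssuerCertificate and CSR are dropped by their json:"-" tags.
}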
-type ChallengeProvider interface { - Present(domain, token, keyAuth string) error - CleanUp(domain, token, keyAuth string) error -} - -// ChallengeProviderTimeout allows for implementing a -// ChallengeProvider where an unusually long timeout is required when -// waiting for an ACME challenge to be satisfied, such as when -// checking for DNS record progagation. If an implementor of a -// ChallengeProvider provides a Timeout method, then the return values -// of the Timeout method will be used when appropriate by the acme -// package. The interval value is the time between checks. -// -// The default values used for timeout and interval are 60 seconds and -// 2 seconds respectively. These are used when no Timeout method is -// defined for the ChallengeProvider. -type ChallengeProviderTimeout interface { - ChallengeProvider - Timeout() (timeout, interval time.Duration) -} diff --git a/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 b/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 deleted file mode 100644 index 3098f99b5..000000000 --- a/vendor/github.com/xenolf/lego/acme/testdata/resolv.conf.1 +++ /dev/null @@ -1,5 +0,0 @@ -domain company.com -nameserver 10.200.3.249 -nameserver 10.200.3.250:5353 -nameserver 2001:4860:4860::8844 -nameserver [10.0.0.1]:5353 diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go deleted file mode 100644 index 34383cbfa..000000000 --- a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go +++ /dev/null @@ -1,67 +0,0 @@ -package acme - -import ( - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "log" -) - -type tlsSNIChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { - // FIXME: https://github.com/ietf-wg-acme/acme/pull/22 - // Currently we implement this challenge to track boulder, not the current spec! 
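The two interfaces above are the whole extension surface for custom solvers: Present and CleanUp are mandatory, and an optional Timeout method replaces the 60s/2s polling defaults, which matters mostly for slow DNS propagation. A hypothetical no-op provider, shown only to illustrate the shape; a real provider would create and remove the challenge record:

package main

import (
	"fmt"
	"time"

	"github.com/xenolf/lego/acme"
)

// noopProvider is a made-up provider used only for illustration.
type noopProvider struct{}

func (noopProvider) Present(domain, token, keyAuth string) error {
	fmt.Printf("would publish key authorization for %s\n", domain)
	return nil
}

func (noopProvider) CleanUp(domain, token, keyAuth string) error {
	fmt.Printf("would remove key authorization for %s\n", domain)
	return nil
}

// Timeout opts in to ChallengeProviderTimeout, stretching the defaults to
// values more suited to slow DNS propagation (values are illustrative).
func (noopProvider) Timeout() (timeout, interval time.Duration) {
	return 10 * time.Minute, 10 * time.Second
}

// Compile-time checks that both interfaces are satisfied.
var (
	_ acme.ChallengeProvider        = noopProvider{}
	_ acme.ChallengeProviderTimeout = noopProvider{}
)

func main() {
	var p acme.ChallengeProviderTimeout = noopProvider{}
	_ = p.Present("example.com", "token", "token.keyAuth")
	t, i := p.Timeout()
	fmt.Println("poll for", t, "every", i)
	_ = p.CleanUp("example.com", "token", "token.keyAuth")
}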
- - logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey) - if err != nil { - return err - } - - err = t.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := t.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge -func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) { - // generate a new RSA key for the certificates - tempPrivKey, err := generatePrivateKey(RSA2048) - if err != nil { - return tls.Certificate{}, "", err - } - rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) - rsaPrivPEM := pemEncode(rsaPrivKey) - - zBytes := sha256.Sum256([]byte(keyAuth)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - tempCertPEM, err := generatePemCert(rsaPrivKey, domain) - if err != nil { - return tls.Certificate{}, "", err - } - - certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) - if err != nil { - return tls.Certificate{}, "", err - } - - return certificate, domain, nil -} diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go deleted file mode 100644 index df00fbb5a..000000000 --- a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go +++ /dev/null @@ -1,62 +0,0 @@ -package acme - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" -) - -// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge -// It may be instantiated without using the NewTLSProviderServer function if -// you want only to use the default values. -type TLSProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 443 respectively. -func NewTLSProviderServer(iface, port string) *TLSProviderServer { - return &TLSProviderServer{iface: iface, port: port} -} - -// Present makes the keyAuth available as a cert -func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "443" - } - - cert, _, err := TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - tlsConf := new(tls.Config) - tlsConf.Certificates = []tls.Certificate{cert} - - s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) - if err != nil { - return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) - } - - s.done = make(chan bool) - go func() { - http.Serve(s.listener, nil) - s.done <- true - }() - return nil -} - -// CleanUp closes the HTTP server. 
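TLSSNI01ChallengeCert above hex-encodes the SHA-256 of the key authorization and splits it into the two 32-character labels of a .acme.invalid name, which the validation connection then expects as the SNI value. The derivation on its own, as a sketch with a made-up key authorization:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	keyAuth := "some-token.some-thumbprint" // hypothetical key authorization

	zBytes := sha256.Sum256([]byte(keyAuth))
	z := hex.EncodeToString(zBytes[:]) // 64 hex characters
	fmt.Printf("%s.%s.acme.invalid\n", z[:32], z[32:])
}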
-func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go deleted file mode 100644 index 83b2833a9..000000000 --- a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "strings" - "testing" -) - -func TestTLSSNIChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{ - InsecureSkipVerify: true, - }) - if err != nil { - t.Errorf("Expected to connect to challenge server without an error. %s", err.Error()) - } - - // Expect the server to only return one certificate - connState := conn.ConnectionState() - if count := len(connState.PeerCertificates); count != 1 { - t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count) - } - - remoteCert := connState.PeerCertificates[0] - if count := len(remoteCert.DNSNames); count != 1 { - t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count) - } - - zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - - if remoteCert.DNSNames[0] != domain { - t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0]) - } - - return nil - } - solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestTLSSNIChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"} - solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want, want18 := "invalid port 123456", "123456: invalid port"; !strings.HasSuffix(err.Error(), want) && !strings.HasSuffix(err.Error(), want18) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/xenolf/lego/acme/utils.go deleted file mode 100644 index 2fa0db304..000000000 --- a/vendor/github.com/xenolf/lego/acme/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package acme - -import ( - "fmt" - "time" -) - -// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. -func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { - var lastErr string - timeup := time.After(timeout) - for { - select { - case <-timeup: - return fmt.Errorf("Time limit exceeded. 
Last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(interval) - } -} diff --git a/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/xenolf/lego/acme/utils_test.go deleted file mode 100644 index 158af4116..000000000 --- a/vendor/github.com/xenolf/lego/acme/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package acme - -import ( - "testing" - "time" -) - -func TestWaitForTimeout(t *testing.T) { - c := make(chan error) - go func() { - err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) { - return false, nil - }) - c <- err - }() - - timeout := time.After(4 * time.Second) - select { - case <-timeout: - t.Fatal("timeout exceeded") - case err := <-c: - if err == nil { - t.Errorf("expected timeout error; got %v", err) - } - } -} diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go deleted file mode 100644 index 58567be9f..000000000 --- a/vendor/github.com/xenolf/lego/cli.go +++ /dev/null @@ -1,232 +0,0 @@ -// Let's Encrypt client to go! -// CLI application for generating Let's Encrypt certificates using the ACME package. -package main - -import ( - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" -) - -// Logger is used to log errors; if nil, the default log.Logger is used. -var Logger *log.Logger - -// logger is an helper function to retrieve the available logger -func logger() *log.Logger { - if Logger == nil { - Logger = log.New(os.Stderr, "", log.LstdFlags) - } - return Logger -} - -var gittag string - -func main() { - app := cli.NewApp() - app.Name = "lego" - app.Usage = "Let's Encrypt client written in Go" - - version := "0.4.1" - if strings.HasPrefix(gittag, "v") { - version = gittag - } - - app.Version = version - - acme.UserAgent = "lego/" + app.Version - - defaultPath := "" - cwd, err := os.Getwd() - if err == nil { - defaultPath = path.Join(cwd, ".lego") - } - - app.Before = func(c *cli.Context) error { - if c.GlobalString("path") == "" { - logger().Fatal("Could not determine current working directory. Please pass --path.") - } - return nil - } - - app.Commands = []cli.Command{ - { - Name: "run", - Usage: "Register an account, then create and install a certificate", - Action: run, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "no-bundle", - Usage: "Do not create a certificate bundle by adding the issuers certificate to the new certificate.", - }, - cli.BoolFlag{ - Name: "must-staple", - Usage: "Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.", - }, - }, - }, - { - Name: "revoke", - Usage: "Revoke a certificate", - Action: revoke, - }, - { - Name: "renew", - Usage: "Renew a certificate", - Action: renew, - Flags: []cli.Flag{ - cli.IntFlag{ - Name: "days", - Value: 0, - Usage: "The number of days left on a certificate to renew it.", - }, - cli.BoolFlag{ - Name: "reuse-key", - Usage: "Used to indicate you want to reuse your current private key for the new certificate.", - }, - cli.BoolFlag{ - Name: "no-bundle", - Usage: "Do not create a certificate bundle by adding the issuers certificate to the new certificate.", - }, - cli.BoolFlag{ - Name: "must-staple", - Usage: "Include the OCSP must staple TLS extension in the CSR and generated certificate. 
Only works if the CSR is generated by lego.", - }, - }, - }, - { - Name: "dnshelp", - Usage: "Shows additional help for the --dns global option", - Action: dnshelp, - }, - } - - app.Flags = []cli.Flag{ - cli.StringSliceFlag{ - Name: "domains, d", - Usage: "Add a domain to the process. Can be specified multiple times.", - }, - cli.StringFlag{ - Name: "csr, c", - Usage: "Certificate signing request filename, if an external CSR is to be used", - }, - cli.StringFlag{ - Name: "server, s", - Value: "https://acme-v01.api.letsencrypt.org/directory", - Usage: "CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.", - }, - cli.StringFlag{ - Name: "email, m", - Usage: "Email used for registration and recovery contact.", - }, - cli.BoolFlag{ - Name: "accept-tos, a", - Usage: "By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.", - }, - cli.StringFlag{ - Name: "key-type, k", - Value: "rsa2048", - Usage: "Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384", - }, - cli.StringFlag{ - Name: "path", - Usage: "Directory to use for storing the data", - Value: defaultPath, - }, - cli.StringSliceFlag{ - Name: "exclude, x", - Usage: "Explicitly disallow solvers by name from being used. Solvers: \"http-01\", \"tls-sni-01\".", - }, - cli.StringFlag{ - Name: "webroot", - Usage: "Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge", - }, - cli.StringSliceFlag{ - Name: "memcached-host", - Usage: "Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.", - }, - cli.StringFlag{ - Name: "http", - Usage: "Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port", - }, - cli.StringFlag{ - Name: "tls", - Usage: "Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port", - }, - cli.StringFlag{ - Name: "dns", - Usage: "Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.", - }, - cli.IntFlag{ - Name: "http-timeout", - Usage: "Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.", - }, - cli.IntFlag{ - Name: "dns-timeout", - Usage: "Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.", - }, - cli.StringSliceFlag{ - Name: "dns-resolvers", - Usage: "Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use the system resolvers, or Google's DNS resolvers if the system's cannot be determined.", - }, - cli.BoolFlag{ - Name: "pem", - Usage: "Generate a .pem file by concatanating the .key and .crt files together.", - }, - } - - err = app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} - -func dnshelp(c *cli.Context) error { - fmt.Printf( - `Credentials for DNS providers must be passed through environment variables. 
- -Here is an example bash command using the CloudFlare DNS provider: - - $ CLOUDFLARE_EMAIL=foo@bar.com \ - CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \ - lego --dns cloudflare --domains www.example.com --email me@bar.com run - -`) - - w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) - fmt.Fprintln(w, "Valid providers and their associated credential environment variables:") - fmt.Fprintln(w) - fmt.Fprintln(w, "\tazure:\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP") - fmt.Fprintln(w, "\tauroradns:\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT") - fmt.Fprintln(w, "\tcloudflare:\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY") - fmt.Fprintln(w, "\tdigitalocean:\tDO_AUTH_TOKEN") - fmt.Fprintln(w, "\tdnsimple:\tDNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN") - fmt.Fprintln(w, "\tdnsmadeeasy:\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET") - fmt.Fprintln(w, "\texoscale:\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT") - fmt.Fprintln(w, "\tgandi:\tGANDI_API_KEY") - fmt.Fprintln(w, "\tgcloud:\tGCE_PROJECT, GCE_SERVICE_ACCOUNT_FILE") - fmt.Fprintln(w, "\tlinode:\tLINODE_API_KEY") - fmt.Fprintln(w, "\tmanual:\tnone") - fmt.Fprintln(w, "\tnamecheap:\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY") - fmt.Fprintln(w, "\trackspace:\tRACKSPACE_USER, RACKSPACE_API_KEY") - fmt.Fprintln(w, "\trfc2136:\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\n\t\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER") - fmt.Fprintln(w, "\troute53:\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_HOSTED_ZONE_ID") - fmt.Fprintln(w, "\tdyn:\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD") - fmt.Fprintln(w, "\tvultr:\tVULTR_API_KEY") - fmt.Fprintln(w, "\tovh:\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY") - fmt.Fprintln(w, "\tpdns:\tPDNS_API_KEY, PDNS_API_URL") - fmt.Fprintln(w, "\tdnspod:\tDNSPOD_API_KEY") - fmt.Fprintln(w, "\totc:\tOTC_USER_NAME, OTC_PASSWORD, OTC_PROJECT_NAME, OTC_DOMAIN_NAME, OTC_IDENTITY_ENDPOINT") - w.Flush() - - fmt.Println(` -For a more detailed explanation of a DNS provider's credential variables, -please consult their online documentation.`) - - return nil -} diff --git a/vendor/github.com/xenolf/lego/cli_handlers.go b/vendor/github.com/xenolf/lego/cli_handlers.go deleted file mode 100644 index b8790c4b2..000000000 --- a/vendor/github.com/xenolf/lego/cli_handlers.go +++ /dev/null @@ -1,423 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "crypto/x509" - "encoding/json" - "encoding/pem" - "io/ioutil" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" - "github.com/xenolf/lego/providers/dns" - "github.com/xenolf/lego/providers/http/memcached" - "github.com/xenolf/lego/providers/http/webroot" -) - -func checkFolder(path string) error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return os.MkdirAll(path, 0700) - } - return nil -} - -func setup(c *cli.Context) (*Configuration, *Account, *acme.Client) { - - if c.GlobalIsSet("http-timeout") { - acme.HTTPClient = http.Client{Timeout: time.Duration(c.GlobalInt("http-timeout")) * time.Second} - } - - if c.GlobalIsSet("dns-timeout") { - acme.DNSTimeout = time.Duration(c.GlobalInt("dns-timeout")) * time.Second - } - - if len(c.GlobalStringSlice("dns-resolvers")) > 0 { - resolvers := []string{} - for _, resolver := range c.GlobalStringSlice("dns-resolvers") { - if !strings.Contains(resolver, ":") { - resolver += ":53" - } - resolvers = append(resolvers, resolver) - } - acme.RecursiveNameservers 
= resolvers - } - - err := checkFolder(c.GlobalString("path")) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - conf := NewConfiguration(c) - if len(c.GlobalString("email")) == 0 { - logger().Fatal("You have to pass an account (email address) to the program using --email or -m") - } - - //TODO: move to account struct? Currently MUST pass email. - acc := NewAccount(c.GlobalString("email"), conf) - - keyType, err := conf.KeyType() - if err != nil { - logger().Fatal(err.Error()) - } - - client, err := acme.NewClient(c.GlobalString("server"), acc, keyType) - if err != nil { - logger().Fatalf("Could not create client: %s", err.Error()) - } - - if len(c.GlobalStringSlice("exclude")) > 0 { - client.ExcludeChallenges(conf.ExcludedSolvers()) - } - - if c.GlobalIsSet("webroot") { - provider, err := webroot.NewHTTPProvider(c.GlobalString("webroot")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.HTTP01, provider) - - // --webroot=foo indicates that the user specifically want to do a HTTP challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01}) - } - if c.GlobalIsSet("memcached-host") { - provider, err := memcached.NewMemcachedProvider(c.GlobalStringSlice("memcached-host")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.HTTP01, provider) - - // --memcached-host=foo:11211 indicates that the user specifically want to do a HTTP challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01}) - } - if c.GlobalIsSet("http") { - if strings.Index(c.GlobalString("http"), ":") == -1 { - logger().Fatalf("The --http switch only accepts interface:port or :port for its argument.") - } - client.SetHTTPAddress(c.GlobalString("http")) - } - - if c.GlobalIsSet("tls") { - if strings.Index(c.GlobalString("tls"), ":") == -1 { - logger().Fatalf("The --tls switch only accepts interface:port or :port for its argument.") - } - client.SetTLSAddress(c.GlobalString("tls")) - } - - if c.GlobalIsSet("dns") { - provider, err := dns.NewDNSChallengeProviderByName(c.GlobalString("dns")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.DNS01, provider) - - // --dns=foo indicates that the user specifically want to do a DNS challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01}) - } - - return conf, acc, client -} - -func saveCertRes(certRes acme.CertificateResource, conf *Configuration) { - // We store the certificate, private key and metadata in different files - // as web servers would not be able to work with a combined file. 
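The setup helper above shows the programmatic surface the CLI drives: SetChallengeProvider, SetHTTPAddress and ExcludeChallenges on an acme.Client. A hedged, minimal equivalent of its --http branch outside the CLI; the client itself would come from acme.NewClient, whose account argument is outside this hunk, so it is left to the caller, and port 5002 is illustrative:

package main

import (
	"github.com/xenolf/lego/acme"
)

// configureHTTPOnly mirrors the --http/--webroot branches of setup above:
// solve http-01 with the built-in standalone server and exclude the other
// challenge types.
func configureHTTPOnly(client *acme.Client) {
	client.SetChallengeProvider(acme.HTTP01, acme.NewHTTPProviderServer("", "5002"))
	client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01})
}

func main() {
	var client *acme.Client // obtained from acme.NewClient in real code
	if client != nil {
		configureHTTPOnly(client)
	}
}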
- certOut := path.Join(conf.CertPath(), certRes.Domain+".crt") - privOut := path.Join(conf.CertPath(), certRes.Domain+".key") - pemOut := path.Join(conf.CertPath(), certRes.Domain+".pem") - metaOut := path.Join(conf.CertPath(), certRes.Domain+".json") - issuerOut := path.Join(conf.CertPath(), certRes.Domain+".issuer.crt") - - err := ioutil.WriteFile(certOut, certRes.Certificate, 0600) - if err != nil { - logger().Fatalf("Unable to save Certificate for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - if certRes.IssuerCertificate != nil { - err = ioutil.WriteFile(issuerOut, certRes.IssuerCertificate, 0600) - if err != nil { - logger().Fatalf("Unable to save IssuerCertificate for domain %s\n\t%s", certRes.Domain, err.Error()) - } - } - - if certRes.PrivateKey != nil { - // if we were given a CSR, we don't know the private key - err = ioutil.WriteFile(privOut, certRes.PrivateKey, 0600) - if err != nil { - logger().Fatalf("Unable to save PrivateKey for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - if conf.context.GlobalBool("pem") { - err = ioutil.WriteFile(pemOut, bytes.Join([][]byte{certRes.Certificate, certRes.PrivateKey}, nil), 0600) - if err != nil { - logger().Fatalf("Unable to save Certificate and PrivateKey in .pem for domain %s\n\t%s", certRes.Domain, err.Error()) - } - } - - } else if conf.context.GlobalBool("pem") { - // we don't have the private key; can't write the .pem file - logger().Fatalf("Unable to save pem without private key for domain %s\n\t%s; are you using a CSR?", certRes.Domain, err.Error()) - } - - jsonBytes, err := json.MarshalIndent(certRes, "", "\t") - if err != nil { - logger().Fatalf("Unable to marshal CertResource for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - err = ioutil.WriteFile(metaOut, jsonBytes, 0600) - if err != nil { - logger().Fatalf("Unable to save CertResource for domain %s\n\t%s", certRes.Domain, err.Error()) - } -} - -func handleTOS(c *cli.Context, client *acme.Client, acc *Account) { - // Check for a global accept override - if c.GlobalBool("accept-tos") { - err := client.AgreeToTOS() - if err != nil { - logger().Fatalf("Could not agree to TOS: %s", err.Error()) - } - - acc.Save() - return - } - - reader := bufio.NewReader(os.Stdin) - logger().Printf("Please review the TOS at %s", acc.Registration.TosURL) - - for { - logger().Println("Do you accept the TOS? Y/n") - text, err := reader.ReadString('\n') - if err != nil { - logger().Fatalf("Could not read from console: %s", err.Error()) - } - - text = strings.Trim(text, "\r\n") - - if text == "n" { - logger().Fatal("You did not accept the TOS. Unable to proceed.") - } - - if text == "Y" || text == "y" || text == "" { - err = client.AgreeToTOS() - if err != nil { - logger().Fatalf("Could not agree to TOS: %s", err.Error()) - } - acc.Save() - break - } - - logger().Println("Your input was invalid. Please answer with one of Y/y, n or by pressing enter.") - } -} - -func readCSRFile(filename string) (*x509.CertificateRequest, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - raw := bytes - - // see if we can find a PEM-encoded CSR - var p *pem.Block - rest := bytes - for { - // decode a PEM block - p, rest = pem.Decode(rest) - - // did we fail? - if p == nil { - break - } - - // did we get a CSR? 
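Stepping back to saveCertRes above: the optional --pem output is nothing more than the certificate followed by the private key in one file. A small sketch of that concatenation, not part of the removed code; the paths are illustrative:

package main

import (
	"bytes"
	"io/ioutil"
	"log"
)

func main() {
	cert, err := ioutil.ReadFile("certificates/example.com.crt")
	if err != nil {
		log.Fatal(err)
	}
	key, err := ioutil.ReadFile("certificates/example.com.key")
	if err != nil {
		log.Fatal(err)
	}
	// The .pem bundle is the raw byte concatenation written with the same
	// 0600 mode saveCertRes uses for key material.
	bundle := bytes.Join([][]byte{cert, key}, nil)
	if err := ioutil.WriteFile("certificates/example.com.pem", bundle, 0600); err != nil {
		log.Fatal(err)
	}
}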
- if p.Type == "CERTIFICATE REQUEST" { - raw = p.Bytes - } - } - - // no PEM-encoded CSR - // assume we were given a DER-encoded ASN.1 CSR - // (if this assumption is wrong, parsing these bytes will fail) - return x509.ParseCertificateRequest(raw) -} - -func run(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - reg, err := client.Register() - if err != nil { - logger().Fatalf("Could not complete registration\n\t%s", err.Error()) - } - - acc.Registration = reg - acc.Save() - - logger().Print("!!!! HEADS UP !!!!") - logger().Printf(` - Your account credentials have been saved in your Let's Encrypt - configuration directory at "%s". - You should make a secure backup of this folder now. This - configuration directory will also contain certificates and - private keys obtained from Let's Encrypt so making regular - backups of this folder is ideal.`, conf.AccountPath(c.GlobalString("email"))) - - } - - // If the agreement URL is empty, the account still needs to accept the LE TOS. - if acc.Registration.Body.Agreement == "" { - handleTOS(c, client, acc) - } - - // we require either domains or csr, but not both - hasDomains := len(c.GlobalStringSlice("domains")) > 0 - hasCsr := len(c.GlobalString("csr")) > 0 - if hasDomains && hasCsr { - logger().Fatal("Please specify either --domains/-d or --csr/-c, but not both") - } - if !hasDomains && !hasCsr { - logger().Fatal("Please specify --domains/-d (or --csr/-c if you already have a CSR)") - } - - var cert acme.CertificateResource - var failures map[string]error - - if hasDomains { - // obtain a certificate, generating a new private key - cert, failures = client.ObtainCertificate(c.GlobalStringSlice("domains"), !c.Bool("no-bundle"), nil, c.Bool("must-staple")) - } else { - // read the CSR - csr, err := readCSRFile(c.GlobalString("csr")) - if err != nil { - // we couldn't read the CSR - failures = map[string]error{"csr": err} - } else { - // obtain a certificate for this CSR - cert, failures = client.ObtainCertificateForCSR(*csr, !c.Bool("no-bundle")) - } - } - - if len(failures) > 0 { - for k, v := range failures { - logger().Printf("[%s] Could not obtain certificates\n\t%s", k, v.Error()) - } - - // Make sure to return a non-zero exit code if ObtainSANCertificate - // returned at least one error. Due to us not returning partial - // certificate we can just exit here instead of at the end. - os.Exit(1) - } - - err := checkFolder(conf.CertPath()) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - saveCertRes(cert, conf) - - return nil -} - -func revoke(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - logger().Fatalf("Account %s is not registered. 
Use 'run' to register a new account.\n", acc.Email) - } - - err := checkFolder(conf.CertPath()) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - for _, domain := range c.GlobalStringSlice("domains") { - logger().Printf("Trying to revoke certificate for domain %s", domain) - - certPath := path.Join(conf.CertPath(), domain+".crt") - certBytes, err := ioutil.ReadFile(certPath) - - err = client.RevokeCertificate(certBytes) - if err != nil { - logger().Fatalf("Error while revoking the certificate for domain %s\n\t%s", domain, err.Error()) - } else { - logger().Print("Certificate was revoked.") - } - } - - return nil -} - -func renew(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - logger().Fatalf("Account %s is not registered. Use 'run' to register a new account.\n", acc.Email) - } - - if len(c.GlobalStringSlice("domains")) <= 0 { - logger().Fatal("Please specify at least one domain.") - } - - domain := c.GlobalStringSlice("domains")[0] - - // load the cert resource from files. - // We store the certificate, private key and metadata in different files - // as web servers would not be able to work with a combined file. - certPath := path.Join(conf.CertPath(), domain+".crt") - privPath := path.Join(conf.CertPath(), domain+".key") - metaPath := path.Join(conf.CertPath(), domain+".json") - - certBytes, err := ioutil.ReadFile(certPath) - if err != nil { - logger().Fatalf("Error while loading the certificate for domain %s\n\t%s", domain, err.Error()) - } - - if c.IsSet("days") { - expTime, err := acme.GetPEMCertExpiration(certBytes) - if err != nil { - logger().Printf("Could not get Certification expiration for domain %s", domain) - } - - if int(expTime.Sub(time.Now()).Hours()/24.0) > c.Int("days") { - return nil - } - } - - metaBytes, err := ioutil.ReadFile(metaPath) - if err != nil { - logger().Fatalf("Error while loading the meta data for domain %s\n\t%s", domain, err.Error()) - } - - var certRes acme.CertificateResource - err = json.Unmarshal(metaBytes, &certRes) - if err != nil { - logger().Fatalf("Error while marshalling the meta data for domain %s\n\t%s", domain, err.Error()) - } - - if c.Bool("reuse-key") { - keyBytes, err := ioutil.ReadFile(privPath) - if err != nil { - logger().Fatalf("Error while loading the private key for domain %s\n\t%s", domain, err.Error()) - } - certRes.PrivateKey = keyBytes - } - - certRes.Certificate = certBytes - - newCert, err := client.RenewCertificate(certRes, !c.Bool("no-bundle"), c.Bool("must-staple")) - if err != nil { - logger().Fatalf("%s", err.Error()) - } - - saveCertRes(newCert, conf) - - return nil -} diff --git a/vendor/github.com/xenolf/lego/configuration.go b/vendor/github.com/xenolf/lego/configuration.go deleted file mode 100644 index f92c1fe96..000000000 --- a/vendor/github.com/xenolf/lego/configuration.go +++ /dev/null @@ -1,76 +0,0 @@ -package main - -import ( - "fmt" - "net/url" - "os" - "path" - "strings" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" -) - -// Configuration type from CLI and config files. -type Configuration struct { - context *cli.Context -} - -// NewConfiguration creates a new configuration from CLI data. 
-func NewConfiguration(c *cli.Context) *Configuration { - return &Configuration{context: c} -} - -// KeyType the type from which private keys should be generated -func (c *Configuration) KeyType() (acme.KeyType, error) { - switch strings.ToUpper(c.context.GlobalString("key-type")) { - case "RSA2048": - return acme.RSA2048, nil - case "RSA4096": - return acme.RSA4096, nil - case "RSA8192": - return acme.RSA8192, nil - case "EC256": - return acme.EC256, nil - case "EC384": - return acme.EC384, nil - } - - return "", fmt.Errorf("Unsupported KeyType: %s", c.context.GlobalString("key-type")) -} - -// ExcludedSolvers is a list of solvers that are to be excluded. -func (c *Configuration) ExcludedSolvers() (cc []acme.Challenge) { - for _, s := range c.context.GlobalStringSlice("exclude") { - cc = append(cc, acme.Challenge(s)) - } - return -} - -// ServerPath returns the OS dependent path to the data for a specific CA -func (c *Configuration) ServerPath() string { - srv, _ := url.Parse(c.context.GlobalString("server")) - srvStr := strings.Replace(srv.Host, ":", "_", -1) - return strings.Replace(srvStr, "/", string(os.PathSeparator), -1) -} - -// CertPath gets the path for certificates. -func (c *Configuration) CertPath() string { - return path.Join(c.context.GlobalString("path"), "certificates") -} - -// AccountsPath returns the OS dependent path to the -// local accounts for a specific CA -func (c *Configuration) AccountsPath() string { - return path.Join(c.context.GlobalString("path"), "accounts", c.ServerPath()) -} - -// AccountPath returns the OS dependent path to a particular account -func (c *Configuration) AccountPath(acc string) string { - return path.Join(c.AccountsPath(), acc) -} - -// AccountKeysPath returns the OS dependent path to the keys of a particular account -func (c *Configuration) AccountKeysPath(acc string) string { - return path.Join(c.AccountPath(acc), "keys") -} diff --git a/vendor/github.com/xenolf/lego/crypto.go b/vendor/github.com/xenolf/lego/crypto.go deleted file mode 100644 index 8b23e2fc1..000000000 --- a/vendor/github.com/xenolf/lego/crypto.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "errors" - "io/ioutil" - "os" -) - -func generatePrivateKey(file string) (crypto.PrivateKey, error) { - - privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - if err != nil { - return nil, err - } - - keyBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - pemKey := pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - - certOut, err := os.Create(file) - if err != nil { - return nil, err - } - - pem.Encode(certOut, &pemKey) - certOut.Close() - - return privateKey, nil -} - -func loadPrivateKey(file string) (crypto.PrivateKey, error) { - keyBytes, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - - keyBlock, _ := pem.Decode(keyBytes) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - } - - return nil, errors.New("Unknown private key type.") -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go b/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go deleted file mode 100644 index 55b48f9b4..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go +++ /dev/null @@ -1,141 +0,0 @@ -package auroradns - -import ( 
- "fmt" - "github.com/edeckers/auroradnsclient" - "github.com/edeckers/auroradnsclient/records" - "github.com/edeckers/auroradnsclient/zones" - "github.com/xenolf/lego/acme" - "os" - "sync" -) - -// DNSProvider describes a provider for AuroraDNS -type DNSProvider struct { - recordIDs map[string]string - recordIDsMu sync.Mutex - client *auroradnsclient.AuroraDNSClient -} - -// NewDNSProvider returns a DNSProvider instance configured for AuroraDNS. -// Credentials must be passed in the environment variables: AURORA_USER_ID -// and AURORA_KEY. -func NewDNSProvider() (*DNSProvider, error) { - userID := os.Getenv("AURORA_USER_ID") - key := os.Getenv("AURORA_KEY") - - endpoint := os.Getenv("AURORA_ENDPOINT") - if endpoint == "" { - endpoint = "https://api.auroradns.eu" - } - - return NewDNSProviderCredentials(endpoint, userID, key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for AuroraDNS. -func NewDNSProviderCredentials(baseURL string, userID string, key string) (*DNSProvider, error) { - client, err := auroradnsclient.NewAuroraDNSClient(baseURL, userID, key) - if err != nil { - return nil, err - } - - return &DNSProvider{ - client: client, - recordIDs: make(map[string]string), - }, nil -} - -func (provider *DNSProvider) getZoneInformationByName(name string) (zones.ZoneRecord, error) { - zs, err := provider.client.GetZones() - - if err != nil { - return zones.ZoneRecord{}, err - } - - for _, element := range zs { - if element.Name == name { - return element, nil - } - } - - return zones.ZoneRecord{}, fmt.Errorf("Could not find Zone record") -} - -// Present creates a record with a secret -func (provider *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err) - } - - // 1. Aurora will happily create the TXT record when it is provided a fqdn, - // but it will only appear in the control panel and will not be - // propagated to DNS servers. Extract and use subdomain instead. - // 2. A trailing dot in the fqdn will cause Aurora to add a trailing dot to - // the subdomain, resulting in _acme-challenge.. rather - // than _acme-challenge. - - subdomain := fqdn[0 : len(fqdn)-len(authZone)-1] - - authZone = acme.UnFqdn(authZone) - - zoneRecord, err := provider.getZoneInformationByName(authZone) - - reqData := - records.CreateRecordRequest{ - RecordType: "TXT", - Name: subdomain, - Content: value, - TTL: 300, - } - - respData, err := provider.client.CreateRecord(zoneRecord.ID, reqData) - if err != nil { - return fmt.Errorf("Could not create record: '%s'.", err) - } - - provider.recordIDsMu.Lock() - provider.recordIDs[fqdn] = respData.ID - provider.recordIDsMu.Unlock() - - return nil -} - -// CleanUp removes a given record that was generated by Present -func (provider *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - provider.recordIDsMu.Lock() - recordID, ok := provider.recordIDs[fqdn] - provider.recordIDsMu.Unlock() - - if !ok { - return fmt.Errorf("Unknown recordID for '%s'", fqdn) - } - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. 
%s", domain, err) - } - - authZone = acme.UnFqdn(authZone) - - zoneRecord, err := provider.getZoneInformationByName(authZone) - if err != nil { - return err - } - - _, err = provider.client.RemoveRecord(zoneRecord.ID, recordID) - if err != nil { - return err - } - - provider.recordIDsMu.Lock() - delete(provider.recordIDs, fqdn) - provider.recordIDsMu.Unlock() - - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns_test.go b/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns_test.go deleted file mode 100644 index f4df7fa61..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package auroradns - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" -) - -var fakeAuroraDNSUserId = "asdf1234" -var fakeAuroraDNSKey = "key" - -func TestAuroraDNSPresent(t *testing.T) { - var requestReceived bool - - mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && r.URL.Path == "/zones" { - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `[{ - "id": "c56a4180-65aa-42ec-a945-5fd21dec0538", - "name": "example.com" - }]`) - return - } - - requestReceived = true - - if got, want := r.Method, "POST"; got != want { - t.Errorf("Expected method to be '%s' but got '%s'", want, got) - } - - if got, want := r.URL.Path, "/zones/c56a4180-65aa-42ec-a945-5fd21dec0538/records"; got != want { - t.Errorf("Expected path to be '%s' but got '%s'", want, got) - } - - if got, want := r.Header.Get("Content-Type"), "application/json"; got != want { - t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got) - } - - reqBody, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("Error reading request body: %v", err) - } - - if got, want := string(reqBody), - `{"type":"TXT","name":"_acme-challenge","content":"w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI","ttl":300}`; got != want { - - t.Errorf("Expected body data to be: `%s` but got `%s`", want, got) - } - - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `{ - "id": "c56a4180-65aa-42ec-a945-5fd21dec0538", - "type": "TXT", - "name": "_acme-challenge", - "ttl": 300 - }`) - })) - - defer mock.Close() - - auroraProvider, err := NewDNSProviderCredentials(mock.URL, fakeAuroraDNSUserId, fakeAuroraDNSKey) - if auroraProvider == nil { - t.Fatal("Expected non-nil AuroraDNS provider, but was nil") - } - - if err != nil { - t.Fatalf("Expected no error creating provider, but got: %v", err) - } - - err = auroraProvider.Present("example.com", "", "foobar") - if err != nil { - t.Fatalf("Expected no error creating TXT record, but got: %v", err) - } - - if !requestReceived { - t.Error("Expected request to be received by mock backend, but it wasn't") - } -} - -func TestAuroraDNSCleanUp(t *testing.T) { - var requestReceived bool - - mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && r.URL.Path == "/zones" { - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `[{ - "id": "c56a4180-65aa-42ec-a945-5fd21dec0538", - "name": "example.com" - }]`) - return - } - - if r.Method == "POST" && r.URL.Path == "/zones/c56a4180-65aa-42ec-a945-5fd21dec0538/records" { - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `{ - "id": "ec56a4180-65aa-42ec-a945-5fd21dec0538", - "type": "TXT", - "name": "_acme-challenge", - "ttl": 300 - }`) - return - } - - requestReceived = true - - if got, want := r.Method, "DELETE"; got != want { - 
t.Errorf("Expected method to be '%s' but got '%s'", want, got) - } - - if got, want := r.URL.Path, - "/zones/c56a4180-65aa-42ec-a945-5fd21dec0538/records/ec56a4180-65aa-42ec-a945-5fd21dec0538"; got != want { - t.Errorf("Expected path to be '%s' but got '%s'", want, got) - } - - if got, want := r.Header.Get("Content-Type"), "application/json"; got != want { - t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got) - } - - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `{}`) - })) - defer mock.Close() - - auroraProvider, err := NewDNSProviderCredentials(mock.URL, fakeAuroraDNSUserId, fakeAuroraDNSKey) - if auroraProvider == nil { - t.Fatal("Expected non-nil AuroraDNS provider, but was nil") - } - - if err != nil { - t.Fatalf("Expected no error creating provider, but got: %v", err) - } - - err = auroraProvider.Present("example.com", "", "foobar") - if err != nil { - t.Fatalf("Expected no error creating TXT record, but got: %v", err) - } - - err = auroraProvider.CleanUp("example.com", "", "foobar") - if err != nil { - t.Fatalf("Expected no error removing TXT record, but got: %v", err) - } - - if !requestReceived { - t.Error("Expected request to be received by mock backend, but it wasn't") - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go deleted file mode 100644 index 6a30b318a..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go +++ /dev/null @@ -1,151 +0,0 @@ -// Package azure implements a DNS provider for solving the DNS-01 -// challenge using azure DNS. -// Azure doesn't like trailing dots on domain names, most of the acme code does. -package azure - -import ( - "fmt" - "os" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/dns" - - "strings" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -type DNSProvider struct { - clientId string - clientSecret string - subscriptionId string - tenantId string - resourceGroup string -} - -// NewDNSProvider returns a DNSProvider instance configured for azure. -// Credentials must be passed in the environment variables: AZURE_CLIENT_ID, -// AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP -func NewDNSProvider() (*DNSProvider, error) { - clientId := os.Getenv("AZURE_CLIENT_ID") - clientSecret := os.Getenv("AZURE_CLIENT_SECRET") - subscriptionId := os.Getenv("AZURE_SUBSCRIPTION_ID") - tenantId := os.Getenv("AZURE_TENANT_ID") - resourceGroup := os.Getenv("AZURE_RESOURCE_GROUP") - return NewDNSProviderCredentials(clientId, clientSecret, subscriptionId, tenantId, resourceGroup) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for azure. 
-func NewDNSProviderCredentials(clientId, clientSecret, subscriptionId, tenantId, resourceGroup string) (*DNSProvider, error) { - if clientId == "" || clientSecret == "" || subscriptionId == "" || tenantId == "" || resourceGroup == "" { - return nil, fmt.Errorf("Azure configuration missing") - } - - return &DNSProvider{ - clientId: clientId, - clientSecret: clientSecret, - subscriptionId: subscriptionId, - tenantId: tenantId, - resourceGroup: resourceGroup, - }, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Adjusting here to cope with spikes in propagation times. -func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 120 * time.Second, 2 * time.Second -} - -// Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zone, err := c.getHostedZoneID(fqdn) - if err != nil { - return err - } - - rsc := dns.NewRecordSetsClient(c.subscriptionId) - spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) - rsc.Authorizer = autorest.NewBearerAuthorizer(spt) - - relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) - rec := dns.RecordSet{ - Name: &relative, - RecordSetProperties: &dns.RecordSetProperties{ - TTL: to.Int64Ptr(60), - TxtRecords: &[]dns.TxtRecord{dns.TxtRecord{Value: &[]string{value}}}, - }, - } - _, err = rsc.CreateOrUpdate(c.resourceGroup, zone, relative, dns.TXT, rec, "", "") - - if err != nil { - return err - } - - return nil -} - -// Returns the relative record to the domain -func toRelativeRecord(domain, zone string) string { - return acme.UnFqdn(strings.TrimSuffix(domain, zone)) -} - -// CleanUp removes the TXT record matching the specified parameters -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZoneID(fqdn) - if err != nil { - return err - } - - relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) - rsc := dns.NewRecordSetsClient(c.subscriptionId) - spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) - rsc.Authorizer = autorest.NewBearerAuthorizer(spt) - _, err = rsc.Delete(c.resourceGroup, zone, relative, dns.TXT, "") - if err != nil { - return err - } - - return nil -} - -// Checks that azure has a zone for this domain name. -func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return "", err - } - - // Now we want to to Azure and get the zone. - spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint) - - dc := dns.NewZonesClient(c.subscriptionId) - dc.Authorizer = autorest.NewBearerAuthorizer(spt) - - zone, err := dc.Get(c.resourceGroup, acme.UnFqdn(authZone)) - - if err != nil { - return "", err - } - - // zone.Name shouldn't have a trailing dot(.) - return to.String(zone.Name), nil -} - -// NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the -// passed credentials map. 
-func (c *DNSProvider) newServicePrincipalTokenFromCredentials(scope string) (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, c.tenantId) - if err != nil { - panic(err) - } - return adal.NewServicePrincipalToken(*oauthConfig, c.clientId, c.clientSecret, scope) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure_test.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure_test.go deleted file mode 100644 index db55f578a..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package azure - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - azureLiveTest bool - azureClientID string - azureClientSecret string - azureSubscriptionID string - azureTenantID string - azureResourceGroup string - azureDomain string -) - -func init() { - azureClientID = os.Getenv("AZURE_CLIENT_ID") - azureClientSecret = os.Getenv("AZURE_CLIENT_SECRET") - azureSubscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID") - azureTenantID = os.Getenv("AZURE_TENANT_ID") - azureResourceGroup = os.Getenv("AZURE_RESOURCE_GROUP") - azureDomain = os.Getenv("AZURE_DOMAIN") - if len(azureClientID) > 0 && len(azureClientSecret) > 0 { - azureLiveTest = true - } -} - -func restoreAzureEnv() { - os.Setenv("AZURE_CLIENT_ID", azureClientID) - os.Setenv("AZURE_SUBSCRIPTION_ID", azureSubscriptionID) -} - -func TestNewDNSProviderValid(t *testing.T) { - if !azureLiveTest { - t.Skip("skipping live test (requires credentials)") - } - os.Setenv("AZURE_CLIENT_ID", "") - _, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) - assert.NoError(t, err) - restoreAzureEnv() -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - if !azureLiveTest { - t.Skip("skipping live test (requires credentials)") - } - os.Setenv("AZURE_CLIENT_ID", "other") - _, err := NewDNSProvider() - assert.NoError(t, err) - restoreAzureEnv() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("AZURE_SUBSCRIPTION_ID", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "Azure configuration missing") - restoreAzureEnv() -} - -func TestLiveAzurePresent(t *testing.T) { - if !azureLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) - assert.NoError(t, err) - - err = provider.Present(azureDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveAzureCleanUp(t *testing.T) { - if !azureLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) - time.Sleep(time.Second * 1) - - assert.NoError(t, err) - - err = provider.CleanUp(azureDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go deleted file mode 100644 index 84952238d..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package cloudflare implements a DNS provider for solving the DNS-01 -// challenge using cloudflare DNS. 
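For context, the cloudflare.go file removed below exposes a small public surface: NewDNSProvider (which reads CLOUDFLARE_EMAIL and CLOUDFLARE_API_KEY from the environment), NewDNSProviderCredentials, and the Present/CleanUp pair used for the dns-01 challenge. A minimal, illustrative sketch of how a caller would drive that API; the email, key, domain and key-authorization values are placeholders, not taken from this patch:

package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/cloudflare"
)

func main() {
	// Explicit credentials; cloudflare.NewDNSProvider() would instead read
	// CLOUDFLARE_EMAIL and CLOUDFLARE_API_KEY from the environment.
	provider, err := cloudflare.NewDNSProviderCredentials("me@example.com", "placeholder-api-key")
	if err != nil {
		log.Fatal(err)
	}

	// Present publishes the _acme-challenge TXT record for the dns-01
	// challenge; CleanUp deletes it again after validation.
	if err := provider.Present("example.com", "", "placeholder-key-auth"); err != nil {
		log.Fatal(err)
	}
	if err := provider.CleanUp("example.com", "", "placeholder-key-auth"); err != nil {
		log.Fatal(err)
	}
}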
-package cloudflare - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "time" - - "github.com/xenolf/lego/acme" -) - -// CloudFlareAPIURL represents the API endpoint to call. -// TODO: Unexport? -const CloudFlareAPIURL = "https://api.cloudflare.com/client/v4" - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -type DNSProvider struct { - authEmail string - authKey string -} - -// NewDNSProvider returns a DNSProvider instance configured for cloudflare. -// Credentials must be passed in the environment variables: CLOUDFLARE_EMAIL -// and CLOUDFLARE_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - email := os.Getenv("CLOUDFLARE_EMAIL") - key := os.Getenv("CLOUDFLARE_API_KEY") - return NewDNSProviderCredentials(email, key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for cloudflare. -func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) { - if email == "" || key == "" { - return nil, fmt.Errorf("CloudFlare credentials missing") - } - - return &DNSProvider{ - authEmail: email, - authKey: key, - }, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Adjusting here to cope with spikes in propagation times. -func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 120 * time.Second, 2 * time.Second -} - -// Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zoneID, err := c.getHostedZoneID(fqdn) - if err != nil { - return err - } - - rec := cloudFlareRecord{ - Type: "TXT", - Name: acme.UnFqdn(fqdn), - Content: value, - TTL: 120, - } - - body, err := json.Marshal(rec) - if err != nil { - return err - } - - _, err = c.makeRequest("POST", fmt.Sprintf("/zones/%s/dns_records", zoneID), bytes.NewReader(body)) - if err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - record, err := c.findTxtRecord(fqdn) - if err != nil { - return err - } - - _, err = c.makeRequest("DELETE", fmt.Sprintf("/zones/%s/dns_records/%s", record.ZoneID, record.ID), nil) - if err != nil { - return err - } - - return nil -} - -func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { - // HostedZone represents a CloudFlare DNS zone - type HostedZone struct { - ID string `json:"id"` - Name string `json:"name"` - } - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return "", err - } - - result, err := c.makeRequest("GET", "/zones?name="+acme.UnFqdn(authZone), nil) - if err != nil { - return "", err - } - - var hostedZone []HostedZone - err = json.Unmarshal(result, &hostedZone) - if err != nil { - return "", err - } - - if len(hostedZone) != 1 { - return "", fmt.Errorf("Zone %s not found in CloudFlare for domain %s", authZone, fqdn) - } - - return hostedZone[0].ID, nil -} - -func (c *DNSProvider) findTxtRecord(fqdn string) (*cloudFlareRecord, error) { - zoneID, err := c.getHostedZoneID(fqdn) - if err != nil { - return nil, err - } - - result, err := c.makeRequest( - "GET", - fmt.Sprintf("/zones/%s/dns_records?per_page=1000&type=TXT&name=%s", zoneID, acme.UnFqdn(fqdn)), - nil, - ) - if err != nil { - return nil, err - } - - var records 
[]cloudFlareRecord - err = json.Unmarshal(result, &records) - if err != nil { - return nil, err - } - - for _, rec := range records { - if rec.Name == acme.UnFqdn(fqdn) { - return &rec, nil - } - } - - return nil, fmt.Errorf("No existing record found for %s", fqdn) -} - -func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { - // APIError contains error details for failed requests - type APIError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` - ErrorChain []APIError `json:"error_chain,omitempty"` - } - - // APIResponse represents a response from CloudFlare API - type APIResponse struct { - Success bool `json:"success"` - Errors []*APIError `json:"errors"` - Result json.RawMessage `json:"result"` - } - - req, err := http.NewRequest(method, fmt.Sprintf("%s%s", CloudFlareAPIURL, uri), body) - if err != nil { - return nil, err - } - - req.Header.Set("X-Auth-Email", c.authEmail) - req.Header.Set("X-Auth-Key", c.authKey) - //req.Header.Set("User-Agent", userAgent()) - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Error querying Cloudflare API -> %v", err) - } - - defer resp.Body.Close() - - var r APIResponse - err = json.NewDecoder(resp.Body).Decode(&r) - if err != nil { - return nil, err - } - - if !r.Success { - if len(r.Errors) > 0 { - errStr := "" - for _, apiErr := range r.Errors { - errStr += fmt.Sprintf("\t Error: %d: %s", apiErr.Code, apiErr.Message) - for _, chainErr := range apiErr.ErrorChain { - errStr += fmt.Sprintf("<- %d: %s", chainErr.Code, chainErr.Message) - } - } - return nil, fmt.Errorf("Cloudflare API Error \n%s", errStr) - } - return nil, fmt.Errorf("Cloudflare API error") - } - - return r.Result, nil -} - -// cloudFlareRecord represents a CloudFlare DNS record -type cloudFlareRecord struct { - Name string `json:"name"` - Type string `json:"type"` - Content string `json:"content"` - ID string `json:"id,omitempty"` - TTL int `json:"ttl,omitempty"` - ZoneID string `json:"zone_id,omitempty"` -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go deleted file mode 100644 index 19b5a40b9..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package cloudflare - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - cflareLiveTest bool - cflareEmail string - cflareAPIKey string - cflareDomain string -) - -func init() { - cflareEmail = os.Getenv("CLOUDFLARE_EMAIL") - cflareAPIKey = os.Getenv("CLOUDFLARE_API_KEY") - cflareDomain = os.Getenv("CLOUDFLARE_DOMAIN") - if len(cflareEmail) > 0 && len(cflareAPIKey) > 0 && len(cflareDomain) > 0 { - cflareLiveTest = true - } -} - -func restoreCloudFlareEnv() { - os.Setenv("CLOUDFLARE_EMAIL", cflareEmail) - os.Setenv("CLOUDFLARE_API_KEY", cflareAPIKey) -} - -func TestNewDNSProviderValid(t *testing.T) { - os.Setenv("CLOUDFLARE_EMAIL", "") - os.Setenv("CLOUDFLARE_API_KEY", "") - _, err := NewDNSProviderCredentials("123", "123") - assert.NoError(t, err) - restoreCloudFlareEnv() -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("CLOUDFLARE_EMAIL", "test@example.com") - os.Setenv("CLOUDFLARE_API_KEY", "123") - _, err := NewDNSProvider() - assert.NoError(t, err) - restoreCloudFlareEnv() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - 
os.Setenv("CLOUDFLARE_EMAIL", "") - os.Setenv("CLOUDFLARE_API_KEY", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "CloudFlare credentials missing") - restoreCloudFlareEnv() -} - -func TestCloudFlarePresent(t *testing.T) { - if !cflareLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(cflareEmail, cflareAPIKey) - assert.NoError(t, err) - - err = provider.Present(cflareDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestCloudFlareCleanUp(t *testing.T) { - if !cflareLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 2) - - provider, err := NewDNSProviderCredentials(cflareEmail, cflareAPIKey) - assert.NoError(t, err) - - err = provider.CleanUp(cflareDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go deleted file mode 100644 index da261b39a..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go +++ /dev/null @@ -1,166 +0,0 @@ -// Package digitalocean implements a DNS provider for solving the DNS-01 -// challenge using digitalocean DNS. -package digitalocean - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "os" - "sync" - "time" - - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -// that uses DigitalOcean's REST API to manage TXT records for a domain. -type DNSProvider struct { - apiAuthToken string - recordIDs map[string]int - recordIDsMu sync.Mutex -} - -// NewDNSProvider returns a DNSProvider instance configured for Digital -// Ocean. Credentials must be passed in the environment variable: -// DO_AUTH_TOKEN. -func NewDNSProvider() (*DNSProvider, error) { - apiAuthToken := os.Getenv("DO_AUTH_TOKEN") - return NewDNSProviderCredentials(apiAuthToken) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Digital Ocean. -func NewDNSProviderCredentials(apiAuthToken string) (*DNSProvider, error) { - if apiAuthToken == "" { - return nil, fmt.Errorf("DigitalOcean credentials missing") - } - return &DNSProvider{ - apiAuthToken: apiAuthToken, - recordIDs: make(map[string]int), - }, nil -} - -// Present creates a TXT record using the specified parameters -func (d *DNSProvider) Present(domain, token, keyAuth string) error { - // txtRecordRequest represents the request body to DO's API to make a TXT record - type txtRecordRequest struct { - RecordType string `json:"type"` - Name string `json:"name"` - Data string `json:"data"` - } - - // txtRecordResponse represents a response from DO's API after making a TXT record - type txtRecordResponse struct { - DomainRecord struct { - ID int `json:"id"` - Type string `json:"type"` - Name string `json:"name"` - Data string `json:"data"` - } `json:"domain_record"` - } - - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. 
%s", domain, err) - } - - authZone = acme.UnFqdn(authZone) - - reqURL := fmt.Sprintf("%s/v2/domains/%s/records", digitalOceanBaseURL, authZone) - reqData := txtRecordRequest{RecordType: "TXT", Name: fqdn, Data: value} - body, err := json.Marshal(reqData) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", reqURL, bytes.NewReader(body)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - var errInfo digitalOceanAPIError - json.NewDecoder(resp.Body).Decode(&errInfo) - return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message) - } - - // Everything looks good; but we'll need the ID later to delete the record - var respData txtRecordResponse - err = json.NewDecoder(resp.Body).Decode(&respData) - if err != nil { - return err - } - d.recordIDsMu.Lock() - d.recordIDs[fqdn] = respData.DomainRecord.ID - d.recordIDsMu.Unlock() - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - // get the record's unique ID from when we created it - d.recordIDsMu.Lock() - recordID, ok := d.recordIDs[fqdn] - d.recordIDsMu.Unlock() - if !ok { - return fmt.Errorf("unknown record ID for '%s'", fqdn) - } - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err) - } - - authZone = acme.UnFqdn(authZone) - - reqURL := fmt.Sprintf("%s/v2/domains/%s/records/%d", digitalOceanBaseURL, authZone, recordID) - req, err := http.NewRequest("DELETE", reqURL, nil) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - var errInfo digitalOceanAPIError - json.NewDecoder(resp.Body).Decode(&errInfo) - return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message) - } - - // Delete record ID from map - d.recordIDsMu.Lock() - delete(d.recordIDs, fqdn) - d.recordIDsMu.Unlock() - - return nil -} - -type digitalOceanAPIError struct { - ID string `json:"id"` - Message string `json:"message"` -} - -var digitalOceanBaseURL = "https://api.digitalocean.com" diff --git a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go deleted file mode 100644 index 7498508ba..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package digitalocean - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" -) - -var fakeDigitalOceanAuth = "asdf1234" - -func TestDigitalOceanPresent(t *testing.T) { - var requestReceived bool - - mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestReceived = true - - if got, want := r.Method, "POST"; got != want { - t.Errorf("Expected method to be '%s' but got '%s'", 
want, got) - } - if got, want := r.URL.Path, "/v2/domains/example.com/records"; got != want { - t.Errorf("Expected path to be '%s' but got '%s'", want, got) - } - if got, want := r.Header.Get("Content-Type"), "application/json"; got != want { - t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got) - } - if got, want := r.Header.Get("Authorization"), "Bearer asdf1234"; got != want { - t.Errorf("Expected Authorization to be '%s' but got '%s'", want, got) - } - - reqBody, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("Error reading request body: %v", err) - } - if got, want := string(reqBody), `{"type":"TXT","name":"_acme-challenge.example.com.","data":"w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI"}`; got != want { - t.Errorf("Expected body data to be: `%s` but got `%s`", want, got) - } - - w.WriteHeader(http.StatusCreated) - fmt.Fprintf(w, `{ - "domain_record": { - "id": 1234567, - "type": "TXT", - "name": "_acme-challenge", - "data": "w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI", - "priority": null, - "port": null, - "weight": null - } - }`) - })) - defer mock.Close() - digitalOceanBaseURL = mock.URL - - doprov, err := NewDNSProviderCredentials(fakeDigitalOceanAuth) - if doprov == nil { - t.Fatal("Expected non-nil DigitalOcean provider, but was nil") - } - if err != nil { - t.Fatalf("Expected no error creating provider, but got: %v", err) - } - - err = doprov.Present("example.com", "", "foobar") - if err != nil { - t.Fatalf("Expected no error creating TXT record, but got: %v", err) - } - if !requestReceived { - t.Error("Expected request to be received by mock backend, but it wasn't") - } -} - -func TestDigitalOceanCleanUp(t *testing.T) { - var requestReceived bool - - mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestReceived = true - - if got, want := r.Method, "DELETE"; got != want { - t.Errorf("Expected method to be '%s' but got '%s'", want, got) - } - if got, want := r.URL.Path, "/v2/domains/example.com/records/1234567"; got != want { - t.Errorf("Expected path to be '%s' but got '%s'", want, got) - } - // NOTE: Even though the body is empty, DigitalOcean API docs still show setting this Content-Type... 
- if got, want := r.Header.Get("Content-Type"), "application/json"; got != want { - t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got) - } - if got, want := r.Header.Get("Authorization"), "Bearer asdf1234"; got != want { - t.Errorf("Expected Authorization to be '%s' but got '%s'", want, got) - } - - w.WriteHeader(http.StatusNoContent) - })) - defer mock.Close() - digitalOceanBaseURL = mock.URL - - doprov, err := NewDNSProviderCredentials(fakeDigitalOceanAuth) - if doprov == nil { - t.Fatal("Expected non-nil DigitalOcean provider, but was nil") - } - if err != nil { - t.Fatalf("Expected no error creating provider, but got: %v", err) - } - - doprov.recordIDsMu.Lock() - doprov.recordIDs["_acme-challenge.example.com."] = 1234567 - doprov.recordIDsMu.Unlock() - - err = doprov.CleanUp("example.com", "", "") - if err != nil { - t.Fatalf("Expected no error removing TXT record, but got: %v", err) - } - if !requestReceived { - t.Error("Expected request to be received by mock backend, but it wasn't") - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go b/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go deleted file mode 100644 index d7530f788..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dns_providers.go +++ /dev/null @@ -1,86 +0,0 @@ -// Factory for DNS providers -package dns - -import ( - "fmt" - - "github.com/xenolf/lego/acme" - "github.com/xenolf/lego/providers/dns/auroradns" - "github.com/xenolf/lego/providers/dns/azure" - "github.com/xenolf/lego/providers/dns/cloudflare" - "github.com/xenolf/lego/providers/dns/digitalocean" - "github.com/xenolf/lego/providers/dns/dnsimple" - "github.com/xenolf/lego/providers/dns/dnsmadeeasy" - "github.com/xenolf/lego/providers/dns/dnspod" - "github.com/xenolf/lego/providers/dns/dyn" - "github.com/xenolf/lego/providers/dns/exoscale" - "github.com/xenolf/lego/providers/dns/gandi" - "github.com/xenolf/lego/providers/dns/godaddy" - "github.com/xenolf/lego/providers/dns/googlecloud" - "github.com/xenolf/lego/providers/dns/linode" - "github.com/xenolf/lego/providers/dns/namecheap" - "github.com/xenolf/lego/providers/dns/ns1" - "github.com/xenolf/lego/providers/dns/otc" - "github.com/xenolf/lego/providers/dns/ovh" - "github.com/xenolf/lego/providers/dns/pdns" - "github.com/xenolf/lego/providers/dns/rackspace" - "github.com/xenolf/lego/providers/dns/rfc2136" - "github.com/xenolf/lego/providers/dns/route53" - "github.com/xenolf/lego/providers/dns/vultr" -) - -func NewDNSChallengeProviderByName(name string) (acme.ChallengeProvider, error) { - var err error - var provider acme.ChallengeProvider - switch name { - case "azure": - provider, err = azure.NewDNSProvider() - case "auroradns": - provider, err = auroradns.NewDNSProvider() - case "cloudflare": - provider, err = cloudflare.NewDNSProvider() - case "digitalocean": - provider, err = digitalocean.NewDNSProvider() - case "dnsimple": - provider, err = dnsimple.NewDNSProvider() - case "dnsmadeeasy": - provider, err = dnsmadeeasy.NewDNSProvider() - case "dnspod": - provider, err = dnspod.NewDNSProvider() - case "dyn": - provider, err = dyn.NewDNSProvider() - case "exoscale": - provider, err = exoscale.NewDNSProvider() - case "gandi": - provider, err = gandi.NewDNSProvider() - case "gcloud": - provider, err = googlecloud.NewDNSProvider() - case "godaddy": - provider, err = godaddy.NewDNSProvider() - case "linode": - provider, err = linode.NewDNSProvider() - case "manual": - provider, err = acme.NewDNSProviderManual() - case "namecheap": - provider, err 
= namecheap.NewDNSProvider() - case "rackspace": - provider, err = rackspace.NewDNSProvider() - case "route53": - provider, err = route53.NewDNSProvider() - case "rfc2136": - provider, err = rfc2136.NewDNSProvider() - case "vultr": - provider, err = vultr.NewDNSProvider() - case "ovh": - provider, err = ovh.NewDNSProvider() - case "pdns": - provider, err = pdns.NewDNSProvider() - case "ns1": - provider, err = ns1.NewDNSProvider() - case "otc": - provider, err = otc.NewDNSProvider() - default: - err = fmt.Errorf("Unrecognised DNS provider: %s", name) - } - return provider, err -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dns_providers_test.go b/vendor/github.com/xenolf/lego/providers/dns/dns_providers_test.go deleted file mode 100644 index 3f87ffd33..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dns_providers_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package dns - -import ( - "os" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/xenolf/lego/providers/dns/exoscale" -) - -var ( - apiKey string - apiSecret string -) - -func init() { - apiSecret = os.Getenv("EXOSCALE_API_SECRET") - apiKey = os.Getenv("EXOSCALE_API_KEY") -} - -func restoreExoscaleEnv() { - os.Setenv("EXOSCALE_API_KEY", apiKey) - os.Setenv("EXOSCALE_API_SECRET", apiSecret) -} - -func TestKnownDNSProviderSuccess(t *testing.T) { - os.Setenv("EXOSCALE_API_KEY", "abc") - os.Setenv("EXOSCALE_API_SECRET", "123") - provider, err := NewDNSChallengeProviderByName("exoscale") - assert.NoError(t, err) - assert.NotNil(t, provider) - if reflect.TypeOf(provider) != reflect.TypeOf(&exoscale.DNSProvider{}) { - t.Errorf("Not loaded correct DNS proviver: %v is not *exoscale.DNSProvider", reflect.TypeOf(provider)) - } - restoreExoscaleEnv() -} - -func TestKnownDNSProviderError(t *testing.T) { - os.Setenv("EXOSCALE_API_KEY", "") - os.Setenv("EXOSCALE_API_SECRET", "") - _, err := NewDNSChallengeProviderByName("exoscale") - assert.Error(t, err) - restoreExoscaleEnv() -} - -func TestUnknownDNSProvider(t *testing.T) { - _, err := NewDNSChallengeProviderByName("foobar") - assert.Error(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go deleted file mode 100644 index e3fea79ec..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go +++ /dev/null @@ -1,180 +0,0 @@ -// Package dnsimple implements a DNS provider for solving the DNS-01 challenge -// using dnsimple DNS. -package dnsimple - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/dnsimple/dnsimple-go/dnsimple" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface. -type DNSProvider struct { - client *dnsimple.Client -} - -// NewDNSProvider returns a DNSProvider instance configured for dnsimple. -// Credentials must be passed in the environment variables: DNSIMPLE_OAUTH_TOKEN. -// -// See: https://developer.dnsimple.com/v2/#authentication -func NewDNSProvider() (*DNSProvider, error) { - accessToken := os.Getenv("DNSIMPLE_OAUTH_TOKEN") - baseUrl := os.Getenv("DNSIMPLE_BASE_URL") - - return NewDNSProviderCredentials(accessToken, baseUrl) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for dnsimple. 
-func NewDNSProviderCredentials(accessToken, baseUrl string) (*DNSProvider, error) { - if accessToken == "" { - return nil, fmt.Errorf("DNSimple OAuth token is missing") - } - - client := dnsimple.NewClient(dnsimple.NewOauthTokenCredentials(accessToken)) - client.UserAgent = "lego" - - if baseUrl != "" { - client.BaseURL = baseUrl - } - - return &DNSProvider{client: client}, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - zoneName, err := c.getHostedZone(domain) - - if err != nil { - return err - } - - accountID, err := c.getAccountID() - if err != nil { - return err - } - - recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl) - _, err = c.client.Zones.CreateRecord(accountID, zoneName, *recordAttributes) - if err != nil { - return fmt.Errorf("DNSimple API call failed: %v", err) - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - records, err := c.findTxtRecords(domain, fqdn) - if err != nil { - return err - } - - accountID, err := c.getAccountID() - if err != nil { - return err - } - - for _, rec := range records { - _, err := c.client.Zones.DeleteRecord(accountID, rec.ZoneID, rec.ID) - if err != nil { - return err - } - } - - return nil -} - -func (c *DNSProvider) getHostedZone(domain string) (string, error) { - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return "", err - } - - accountID, err := c.getAccountID() - if err != nil { - return "", err - } - - zoneName := acme.UnFqdn(authZone) - - zones, err := c.client.Zones.ListZones(accountID, &dnsimple.ZoneListOptions{NameLike: zoneName}) - if err != nil { - return "", fmt.Errorf("DNSimple API call failed: %v", err) - } - - var hostedZone dnsimple.Zone - for _, zone := range zones.Data { - if zone.Name == zoneName { - hostedZone = zone - } - } - - if hostedZone.ID == 0 { - return "", fmt.Errorf("Zone %s not found in DNSimple for domain %s", authZone, domain) - - } - - return hostedZone.Name, nil -} - -func (c *DNSProvider) findTxtRecords(domain, fqdn string) ([]dnsimple.ZoneRecord, error) { - zoneName, err := c.getHostedZone(domain) - if err != nil { - return nil, err - } - - accountID, err := c.getAccountID() - if err != nil { - return nil, err - } - - recordName := c.extractRecordName(fqdn, zoneName) - - result, err := c.client.Zones.ListRecords(accountID, zoneName, &dnsimple.ZoneRecordListOptions{Name: recordName, Type: "TXT", ListOptions: dnsimple.ListOptions{}}) - if err != nil { - return []dnsimple.ZoneRecord{}, fmt.Errorf("DNSimple API call has failed: %v", err) - } - - return result.Data, nil -} - -func (c *DNSProvider) newTxtRecord(zoneName, fqdn, value string, ttl int) *dnsimple.ZoneRecord { - name := c.extractRecordName(fqdn, zoneName) - - return &dnsimple.ZoneRecord{ - Type: "TXT", - Name: name, - Content: value, - TTL: ttl, - } -} - -func (c *DNSProvider) extractRecordName(fqdn, domain string) string { - name := acme.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} - -func (c *DNSProvider) getAccountID() (string, error) { - whoamiResponse, err := c.client.Identity.Whoami() - if err != nil { - return "", err - } - - if whoamiResponse.Data.Account == nil { - return "", 
fmt.Errorf("DNSimple user tokens are not supported, please use an account token.") - } - - return strconv.Itoa(whoamiResponse.Data.Account.ID), nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go deleted file mode 100644 index bd35790d7..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package dnsimple - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - dnsimpleLiveTest bool - dnsimpleOauthToken string - dnsimpleDomain string - dnsimpleBaseUrl string -) - -func init() { - dnsimpleOauthToken = os.Getenv("DNSIMPLE_OAUTH_TOKEN") - dnsimpleDomain = os.Getenv("DNSIMPLE_DOMAIN") - dnsimpleBaseUrl = "https://api.sandbox.dnsimple.com" - - if len(dnsimpleOauthToken) > 0 && len(dnsimpleDomain) > 0 { - baseUrl := os.Getenv("DNSIMPLE_BASE_URL") - - if baseUrl != "" { - dnsimpleBaseUrl = baseUrl - } - - dnsimpleLiveTest = true - } -} - -func restoreDNSimpleEnv() { - os.Setenv("DNSIMPLE_OAUTH_TOKEN", dnsimpleOauthToken) - os.Setenv("DNSIMPLE_BASE_URL", dnsimpleBaseUrl) -} - -// -// NewDNSProvider -// - -func TestNewDNSProviderValid(t *testing.T) { - defer restoreDNSimpleEnv() - - os.Setenv("DNSIMPLE_OAUTH_TOKEN", "123") - provider, err := NewDNSProvider() - - assert.NotNil(t, provider) - assert.Equal(t, "lego", provider.client.UserAgent) - assert.NoError(t, err) -} - -func TestNewDNSProviderValidWithBaseUrl(t *testing.T) { - defer restoreDNSimpleEnv() - - os.Setenv("DNSIMPLE_OAUTH_TOKEN", "123") - os.Setenv("DNSIMPLE_BASE_URL", "https://api.dnsimple.test") - provider, err := NewDNSProvider() - - assert.NotNil(t, provider) - assert.NoError(t, err) - - assert.Equal(t, provider.client.BaseURL, "https://api.dnsimple.test") -} - -func TestNewDNSProviderInvalidWithMissingOauthToken(t *testing.T) { - if dnsimpleLiveTest { - t.Skip("skipping test in live mode") - } - - defer restoreDNSimpleEnv() - - provider, err := NewDNSProvider() - - assert.Nil(t, provider) - assert.EqualError(t, err, "DNSimple OAuth token is missing") -} - -// -// NewDNSProviderCredentials -// - -func TestNewDNSProviderCredentialsValid(t *testing.T) { - provider, err := NewDNSProviderCredentials("123", "") - - assert.NotNil(t, provider) - assert.Equal(t, "lego", provider.client.UserAgent) - assert.NoError(t, err) -} - -func TestNewDNSProviderCredentialsValidWithBaseUrl(t *testing.T) { - provider, err := NewDNSProviderCredentials("123", "https://api.dnsimple.test") - - assert.NotNil(t, provider) - assert.NoError(t, err) - - assert.Equal(t, provider.client.BaseURL, "https://api.dnsimple.test") -} - -func TestNewDNSProviderCredentialsInvalidWithMissingOauthToken(t *testing.T) { - provider, err := NewDNSProviderCredentials("", "") - - assert.Nil(t, provider) - assert.EqualError(t, err, "DNSimple OAuth token is missing") -} - -// -// Present -// - -func TestLiveDNSimplePresent(t *testing.T) { - if !dnsimpleLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(dnsimpleOauthToken, dnsimpleBaseUrl) - assert.NoError(t, err) - - err = provider.Present(dnsimpleDomain, "", "123d==") - assert.NoError(t, err) -} - -// -// Cleanup -// - -func TestLiveDNSimpleCleanUp(t *testing.T) { - if !dnsimpleLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProviderCredentials(dnsimpleOauthToken, dnsimpleBaseUrl) - assert.NoError(t, err) - - err = 
provider.CleanUp(dnsimpleDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go deleted file mode 100644 index c4363a4eb..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go +++ /dev/null @@ -1,248 +0,0 @@ -package dnsmadeeasy - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "crypto/tls" - "encoding/hex" - "encoding/json" - "fmt" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses -// DNSMadeEasy's DNS API to manage TXT records for a domain. -type DNSProvider struct { - baseURL string - apiKey string - apiSecret string -} - -// Domain holds the DNSMadeEasy API representation of a Domain -type Domain struct { - ID int `json:"id"` - Name string `json:"name"` -} - -// Record holds the DNSMadeEasy API representation of a Domain Record -type Record struct { - ID int `json:"id"` - Type string `json:"type"` - Name string `json:"name"` - Value string `json:"value"` - TTL int `json:"ttl"` - SourceID int `json:"sourceId"` -} - -// NewDNSProvider returns a DNSProvider instance configured for DNSMadeEasy DNS. -// Credentials must be passed in the environment variables: DNSMADEEASY_API_KEY -// and DNSMADEEASY_API_SECRET. -func NewDNSProvider() (*DNSProvider, error) { - dnsmadeeasyAPIKey := os.Getenv("DNSMADEEASY_API_KEY") - dnsmadeeasyAPISecret := os.Getenv("DNSMADEEASY_API_SECRET") - dnsmadeeasySandbox := os.Getenv("DNSMADEEASY_SANDBOX") - - var baseURL string - - sandbox, _ := strconv.ParseBool(dnsmadeeasySandbox) - if sandbox { - baseURL = "https://api.sandbox.dnsmadeeasy.com/V2.0" - } else { - baseURL = "https://api.dnsmadeeasy.com/V2.0" - } - - return NewDNSProviderCredentials(baseURL, dnsmadeeasyAPIKey, dnsmadeeasyAPISecret) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for DNSMadeEasy. 
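The DNSMadeEasy provider removed below authenticates every API call by signing the request timestamp: sendRequest (further down in this hunk) formats time.Now().UTC() as RFC 1123, signs it with HMAC-SHA1 keyed by the API secret via computeHMAC, and sends the hex digest in the x-dnsme-hmac header alongside x-dnsme-apiKey and x-dnsme-requestDate. A self-contained sketch of that signing step; the secret is a placeholder:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"time"
)

// sign hex-encodes the HMAC-SHA1 of the RFC 1123 timestamp, keyed with the
// API secret, mirroring the computeHMAC helper in the removed provider.
func sign(timestamp, secret string) string {
	h := hmac.New(sha1.New, []byte(secret))
	h.Write([]byte(timestamp))
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	ts := time.Now().UTC().Format(time.RFC1123)
	fmt.Printf("x-dnsme-requestDate: %s\nx-dnsme-hmac: %s\n", ts, sign(ts, "placeholder-secret"))
}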
-func NewDNSProviderCredentials(baseURL, apiKey, apiSecret string) (*DNSProvider, error) { - if baseURL == "" || apiKey == "" || apiSecret == "" { - return nil, fmt.Errorf("DNS Made Easy credentials missing") - } - - return &DNSProvider{ - baseURL: baseURL, - apiKey: apiKey, - apiSecret: apiSecret, - }, nil -} - -// Present creates a TXT record using the specified parameters -func (d *DNSProvider) Present(domainName, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domainName, keyAuth) - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - // fetch the domain details - domain, err := d.getDomain(authZone) - if err != nil { - return err - } - - // create the TXT record - name := strings.Replace(fqdn, "."+authZone, "", 1) - record := &Record{Type: "TXT", Name: name, Value: value, TTL: ttl} - - err = d.createRecord(domain, record) - if err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT records matching the specified parameters -func (d *DNSProvider) CleanUp(domainName, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domainName, keyAuth) - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - // fetch the domain details - domain, err := d.getDomain(authZone) - if err != nil { - return err - } - - // find matching records - name := strings.Replace(fqdn, "."+authZone, "", 1) - records, err := d.getRecords(domain, name, "TXT") - if err != nil { - return err - } - - // delete records - for _, record := range *records { - err = d.deleteRecord(record) - if err != nil { - return err - } - } - - return nil -} - -func (d *DNSProvider) getDomain(authZone string) (*Domain, error) { - domainName := authZone[0 : len(authZone)-1] - resource := fmt.Sprintf("%s%s", "/dns/managed/name?domainname=", domainName) - - resp, err := d.sendRequest("GET", resource, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - domain := &Domain{} - err = json.NewDecoder(resp.Body).Decode(&domain) - if err != nil { - return nil, err - } - - return domain, nil -} - -func (d *DNSProvider) getRecords(domain *Domain, recordName, recordType string) (*[]Record, error) { - resource := fmt.Sprintf("%s/%d/%s%s%s%s", "/dns/managed", domain.ID, "records?recordName=", recordName, "&type=", recordType) - - resp, err := d.sendRequest("GET", resource, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - type recordsResponse struct { - Records *[]Record `json:"data"` - } - - records := &recordsResponse{} - err = json.NewDecoder(resp.Body).Decode(&records) - if err != nil { - return nil, err - } - - return records.Records, nil -} - -func (d *DNSProvider) createRecord(domain *Domain, record *Record) error { - url := fmt.Sprintf("%s/%d/%s", "/dns/managed", domain.ID, "records") - - resp, err := d.sendRequest("POST", url, record) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (d *DNSProvider) deleteRecord(record Record) error { - resource := fmt.Sprintf("%s/%d/%s/%d", "/dns/managed", record.SourceID, "records", record.ID) - - resp, err := d.sendRequest("DELETE", resource, nil) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*http.Response, error) { - url := fmt.Sprintf("%s%s", d.baseURL, resource) - - body, err := json.Marshal(payload) - if err != nil { - return nil, err - } - 
- timestamp := time.Now().UTC().Format(time.RFC1123) - signature := computeHMAC(timestamp, d.apiSecret) - - req, err := http.NewRequest(method, url, bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.Header.Set("x-dnsme-apiKey", d.apiKey) - req.Header.Set("x-dnsme-requestDate", timestamp) - req.Header.Set("x-dnsme-hmac", signature) - req.Header.Set("accept", "application/json") - req.Header.Set("content-type", "application/json") - - transport := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{ - Transport: transport, - Timeout: time.Duration(10 * time.Second), - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode > 299 { - return nil, fmt.Errorf("DNSMadeEasy API request failed with HTTP status code %d", resp.StatusCode) - } - - return resp, nil -} - -func computeHMAC(message string, secret string) string { - key := []byte(secret) - h := hmac.New(sha1.New, key) - h.Write([]byte(message)) - return hex.EncodeToString(h.Sum(nil)) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go deleted file mode 100644 index e860ecb69..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package dnsmadeeasy - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - testLive bool - testAPIKey string - testAPISecret string - testDomain string -) - -func init() { - testAPIKey = os.Getenv("DNSMADEEASY_API_KEY") - testAPISecret = os.Getenv("DNSMADEEASY_API_SECRET") - testDomain = os.Getenv("DNSMADEEASY_DOMAIN") - os.Setenv("DNSMADEEASY_SANDBOX", "true") - testLive = len(testAPIKey) > 0 && len(testAPISecret) > 0 -} - -func TestPresentAndCleanup(t *testing.T) { - if !testLive { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - - err = provider.Present(testDomain, "", "123d==") - assert.NoError(t, err) - - err = provider.CleanUp(testDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod.go b/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod.go deleted file mode 100644 index 0ce08a8bb..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package dnspod implements a DNS provider for solving the DNS-01 challenge -// using dnspod DNS. -package dnspod - -import ( - "fmt" - "os" - "strings" - - "github.com/decker502/dnspod-go" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface. -type DNSProvider struct { - client *dnspod.Client -} - -// NewDNSProvider returns a DNSProvider instance configured for dnspod. -// Credentials must be passed in the environment variables: DNSPOD_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - key := os.Getenv("DNSPOD_API_KEY") - return NewDNSProviderCredentials(key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for dnspod. -func NewDNSProviderCredentials(key string) (*DNSProvider, error) { - if key == "" { - return nil, fmt.Errorf("dnspod credentials missing") - } - - params := dnspod.CommonParams{LoginToken: key, Format: "json"} - return &DNSProvider{ - client: dnspod.NewClient(params), - }, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. 
-func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - zoneID, zoneName, err := c.getHostedZone(domain) - if err != nil { - return err - } - - recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl) - _, _, err = c.client.Domains.CreateRecord(zoneID, *recordAttributes) - if err != nil { - return fmt.Errorf("dnspod API call failed: %v", err) - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - records, err := c.findTxtRecords(domain, fqdn) - if err != nil { - return err - } - - zoneID, _, err := c.getHostedZone(domain) - if err != nil { - return err - } - - for _, rec := range records { - _, err := c.client.Domains.DeleteRecord(zoneID, rec.ID) - if err != nil { - return err - } - } - return nil -} - -func (c *DNSProvider) getHostedZone(domain string) (string, string, error) { - zones, _, err := c.client.Domains.List() - if err != nil { - return "", "", fmt.Errorf("dnspod API call failed: %v", err) - } - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return "", "", err - } - - var hostedZone dnspod.Domain - for _, zone := range zones { - if zone.Name == acme.UnFqdn(authZone) { - hostedZone = zone - } - } - - if hostedZone.ID == 0 { - return "", "", fmt.Errorf("Zone %s not found in dnspod for domain %s", authZone, domain) - - } - - return fmt.Sprintf("%v", hostedZone.ID), hostedZone.Name, nil -} - -func (c *DNSProvider) newTxtRecord(zone, fqdn, value string, ttl int) *dnspod.Record { - name := c.extractRecordName(fqdn, zone) - - return &dnspod.Record{ - Type: "TXT", - Name: name, - Value: value, - Line: "默认", - TTL: "600", - } -} - -func (c *DNSProvider) findTxtRecords(domain, fqdn string) ([]dnspod.Record, error) { - zoneID, zoneName, err := c.getHostedZone(domain) - if err != nil { - return nil, err - } - - var records []dnspod.Record - result, _, err := c.client.Domains.ListRecords(zoneID, "") - if err != nil { - return records, fmt.Errorf("dnspod API call has failed: %v", err) - } - - recordName := c.extractRecordName(fqdn, zoneName) - - for _, record := range result { - if record.Name == recordName { - records = append(records, record) - } - } - - return records, nil -} - -func (c *DNSProvider) extractRecordName(fqdn, domain string) string { - name := acme.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod_test.go b/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod_test.go deleted file mode 100644 index 3311eb0a6..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dnspod/dnspod_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package dnspod - -import ( - "github.com/stretchr/testify/assert" - "os" - "testing" - "time" -) - -var ( - dnspodLiveTest bool - dnspodAPIKey string - dnspodDomain string -) - -func init() { - dnspodAPIKey = os.Getenv("DNSPOD_API_KEY") - dnspodDomain = os.Getenv("DNSPOD_DOMAIN") - if len(dnspodAPIKey) > 0 && len(dnspodDomain) > 0 { - dnspodLiveTest = true - } -} - -func restorednspodEnv() { - os.Setenv("DNSPOD_API_KEY", dnspodAPIKey) -} - -func TestNewDNSProviderValid(t *testing.T) { - os.Setenv("DNSPOD_API_KEY", "") - _, err := NewDNSProviderCredentials("123") - assert.NoError(t, err) - restorednspodEnv() -} -func 
TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("DNSPOD_API_KEY", "123") - _, err := NewDNSProvider() - assert.NoError(t, err) - restorednspodEnv() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("DNSPOD_API_KEY", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "dnspod credentials missing") - restorednspodEnv() -} - -func TestLivednspodPresent(t *testing.T) { - if !dnspodLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(dnspodAPIKey) - assert.NoError(t, err) - - err = provider.Present(dnspodDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestLivednspodCleanUp(t *testing.T) { - if !dnspodLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProviderCredentials(dnspodAPIKey) - assert.NoError(t, err) - - err = provider.CleanUp(dnspodDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go deleted file mode 100644 index 384bc850c..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go +++ /dev/null @@ -1,274 +0,0 @@ -// Package dyn implements a DNS provider for solving the DNS-01 challenge -// using Dyn Managed DNS. -package dyn - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "os" - "strconv" - "time" - - "github.com/xenolf/lego/acme" -) - -var dynBaseURL = "https://api.dynect.net/REST" - -type dynResponse struct { - // One of 'success', 'failure', or 'incomplete' - Status string `json:"status"` - - // The structure containing the actual results of the request - Data json.RawMessage `json:"data"` - - // The ID of the job that was created in response to a request. - JobID int `json:"job_id"` - - // A list of zero or more messages - Messages json.RawMessage `json:"msgs"` -} - -// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses -// Dyn's Managed DNS API to manage TXT records for a domain. -type DNSProvider struct { - customerName string - userName string - password string - token string -} - -// NewDNSProvider returns a DNSProvider instance configured for Dyn DNS. -// Credentials must be passed in the environment variables: DYN_CUSTOMER_NAME, -// DYN_USER_NAME and DYN_PASSWORD. -func NewDNSProvider() (*DNSProvider, error) { - customerName := os.Getenv("DYN_CUSTOMER_NAME") - userName := os.Getenv("DYN_USER_NAME") - password := os.Getenv("DYN_PASSWORD") - return NewDNSProviderCredentials(customerName, userName, password) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Dyn DNS. 
-func NewDNSProviderCredentials(customerName, userName, password string) (*DNSProvider, error) { - if customerName == "" || userName == "" || password == "" { - return nil, fmt.Errorf("DynDNS credentials missing") - } - - return &DNSProvider{ - customerName: customerName, - userName: userName, - password: password, - }, nil -} - -func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*dynResponse, error) { - url := fmt.Sprintf("%s/%s", dynBaseURL, resource) - - body, err := json.Marshal(payload) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(method, url, bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - if len(d.token) > 0 { - req.Header.Set("Auth-Token", d.token) - } - - client := &http.Client{Timeout: time.Duration(10 * time.Second)} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Dyn API request failed with HTTP status code %d", resp.StatusCode) - } else if resp.StatusCode == 307 { - // TODO add support for HTTP 307 response and long running jobs - return nil, fmt.Errorf("Dyn API request returned HTTP 307. This is currently unsupported") - } - - var dynRes dynResponse - err = json.NewDecoder(resp.Body).Decode(&dynRes) - if err != nil { - return nil, err - } - - if dynRes.Status == "failure" { - // TODO add better error handling - return nil, fmt.Errorf("Dyn API request failed: %s", dynRes.Messages) - } - - return &dynRes, nil -} - -// Starts a new Dyn API Session. Authenticates using customerName, userName, -// password and receives a token to be used in for subsequent requests. -func (d *DNSProvider) login() error { - type creds struct { - Customer string `json:"customer_name"` - User string `json:"user_name"` - Pass string `json:"password"` - } - - type session struct { - Token string `json:"token"` - Version string `json:"version"` - } - - payload := &creds{Customer: d.customerName, User: d.userName, Pass: d.password} - dynRes, err := d.sendRequest("POST", "Session", payload) - if err != nil { - return err - } - - var s session - err = json.Unmarshal(dynRes.Data, &s) - if err != nil { - return err - } - - d.token = s.Token - - return nil -} - -// Destroys Dyn Session -func (d *DNSProvider) logout() error { - if len(d.token) == 0 { - // nothing to do - return nil - } - - url := fmt.Sprintf("%s/Session", dynBaseURL) - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Auth-Token", d.token) - - client := &http.Client{Timeout: time.Duration(10 * time.Second)} - resp, err := client.Do(req) - if err != nil { - return err - } - resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("Dyn API request failed to delete session with HTTP status code %d", resp.StatusCode) - } - - d.token = "" - - return nil -} - -// Present creates a TXT record using the specified parameters -func (d *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - err = d.login() - if err != nil { - return err - } - - data := map[string]interface{}{ - "rdata": map[string]string{ - "txtdata": value, - }, - "ttl": strconv.Itoa(ttl), - } - - resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) - _, err = 
d.sendRequest("POST", resource, data) - if err != nil { - return err - } - - err = d.publish(authZone, "Added TXT record for ACME dns-01 challenge using lego client") - if err != nil { - return err - } - - err = d.logout() - if err != nil { - return err - } - - return nil -} - -func (d *DNSProvider) publish(zone, notes string) error { - type publish struct { - Publish bool `json:"publish"` - Notes string `json:"notes"` - } - - pub := &publish{Publish: true, Notes: notes} - resource := fmt.Sprintf("Zone/%s/", zone) - _, err := d.sendRequest("PUT", resource, pub) - if err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - err = d.login() - if err != nil { - return err - } - - resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) - url := fmt.Sprintf("%s/%s", dynBaseURL, resource) - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Auth-Token", d.token) - - client := &http.Client{Timeout: time.Duration(10 * time.Second)} - resp, err := client.Do(req) - if err != nil { - return err - } - resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("Dyn API request failed to delete TXT record HTTP status code %d", resp.StatusCode) - } - - err = d.publish(authZone, "Removed TXT record for ACME dns-01 challenge using lego client") - if err != nil { - return err - } - - err = d.logout() - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go deleted file mode 100644 index 0d28d5d0e..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package dyn - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - dynLiveTest bool - dynCustomerName string - dynUserName string - dynPassword string - dynDomain string -) - -func init() { - dynCustomerName = os.Getenv("DYN_CUSTOMER_NAME") - dynUserName = os.Getenv("DYN_USER_NAME") - dynPassword = os.Getenv("DYN_PASSWORD") - dynDomain = os.Getenv("DYN_DOMAIN") - if len(dynCustomerName) > 0 && len(dynUserName) > 0 && len(dynPassword) > 0 && len(dynDomain) > 0 { - dynLiveTest = true - } -} - -func TestLiveDynPresent(t *testing.T) { - if !dynLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.Present(dynDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveDynCleanUp(t *testing.T) { - if !dynLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.CleanUp(dynDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go deleted file mode 100644 index 4b125e8df..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package exoscale implements a DNS provider for solving the DNS-01 challenge -// using exoscale DNS. 
-package exoscale - -import ( - "errors" - "fmt" - "os" - - "github.com/exoscale/egoscale" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface. -type DNSProvider struct { - client *egoscale.Client -} - -// Credentials must be passed in the environment variables: -// EXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT. -func NewDNSProvider() (*DNSProvider, error) { - key := os.Getenv("EXOSCALE_API_KEY") - secret := os.Getenv("EXOSCALE_API_SECRET") - endpoint := os.Getenv("EXOSCALE_ENDPOINT") - return NewDNSProviderClient(key, secret, endpoint) -} - -// Uses the supplied parameters to return a DNSProvider instance -// configured for Exoscale. -func NewDNSProviderClient(key, secret, endpoint string) (*DNSProvider, error) { - if key == "" || secret == "" { - return nil, fmt.Errorf("Exoscale credentials missing") - } - if endpoint == "" { - endpoint = "https://api.exoscale.ch/dns" - } - - return &DNSProvider{ - client: egoscale.NewClient(endpoint, key, secret), - }, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - zone, recordName, err := c.FindZoneAndRecordName(fqdn, domain) - if err != nil { - return err - } - - recordID, err := c.FindExistingRecordId(zone, recordName) - if err != nil { - return err - } - - record := egoscale.DNSRecord{ - Name: recordName, - TTL: ttl, - Content: value, - RecordType: "TXT", - } - - if recordID == 0 { - _, err := c.client.CreateRecord(zone, record) - if err != nil { - return errors.New("Error while creating DNS record: " + err.Error()) - } - } else { - record.ID = recordID - _, err := c.client.UpdateRecord(zone, record) - if err != nil { - return errors.New("Error while updating DNS record: " + err.Error()) - } - } - - return nil -} - -// CleanUp removes the record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - zone, recordName, err := c.FindZoneAndRecordName(fqdn, domain) - if err != nil { - return err - } - - recordID, err := c.FindExistingRecordId(zone, recordName) - if err != nil { - return err - } - - if recordID != 0 { - err = c.client.DeleteRecord(zone, recordID) - if err != nil { - return errors.New("Error while deleting DNS record: " + err.Error()) - } - } - - return nil -} - -// Query Exoscale to find an existing record for this name. 
-// Returns nil if no record could be found -func (c *DNSProvider) FindExistingRecordId(zone, recordName string) (int64, error) { - records, err := c.client.GetRecords(zone) - if err != nil { - return -1, errors.New("Error while retrievening DNS records: " + err.Error()) - } - for _, record := range records { - if record.Name == recordName { - return record.ID, nil - } - } - return 0, nil -} - -// Extract DNS zone and DNS entry name -func (c *DNSProvider) FindZoneAndRecordName(fqdn, domain string) (string, string, error) { - zone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return "", "", err - } - zone = acme.UnFqdn(zone) - name := acme.UnFqdn(fqdn) - name = name[:len(name)-len("."+zone)] - - return zone, name, nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale_test.go b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale_test.go deleted file mode 100644 index 343dd56f8..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package exoscale - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - exoscaleLiveTest bool - exoscaleAPIKey string - exoscaleAPISecret string - exoscaleDomain string -) - -func init() { - exoscaleAPISecret = os.Getenv("EXOSCALE_API_SECRET") - exoscaleAPIKey = os.Getenv("EXOSCALE_API_KEY") - exoscaleDomain = os.Getenv("EXOSCALE_DOMAIN") - if len(exoscaleAPIKey) > 0 && len(exoscaleAPISecret) > 0 && len(exoscaleDomain) > 0 { - exoscaleLiveTest = true - } -} - -func restoreExoscaleEnv() { - os.Setenv("EXOSCALE_API_KEY", exoscaleAPIKey) - os.Setenv("EXOSCALE_API_SECRET", exoscaleAPISecret) -} - -func TestNewDNSProviderValid(t *testing.T) { - os.Setenv("EXOSCALE_API_KEY", "") - os.Setenv("EXOSCALE_API_SECRET", "") - _, err := NewDNSProviderClient("example@example.com", "123", "") - assert.NoError(t, err) - restoreExoscaleEnv() -} -func TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("EXOSCALE_API_KEY", "example@example.com") - os.Setenv("EXOSCALE_API_SECRET", "123") - _, err := NewDNSProvider() - assert.NoError(t, err) - restoreExoscaleEnv() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("EXOSCALE_API_KEY", "") - os.Setenv("EXOSCALE_API_SECRET", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "Exoscale credentials missing") - restoreExoscaleEnv() -} - -func TestExtractRootRecordName(t *testing.T) { - provider, err := NewDNSProviderClient("example@example.com", "123", "") - assert.NoError(t, err) - - zone, recordName, err := provider.FindZoneAndRecordName("_acme-challenge.bar.com.", "bar.com") - assert.NoError(t, err) - assert.Equal(t, "bar.com", zone) - assert.Equal(t, "_acme-challenge", recordName) -} - -func TestExtractSubRecordName(t *testing.T) { - provider, err := NewDNSProviderClient("example@example.com", "123", "") - assert.NoError(t, err) - - zone, recordName, err := provider.FindZoneAndRecordName("_acme-challenge.foo.bar.com.", "foo.bar.com") - assert.NoError(t, err) - assert.Equal(t, "bar.com", zone) - assert.Equal(t, "_acme-challenge.foo", recordName) -} - -func TestLiveExoscalePresent(t *testing.T) { - if !exoscaleLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderClient(exoscaleAPIKey, exoscaleAPISecret, "") - assert.NoError(t, err) - - err = provider.Present(exoscaleDomain, "", "123d==") - assert.NoError(t, err) - - // Present Twice to handle create / update - err = 
provider.Present(exoscaleDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveExoscaleCleanUp(t *testing.T) { - if !exoscaleLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProviderClient(exoscaleAPIKey, exoscaleAPISecret, "") - assert.NoError(t, err) - - err = provider.CleanUp(exoscaleDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go deleted file mode 100644 index 422b02a21..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go +++ /dev/null @@ -1,472 +0,0 @@ -// Package gandi implements a DNS provider for solving the DNS-01 -// challenge using Gandi DNS. -package gandi - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - "sync" - "time" - - "github.com/xenolf/lego/acme" -) - -// Gandi API reference: http://doc.rpc.gandi.net/index.html -// Gandi API domain examples: http://doc.rpc.gandi.net/domain/faq.html - -var ( - // endpoint is the Gandi XML-RPC endpoint used by Present and - // CleanUp. It is overridden during tests. - endpoint = "https://rpc.gandi.net/xmlrpc/" - // findZoneByFqdn determines the DNS zone of an fqdn. It is overridden - // during tests. - findZoneByFqdn = acme.FindZoneByFqdn -) - -// inProgressInfo contains information about an in-progress challenge -type inProgressInfo struct { - zoneID int // zoneID of gandi zone to restore in CleanUp - newZoneID int // zoneID of temporary gandi zone containing TXT record - authZone string // the domain name registered at gandi with trailing "." -} - -// DNSProvider is an implementation of the -// acme.ChallengeProviderTimeout interface that uses Gandi's XML-RPC -// API to manage TXT records for a domain. -type DNSProvider struct { - apiKey string - inProgressFQDNs map[string]inProgressInfo - inProgressAuthZones map[string]struct{} - inProgressMu sync.Mutex -} - -// NewDNSProvider returns a DNSProvider instance configured for Gandi. -// Credentials must be passed in the environment variable: GANDI_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - apiKey := os.Getenv("GANDI_API_KEY") - return NewDNSProviderCredentials(apiKey) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Gandi. -func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) { - if apiKey == "" { - return nil, fmt.Errorf("No Gandi API Key given") - } - return &DNSProvider{ - apiKey: apiKey, - inProgressFQDNs: make(map[string]inProgressInfo), - inProgressAuthZones: make(map[string]struct{}), - }, nil -} - -// Present creates a TXT record using the specified parameters. It -// does this by creating and activating a new temporary Gandi DNS -// zone. This new zone contains the TXT record. 
-func (d *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - if ttl < 300 { - ttl = 300 // 300 is gandi minimum value for ttl - } - // find authZone and Gandi zone_id for fqdn - authZone, err := findZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Gandi DNS: findZoneByFqdn failure: %v", err) - } - zoneID, err := d.getZoneID(authZone) - if err != nil { - return err - } - // determine name of TXT record - if !strings.HasSuffix( - strings.ToLower(fqdn), strings.ToLower("."+authZone)) { - return fmt.Errorf( - "Gandi DNS: unexpected authZone %s for fqdn %s", authZone, fqdn) - } - name := fqdn[:len(fqdn)-len("."+authZone)] - // acquire lock and check there is not a challenge already in - // progress for this value of authZone - d.inProgressMu.Lock() - defer d.inProgressMu.Unlock() - if _, ok := d.inProgressAuthZones[authZone]; ok { - return fmt.Errorf( - "Gandi DNS: challenge already in progress for authZone %s", - authZone) - } - // perform API actions to create and activate new gandi zone - // containing the required TXT record - newZoneName := fmt.Sprintf( - "%s [ACME Challenge %s]", - acme.UnFqdn(authZone), time.Now().Format(time.RFC822Z)) - newZoneID, err := d.cloneZone(zoneID, newZoneName) - if err != nil { - return err - } - newZoneVersion, err := d.newZoneVersion(newZoneID) - if err != nil { - return err - } - err = d.addTXTRecord(newZoneID, newZoneVersion, name, value, ttl) - if err != nil { - return err - } - err = d.setZoneVersion(newZoneID, newZoneVersion) - if err != nil { - return err - } - err = d.setZone(authZone, newZoneID) - if err != nil { - return err - } - // save data necessary for CleanUp - d.inProgressFQDNs[fqdn] = inProgressInfo{ - zoneID: zoneID, - newZoneID: newZoneID, - authZone: authZone, - } - d.inProgressAuthZones[authZone] = struct{}{} - return nil -} - -// CleanUp removes the TXT record matching the specified -// parameters. It does this by restoring the old Gandi DNS zone and -// removing the temporary one created by Present. -func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - // acquire lock and retrieve zoneID, newZoneID and authZone - d.inProgressMu.Lock() - defer d.inProgressMu.Unlock() - if _, ok := d.inProgressFQDNs[fqdn]; !ok { - // if there is no cleanup information then just return - return nil - } - zoneID := d.inProgressFQDNs[fqdn].zoneID - newZoneID := d.inProgressFQDNs[fqdn].newZoneID - authZone := d.inProgressFQDNs[fqdn].authZone - delete(d.inProgressFQDNs, fqdn) - delete(d.inProgressAuthZones, authZone) - // perform API actions to restore old gandi zone for authZone - err := d.setZone(authZone, zoneID) - if err != nil { - return err - } - err = d.deleteZone(newZoneID) - if err != nil { - return err - } - return nil -} - -// Timeout returns the values (40*time.Minute, 60*time.Second) which -// are used by the acme package as timeout and check interval values -// when checking for DNS record propagation with Gandi. 
-func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 40 * time.Minute, 60 * time.Second -} - -// types for XML-RPC method calls and parameters - -type param interface { - param() -} -type paramString struct { - XMLName xml.Name `xml:"param"` - Value string `xml:"value>string"` -} -type paramInt struct { - XMLName xml.Name `xml:"param"` - Value int `xml:"value>int"` -} - -type structMember interface { - structMember() -} -type structMemberString struct { - Name string `xml:"name"` - Value string `xml:"value>string"` -} -type structMemberInt struct { - Name string `xml:"name"` - Value int `xml:"value>int"` -} -type paramStruct struct { - XMLName xml.Name `xml:"param"` - StructMembers []structMember `xml:"value>struct>member"` -} - -func (p paramString) param() {} -func (p paramInt) param() {} -func (m structMemberString) structMember() {} -func (m structMemberInt) structMember() {} -func (p paramStruct) param() {} - -type methodCall struct { - XMLName xml.Name `xml:"methodCall"` - MethodName string `xml:"methodName"` - Params []param `xml:"params"` -} - -// types for XML-RPC responses - -type response interface { - faultCode() int - faultString() string -} - -type responseFault struct { - FaultCode int `xml:"fault>value>struct>member>value>int"` - FaultString string `xml:"fault>value>struct>member>value>string"` -} - -func (r responseFault) faultCode() int { return r.FaultCode } -func (r responseFault) faultString() string { return r.FaultString } - -type responseStruct struct { - responseFault - StructMembers []struct { - Name string `xml:"name"` - ValueInt int `xml:"value>int"` - } `xml:"params>param>value>struct>member"` -} - -type responseInt struct { - responseFault - Value int `xml:"params>param>value>int"` -} - -type responseBool struct { - responseFault - Value bool `xml:"params>param>value>boolean"` -} - -// POSTing/Marshalling/Unmarshalling - -type rpcError struct { - faultCode int - faultString string -} - -func (e rpcError) Error() string { - return fmt.Sprintf( - "Gandi DNS: RPC Error: (%d) %s", e.faultCode, e.faultString) -} - -func httpPost(url string, bodyType string, body io.Reader) ([]byte, error) { - client := http.Client{Timeout: 60 * time.Second} - resp, err := client.Post(url, bodyType, body) - if err != nil { - return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err) - } - defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err) - } - return b, nil -} - -// rpcCall makes an XML-RPC call to Gandi's RPC endpoint by -// marshalling the data given in the call argument to XML and sending -// that via HTTP Post to Gandi. The response is then unmarshalled into -// the resp argument. -func rpcCall(call *methodCall, resp response) error { - // marshal - b, err := xml.MarshalIndent(call, "", " ") - if err != nil { - return fmt.Errorf("Gandi DNS: Marshal Error: %v", err) - } - // post - b = append([]byte(``+"\n"), b...) 
- respBody, err := httpPost(endpoint, "text/xml", bytes.NewReader(b)) - if err != nil { - return err - } - // unmarshal - err = xml.Unmarshal(respBody, resp) - if err != nil { - return fmt.Errorf("Gandi DNS: Unmarshal Error: %v", err) - } - if resp.faultCode() != 0 { - return rpcError{ - faultCode: resp.faultCode(), faultString: resp.faultString()} - } - return nil -} - -// functions to perform API actions - -func (d *DNSProvider) getZoneID(domain string) (int, error) { - resp := &responseStruct{} - err := rpcCall(&methodCall{ - MethodName: "domain.info", - Params: []param{ - paramString{Value: d.apiKey}, - paramString{Value: domain}, - }, - }, resp) - if err != nil { - return 0, err - } - var zoneID int - for _, member := range resp.StructMembers { - if member.Name == "zone_id" { - zoneID = member.ValueInt - } - } - if zoneID == 0 { - return 0, fmt.Errorf( - "Gandi DNS: Could not determine zone_id for %s", domain) - } - return zoneID, nil -} - -func (d *DNSProvider) cloneZone(zoneID int, name string) (int, error) { - resp := &responseStruct{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.clone", - Params: []param{ - paramString{Value: d.apiKey}, - paramInt{Value: zoneID}, - paramInt{Value: 0}, - paramStruct{ - StructMembers: []structMember{ - structMemberString{ - Name: "name", - Value: name, - }}, - }, - }, - }, resp) - if err != nil { - return 0, err - } - var newZoneID int - for _, member := range resp.StructMembers { - if member.Name == "id" { - newZoneID = member.ValueInt - } - } - if newZoneID == 0 { - return 0, fmt.Errorf("Gandi DNS: Could not determine cloned zone_id") - } - return newZoneID, nil -} - -func (d *DNSProvider) newZoneVersion(zoneID int) (int, error) { - resp := &responseInt{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.version.new", - Params: []param{ - paramString{Value: d.apiKey}, - paramInt{Value: zoneID}, - }, - }, resp) - if err != nil { - return 0, err - } - if resp.Value == 0 { - return 0, fmt.Errorf("Gandi DNS: Could not create new zone version") - } - return resp.Value, nil -} - -func (d *DNSProvider) addTXTRecord(zoneID int, version int, name string, value string, ttl int) error { - resp := &responseStruct{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.record.add", - Params: []param{ - paramString{Value: d.apiKey}, - paramInt{Value: zoneID}, - paramInt{Value: version}, - paramStruct{ - StructMembers: []structMember{ - structMemberString{ - Name: "type", - Value: "TXT", - }, structMemberString{ - Name: "name", - Value: name, - }, structMemberString{ - Name: "value", - Value: value, - }, structMemberInt{ - Name: "ttl", - Value: ttl, - }}, - }, - }, - }, resp) - if err != nil { - return err - } - return nil -} - -func (d *DNSProvider) setZoneVersion(zoneID int, version int) error { - resp := &responseBool{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.version.set", - Params: []param{ - paramString{Value: d.apiKey}, - paramInt{Value: zoneID}, - paramInt{Value: version}, - }, - }, resp) - if err != nil { - return err - } - if !resp.Value { - return fmt.Errorf("Gandi DNS: could not set zone version") - } - return nil -} - -func (d *DNSProvider) setZone(domain string, zoneID int) error { - resp := &responseStruct{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.set", - Params: []param{ - paramString{Value: d.apiKey}, - paramString{Value: domain}, - paramInt{Value: zoneID}, - }, - }, resp) - if err != nil { - return err - } - var respZoneID int - for _, member := range resp.StructMembers { - if member.Name 
== "zone_id" { - respZoneID = member.ValueInt - } - } - if respZoneID != zoneID { - return fmt.Errorf( - "Gandi DNS: Could not set new zone_id for %s", domain) - } - return nil -} - -func (d *DNSProvider) deleteZone(zoneID int) error { - resp := &responseBool{} - err := rpcCall(&methodCall{ - MethodName: "domain.zone.delete", - Params: []param{ - paramString{Value: d.apiKey}, - paramInt{Value: zoneID}, - }, - }, resp) - if err != nil { - return err - } - if !resp.Value { - return fmt.Errorf("Gandi DNS: could not delete zone_id") - } - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go deleted file mode 100644 index 451333ca1..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go +++ /dev/null @@ -1,939 +0,0 @@ -package gandi - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "regexp" - "strings" - "testing" - - "github.com/xenolf/lego/acme" -) - -// stagingServer is the Let's Encrypt staging server used by the live test -const stagingServer = "https://acme-staging.api.letsencrypt.org/directory" - -// user implements acme.User and is used by the live test -type user struct { - Email string - Registration *acme.RegistrationResource - key crypto.PrivateKey -} - -func (u *user) GetEmail() string { - return u.Email -} -func (u *user) GetRegistration() *acme.RegistrationResource { - return u.Registration -} -func (u *user) GetPrivateKey() crypto.PrivateKey { - return u.key -} - -// TestDNSProvider runs Present and CleanUp against a fake Gandi RPC -// Server, whose responses are predetermined for particular requests. -func TestDNSProvider(t *testing.T) { - fakeAPIKey := "123412341234123412341234" - fakeKeyAuth := "XXXX" - provider, err := NewDNSProviderCredentials(fakeAPIKey) - if err != nil { - t.Fatal(err) - } - regexpDate, err := regexp.Compile(`\[ACME Challenge [^\]:]*:[^\]]*\]`) - if err != nil { - t.Fatal(err) - } - // start fake RPC server - fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Content-Type") != "text/xml" { - t.Fatalf("Content-Type: text/xml header not found") - } - req, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - req = regexpDate.ReplaceAllLiteral( - req, []byte(`[ACME Challenge 01 Jan 16 00:00 +0000]`)) - resp, ok := serverResponses[string(req)] - if !ok { - t.Fatalf("Server response for request not found") - } - _, err = io.Copy(w, strings.NewReader(resp)) - if err != nil { - t.Fatal(err) - } - })) - defer fakeServer.Close() - // define function to override findZoneByFqdn with - fakeFindZoneByFqdn := func(fqdn string, nameserver []string) (string, error) { - return "example.com.", nil - } - // override gandi endpoint and findZoneByFqdn function - savedEndpoint, savedFindZoneByFqdn := endpoint, findZoneByFqdn - defer func() { - endpoint, findZoneByFqdn = savedEndpoint, savedFindZoneByFqdn - }() - endpoint, findZoneByFqdn = fakeServer.URL+"/", fakeFindZoneByFqdn - // run Present - err = provider.Present("abc.def.example.com", "", fakeKeyAuth) - if err != nil { - t.Fatal(err) - } - // run CleanUp - err = provider.CleanUp("abc.def.example.com", "", fakeKeyAuth) - if err != nil { - t.Fatal(err) - } -} - -// TestDNSProviderLive performs a live test to obtain a certificate -// using the Let's Encrypt staging server. 
It runs provided that both -// the environment variables GANDI_API_KEY and GANDI_TEST_DOMAIN are -// set. Otherwise the test is skipped. -// -// To complete this test, go test must be run with the -timeout=40m -// flag, since the default timeout of 10m is insufficient. -func TestDNSProviderLive(t *testing.T) { - apiKey := os.Getenv("GANDI_API_KEY") - domain := os.Getenv("GANDI_TEST_DOMAIN") - if apiKey == "" || domain == "" { - t.Skip("skipping live test") - } - // create a user. - const rsaKeySize = 2048 - privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize) - if err != nil { - t.Fatal(err) - } - myUser := user{ - Email: "test@example.com", - key: privateKey, - } - // create a client using staging server - client, err := acme.NewClient(stagingServer, &myUser, acme.RSA2048) - if err != nil { - t.Fatal(err) - } - provider, err := NewDNSProviderCredentials(apiKey) - if err != nil { - t.Fatal(err) - } - err = client.SetChallengeProvider(acme.DNS01, provider) - if err != nil { - t.Fatal(err) - } - client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01}) - // register and agree tos - reg, err := client.Register() - if err != nil { - t.Fatal(err) - } - myUser.Registration = reg - err = client.AgreeToTOS() - if err != nil { - t.Fatal(err) - } - // complete the challenge - bundle := false - _, failures := client.ObtainCertificate([]string{domain}, bundle, nil, false) - if len(failures) > 0 { - t.Fatal(failures) - } -} - -// serverResponses is the XML-RPC Request->Response map used by the -// fake RPC server. It was generated by recording a real RPC session -// which resulted in the successful issue of a cert, and then -// anonymizing the RPC data. -var serverResponses = map[string]string{ - // Present Request->Response 1 (getZoneID) - ` - - domain.info - - - 123412341234123412341234 - - - - - example.com. 
- - -`: ` - - - - - -date_updated -20160216T16:14:23 - - -date_delete -20170331T16:04:06 - - -is_premium -0 - - -date_hold_begin -20170215T02:04:06 - - -date_registry_end -20170215T02:04:06 - - -authinfo_expiration_date -20161211T21:31:20 - - -contacts - - -owner - - -handle -LEGO-GANDI - - -id -111111 - - - - -admin - - -handle -LEGO-GANDI - - -id -111111 - - - - -bill - - -handle -LEGO-GANDI - - -id -111111 - - - - -tech - - -handle -LEGO-GANDI - - -id -111111 - - - - -reseller - - - - -nameservers - -a.dns.gandi.net -b.dns.gandi.net -c.dns.gandi.net - - - -date_restore_end -20170501T02:04:06 - - -id -2222222 - - -authinfo -ABCDABCDAB - - -status - -clientTransferProhibited -serverTransferProhibited - - - -tags - - - - -date_hold_end -20170401T02:04:06 - - -services - -gandidns -gandimail - - - -date_pending_delete_end -20170506T02:04:06 - - -zone_id -1234567 - - -date_renew_begin -20120101T00:00:00 - - -fqdn -example.com - - -autorenew - - -date_registry_creation -20150215T02:04:06 - - -tld -org - - -date_created -20150215T03:04:06 - - - - - -`, - // Present Request->Response 2 (cloneZone) - ` - - domain.zone.clone - - - 123412341234123412341234 - - - - - 1234567 - - - - - 0 - - - - - - - name - - example.com [ACME Challenge 01 Jan 16 00:00 +0000] - - - - - -`: ` - - - - - -name -example.com [ACME Challenge 01 Jan 16 00:00 +0000] - - -versions - -1 - - - -date_updated -20160216T16:24:29 - - -id -7654321 - - -owner -LEGO-GANDI - - -version -1 - - -domains -0 - - -public -0 - - - - - -`, - // Present Request->Response 3 (newZoneVersion) - ` - - domain.zone.version.new - - - 123412341234123412341234 - - - - - 7654321 - - -`: ` - - - -2 - - - -`, - // Present Request->Response 4 (addTXTRecord) - ` - - domain.zone.record.add - - - 123412341234123412341234 - - - - - 7654321 - - - - - 2 - - - - - - - type - - TXT - - - - name - - _acme-challenge.abc.def - - - - value - - ezRpBPY8wH8djMLYjX2uCKPwiKDkFZ1SFMJ6ZXGlHrQ - - - - ttl - - 300 - - - - - -`: ` - - - - - -name -_acme-challenge.abc.def - - -type -TXT - - -id -333333333 - - -value -"ezRpBPY8wH8djMLYjX2uCKPwiKDkFZ1SFMJ6ZXGlHrQ" - - -ttl -300 - - - - - -`, - // Present Request->Response 5 (setZoneVersion) - ` - - domain.zone.version.set - - - 123412341234123412341234 - - - - - 7654321 - - - - - 2 - - -`: ` - - - -1 - - - -`, - // Present Request->Response 6 (setZone) - ` - - domain.zone.set - - - 123412341234123412341234 - - - - - example.com. 
- - - - - 7654321 - - -`: ` - - - - - -date_updated -20160216T16:14:23 - - -date_delete -20170331T16:04:06 - - -is_premium -0 - - -date_hold_begin -20170215T02:04:06 - - -date_registry_end -20170215T02:04:06 - - -authinfo_expiration_date -20161211T21:31:20 - - -contacts - - -owner - - -handle -LEGO-GANDI - - -id -111111 - - - - -admin - - -handle -LEGO-GANDI - - -id -111111 - - - - -bill - - -handle -LEGO-GANDI - - -id -111111 - - - - -tech - - -handle -LEGO-GANDI - - -id -111111 - - - - -reseller - - - - -nameservers - -a.dns.gandi.net -b.dns.gandi.net -c.dns.gandi.net - - - -date_restore_end -20170501T02:04:06 - - -id -2222222 - - -authinfo -ABCDABCDAB - - -status - -clientTransferProhibited -serverTransferProhibited - - - -tags - - - - -date_hold_end -20170401T02:04:06 - - -services - -gandidns -gandimail - - - -date_pending_delete_end -20170506T02:04:06 - - -zone_id -7654321 - - -date_renew_begin -20120101T00:00:00 - - -fqdn -example.com - - -autorenew - - -date_registry_creation -20150215T02:04:06 - - -tld -org - - -date_created -20150215T03:04:06 - - - - - -`, - // CleanUp Request->Response 1 (setZone) - ` - - domain.zone.set - - - 123412341234123412341234 - - - - - example.com. - - - - - 1234567 - - -`: ` - - - - - -date_updated -20160216T16:24:38 - - -date_delete -20170331T16:04:06 - - -is_premium -0 - - -date_hold_begin -20170215T02:04:06 - - -date_registry_end -20170215T02:04:06 - - -authinfo_expiration_date -20161211T21:31:20 - - -contacts - - -owner - - -handle -LEGO-GANDI - - -id -111111 - - - - -admin - - -handle -LEGO-GANDI - - -id -111111 - - - - -bill - - -handle -LEGO-GANDI - - -id -111111 - - - - -tech - - -handle -LEGO-GANDI - - -id -111111 - - - - -reseller - - - - -nameservers - -a.dns.gandi.net -b.dns.gandi.net -c.dns.gandi.net - - - -date_restore_end -20170501T02:04:06 - - -id -2222222 - - -authinfo -ABCDABCDAB - - -status - -clientTransferProhibited -serverTransferProhibited - - - -tags - - - - -date_hold_end -20170401T02:04:06 - - -services - -gandidns -gandimail - - - -date_pending_delete_end -20170506T02:04:06 - - -zone_id -1234567 - - -date_renew_begin -20120101T00:00:00 - - -fqdn -example.com - - -autorenew - - -date_registry_creation -20150215T02:04:06 - - -tld -org - - -date_created -20150215T03:04:06 - - - - - -`, - // CleanUp Request->Response 2 (deleteZone) - ` - - domain.zone.delete - - - 123412341234123412341234 - - - - - 7654321 - - -`: ` - - - -1 - - - -`, -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy.go b/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy.go deleted file mode 100644 index 4112f6628..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy.go +++ /dev/null @@ -1,155 +0,0 @@ -// Package godaddy implements a DNS provider for solving the DNS-01 challenge using godaddy DNS. -package godaddy - -import ( - "fmt" - "io" - "net/http" - "os" - "time" - - "bytes" - "encoding/json" - "github.com/xenolf/lego/acme" - "io/ioutil" - "strings" -) - -// GoDaddyAPIURL represents the API endpoint to call. -const apiURL = "https://api.godaddy.com" - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -type DNSProvider struct { - apiKey string - apiSecret string -} - -// NewDNSProvider returns a DNSProvider instance configured for godaddy. -// Credentials must be passed in the environment variables: GODADDY_API_KEY -// and GODADDY_API_SECRET. 
-func NewDNSProvider() (*DNSProvider, error) { - apikey := os.Getenv("GODADDY_API_KEY") - secret := os.Getenv("GODADDY_API_SECRET") - return NewDNSProviderCredentials(apikey, secret) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for godaddy. -func NewDNSProviderCredentials(apiKey, apiSecret string) (*DNSProvider, error) { - if apiKey == "" || apiSecret == "" { - return nil, fmt.Errorf("GoDaddy credentials missing") - } - - return &DNSProvider{apiKey, apiSecret}, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Adjusting here to cope with spikes in propagation times. -func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 120 * time.Second, 2 * time.Second -} - -func (c *DNSProvider) extractRecordName(fqdn, domain string) string { - name := acme.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} - -// Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - domainZone, err := c.getZone(fqdn) - if err != nil { - return err - } - - if ttl < 600 { - ttl = 600 - } - - recordName := c.extractRecordName(fqdn, domainZone) - rec := []DNSRecord{ - { - Type: "TXT", - Name: recordName, - Data: value, - Ttl: ttl, - }, - } - - return c.updateRecords(rec, domainZone, recordName) -} - -func (c *DNSProvider) updateRecords(records []DNSRecord, domainZone string, recordName string) error { - body, err := json.Marshal(records) - if err != nil { - return err - } - - var resp *http.Response - resp, err = c.makeRequest("PUT", fmt.Sprintf("/v1/domains/%s/records/TXT/%s", domainZone, recordName), bytes.NewReader(body)) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - bodyBytes, _ := ioutil.ReadAll(resp.Body) - return fmt.Errorf("Could not create record %v; Status: %v; Body: %s\n", string(body), resp.StatusCode, string(bodyBytes)) - } - return nil -} - -// CleanUp sets null value in the TXT DNS record as GoDaddy has no proper DELETE record method -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - domainZone, err := c.getZone(fqdn) - if err != nil { - return err - } - - recordName := c.extractRecordName(fqdn, domainZone) - rec := []DNSRecord{ - { - Type: "TXT", - Name: recordName, - Data: "null", - }, - } - - return c.updateRecords(rec, domainZone, recordName) -} - -func (c *DNSProvider) getZone(fqdn string) (string, error) { - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return "", err - } - - return acme.UnFqdn(authZone), nil -} - -func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest(method, fmt.Sprintf("%s%s", apiURL, uri), body) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", "application/json") - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("sso-key %s:%s", c.apiKey, c.apiSecret)) - - client := http.Client{Timeout: 30 * time.Second} - return client.Do(req) -} - -type DNSRecord struct { - Type string `json:"type"` - Name string `json:"name"` - Data string `json:"data"` - Priority int `json:"priority,omitempty"` - Ttl int `json:"ttl,omitempty"` -} diff --git 
a/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy_test.go b/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy_test.go deleted file mode 100644 index de84d827e..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/godaddy/godaddy_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package godaddy - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - godaddyAPIKey string - godaddyAPISecret string - godaddyDomain string - godaddyLiveTest bool -) - -func init() { - godaddyAPIKey = os.Getenv("GODADDY_API_KEY") - godaddyAPISecret = os.Getenv("GODADDY_API_SECRET") - godaddyDomain = os.Getenv("GODADDY_DOMAIN") - - if len(godaddyAPIKey) > 0 && len(godaddyAPISecret) > 0 && len(godaddyDomain) > 0 { - godaddyLiveTest = true - } -} - -func TestNewDNSProvider(t *testing.T) { - provider, err := NewDNSProvider() - - if !godaddyLiveTest { - assert.Error(t, err) - } else { - assert.NotNil(t, provider) - assert.NoError(t, err) - } -} - -func TestDNSProvider_Present(t *testing.T) { - if !godaddyLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.Present(godaddyDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestDNSProvider_CleanUp(t *testing.T) { - if !godaddyLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.CleanUp(godaddyDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go deleted file mode 100644 index ba753f6dc..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package googlecloud implements a DNS provider for solving the DNS-01 -// challenge using Google Cloud DNS. -package googlecloud - -import ( - "fmt" - "io/ioutil" - "os" - "time" - - "github.com/xenolf/lego/acme" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - - "google.golang.org/api/dns/v1" -) - -// DNSProvider is an implementation of the DNSProvider interface. -type DNSProvider struct { - project string - client *dns.Service -} - -// NewDNSProvider returns a DNSProvider instance configured for Google Cloud -// DNS. Project name must be passed in the environment variable: GCE_PROJECT. -// A Service Account file can be passed in the environment variable: -// GCE_SERVICE_ACCOUNT_FILE -func NewDNSProvider() (*DNSProvider, error) { - project := os.Getenv("GCE_PROJECT") - if saFile, ok := os.LookupEnv("GCE_SERVICE_ACCOUNT_FILE"); ok { - return NewDNSProviderServiceAccount(project, saFile) - } - return NewDNSProviderCredentials(project) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Google Cloud DNS. 
-func NewDNSProviderCredentials(project string) (*DNSProvider, error) { - if project == "" { - return nil, fmt.Errorf("Google Cloud project name missing") - } - - client, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope) - if err != nil { - return nil, fmt.Errorf("Unable to get Google Cloud client: %v", err) - } - svc, err := dns.New(client) - if err != nil { - return nil, fmt.Errorf("Unable to create Google Cloud DNS service: %v", err) - } - return &DNSProvider{ - project: project, - client: svc, - }, nil -} - -// NewDNSProviderServiceAccount uses the supplied service account JSON file to -// return a DNSProvider instance configured for Google Cloud DNS. -func NewDNSProviderServiceAccount(project string, saFile string) (*DNSProvider, error) { - if project == "" { - return nil, fmt.Errorf("Google Cloud project name missing") - } - if saFile == "" { - return nil, fmt.Errorf("Google Cloud Service Account file missing") - } - - dat, err := ioutil.ReadFile(saFile) - if err != nil { - return nil, fmt.Errorf("Unable to read Service Account file: %v", err) - } - conf, err := google.JWTConfigFromJSON(dat, dns.NdevClouddnsReadwriteScope) - if err != nil { - return nil, fmt.Errorf("Unable to acquire config: %v", err) - } - client := conf.Client(oauth2.NoContext) - - svc, err := dns.New(client) - if err != nil { - return nil, fmt.Errorf("Unable to create Google Cloud DNS service: %v", err) - } - return &DNSProvider{ - project: project, - client: svc, - }, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZone(domain) - if err != nil { - return err - } - - rec := &dns.ResourceRecordSet{ - Name: fqdn, - Rrdatas: []string{value}, - Ttl: int64(ttl), - Type: "TXT", - } - change := &dns.Change{ - Additions: []*dns.ResourceRecordSet{rec}, - } - - // Look for existing records. - list, err := c.client.ResourceRecordSets.List(c.project, zone).Name(fqdn).Type("TXT").Do() - if err != nil { - return err - } - if len(list.Rrsets) > 0 { - // Attempt to delete the existing records when adding our new one. - change.Deletions = list.Rrsets - } - - chg, err := c.client.Changes.Create(c.project, zone, change).Do() - if err != nil { - return err - } - - // wait for change to be acknowledged - for chg.Status == "pending" { - time.Sleep(time.Second) - - chg, err = c.client.Changes.Get(c.project, zone, chg.Id).Do() - if err != nil { - return err - } - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZone(domain) - if err != nil { - return err - } - - records, err := c.findTxtRecords(zone, fqdn) - if err != nil { - return err - } - - for _, rec := range records { - change := &dns.Change{ - Deletions: []*dns.ResourceRecordSet{rec}, - } - _, err = c.client.Changes.Create(c.project, zone, change).Do() - if err != nil { - return err - } - } - return nil -} - -// Timeout customizes the timeout values used by the ACME package for checking -// DNS record validity. 
-func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 180 * time.Second, 5 * time.Second -} - -// getHostedZone returns the managed-zone -func (c *DNSProvider) getHostedZone(domain string) (string, error) { - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return "", err - } - - zones, err := c.client.ManagedZones. - List(c.project). - DnsName(authZone). - Do() - if err != nil { - return "", fmt.Errorf("GoogleCloud API call failed: %v", err) - } - - if len(zones.ManagedZones) == 0 { - return "", fmt.Errorf("No matching GoogleCloud domain found for domain %s", authZone) - } - - return zones.ManagedZones[0].Name, nil -} - -func (c *DNSProvider) findTxtRecords(zone, fqdn string) ([]*dns.ResourceRecordSet, error) { - - recs, err := c.client.ResourceRecordSets.List(c.project, zone).Do() - if err != nil { - return nil, err - } - - found := []*dns.ResourceRecordSet{} - for _, r := range recs.Rrsets { - if r.Type == "TXT" && r.Name == fqdn { - found = append(found, r) - } - } - - return found, nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go deleted file mode 100644 index 75a10d9a4..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package googlecloud - -import ( - "os" - "testing" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/api/dns/v1" - - "github.com/stretchr/testify/assert" -) - -var ( - gcloudLiveTest bool - gcloudProject string - gcloudDomain string -) - -func init() { - gcloudProject = os.Getenv("GCE_PROJECT") - gcloudDomain = os.Getenv("GCE_DOMAIN") - _, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope) - if err == nil && len(gcloudProject) > 0 && len(gcloudDomain) > 0 { - gcloudLiveTest = true - } -} - -func restoreGCloudEnv() { - os.Setenv("GCE_PROJECT", gcloudProject) -} - -func TestNewDNSProviderValid(t *testing.T) { - if !gcloudLiveTest { - t.Skip("skipping live test (requires credentials)") - } - os.Setenv("GCE_PROJECT", "") - _, err := NewDNSProviderCredentials("my-project") - assert.NoError(t, err) - restoreGCloudEnv() -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - if !gcloudLiveTest { - t.Skip("skipping live test (requires credentials)") - } - os.Setenv("GCE_PROJECT", "my-project") - _, err := NewDNSProvider() - assert.NoError(t, err) - restoreGCloudEnv() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("GCE_PROJECT", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "Google Cloud project name missing") - restoreGCloudEnv() -} - -func TestLiveGoogleCloudPresent(t *testing.T) { - if !gcloudLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(gcloudProject) - assert.NoError(t, err) - - err = provider.Present(gcloudDomain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveGoogleCloudPresentMultiple(t *testing.T) { - if !gcloudLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(gcloudProject) - assert.NoError(t, err) - - // Check that we're able to create multiple entries - err = provider.Present(gcloudDomain, "1", "123d==") - err = provider.Present(gcloudDomain, "2", "123d==") - assert.NoError(t, err) -} - -func TestLiveGoogleCloudCleanUp(t *testing.T) { - if !gcloudLiveTest { - t.Skip("skipping live 
test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProviderCredentials(gcloudProject) - assert.NoError(t, err) - - err = provider.CleanUp(gcloudDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go b/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go deleted file mode 100644 index a91d2b489..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go +++ /dev/null @@ -1,131 +0,0 @@ -// Package linode implements a DNS provider for solving the DNS-01 challenge -// using Linode DNS. -package linode - -import ( - "errors" - "os" - "strings" - "time" - - "github.com/timewasted/linode/dns" - "github.com/xenolf/lego/acme" -) - -const ( - dnsMinTTLSecs = 300 - dnsUpdateFreqMins = 15 - dnsUpdateFudgeSecs = 120 -) - -type hostedZoneInfo struct { - domainId int - resourceName string -} - -// DNSProvider implements the acme.ChallengeProvider interface. -type DNSProvider struct { - linode *dns.DNS -} - -// NewDNSProvider returns a DNSProvider instance configured for Linode. -// Credentials must be passed in the environment variable: LINODE_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - apiKey := os.Getenv("LINODE_API_KEY") - return NewDNSProviderCredentials(apiKey) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Linode. -func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) { - if len(apiKey) == 0 { - return nil, errors.New("Linode credentials missing") - } - - return &DNSProvider{ - linode: dns.New(apiKey), - }, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Adjusting here to cope with spikes in propagation times. -func (p *DNSProvider) Timeout() (timeout, interval time.Duration) { - // Since Linode only updates their zone files every X minutes, we need - // to figure out how many minutes we have to wait until we hit the next - // interval of X. We then wait another couple of minutes, just to be - // safe. Hopefully at some point during all of this, the record will - // have propagated throughout Linode's network. - minsRemaining := dnsUpdateFreqMins - (time.Now().Minute() % dnsUpdateFreqMins) - - timeout = (time.Duration(minsRemaining) * time.Minute) + - (dnsMinTTLSecs * time.Second) + - (dnsUpdateFudgeSecs * time.Second) - interval = 15 * time.Second - return -} - -// Present creates a TXT record using the specified parameters. -func (p *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zone, err := p.getHostedZoneInfo(fqdn) - if err != nil { - return err - } - - if _, err = p.linode.CreateDomainResourceTXT(zone.domainId, acme.UnFqdn(fqdn), value, 60); err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (p *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zone, err := p.getHostedZoneInfo(fqdn) - if err != nil { - return err - } - - // Get all TXT records for the specified domain. - resources, err := p.linode.GetResourcesByType(zone.domainId, "TXT") - if err != nil { - return err - } - - // Remove the specified resource, if it exists. 
- for _, resource := range resources { - if resource.Name == zone.resourceName && resource.Target == value { - resp, err := p.linode.DeleteDomainResource(resource.DomainID, resource.ResourceID) - if err != nil { - return err - } - if resp.ResourceID != resource.ResourceID { - return errors.New("Error deleting resource: resource IDs do not match!") - } - break - } - } - - return nil -} - -func (p *DNSProvider) getHostedZoneInfo(fqdn string) (*hostedZoneInfo, error) { - // Lookup the zone that handles the specified FQDN. - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return nil, err - } - resourceName := strings.TrimSuffix(fqdn, "."+authZone) - - // Query the authority zone. - domain, err := p.linode.GetDomain(acme.UnFqdn(authZone)) - if err != nil { - return nil, err - } - - return &hostedZoneInfo{ - domainId: domain.DomainID, - resourceName: resourceName, - }, nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go b/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go deleted file mode 100644 index d9713a275..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package linode - -import ( - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/timewasted/linode" - "github.com/timewasted/linode/dns" -) - -type ( - LinodeResponse struct { - Action string `json:"ACTION"` - Data interface{} `json:"DATA"` - Errors []linode.ResponseError `json:"ERRORARRAY"` - } - MockResponse struct { - Response interface{} - Errors []linode.ResponseError - } - MockResponseMap map[string]MockResponse -) - -var ( - apiKey string - isTestLive bool -) - -func init() { - apiKey = os.Getenv("LINODE_API_KEY") - isTestLive = len(apiKey) != 0 -} - -func restoreEnv() { - os.Setenv("LINODE_API_KEY", apiKey) -} - -func newMockServer(t *testing.T, responses MockResponseMap) *httptest.Server { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Ensure that we support the requested action. - action := r.URL.Query().Get("api_action") - resp, ok := responses[action] - if !ok { - msg := fmt.Sprintf("Unsupported mock action: %s", action) - require.FailNow(t, msg) - } - - // Build the response that the server will return. - linodeResponse := LinodeResponse{ - Action: action, - Data: resp.Response, - Errors: resp.Errors, - } - rawResponse, err := json.Marshal(linodeResponse) - if err != nil { - msg := fmt.Sprintf("Failed to JSON encode response: %v", err) - require.FailNow(t, msg) - } - - // Send the response. 
- w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write(rawResponse) - })) - - time.Sleep(100 * time.Millisecond) - return srv -} - -func TestNewDNSProviderWithEnv(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - _, err := NewDNSProvider() - assert.NoError(t, err) -} - -func TestNewDNSProviderWithoutEnv(t *testing.T) { - os.Setenv("LINODE_API_KEY", "") - defer restoreEnv() - _, err := NewDNSProvider() - assert.EqualError(t, err, "Linode credentials missing") -} - -func TestNewDNSProviderCredentialsWithKey(t *testing.T) { - _, err := NewDNSProviderCredentials("testing") - assert.NoError(t, err) -} - -func TestNewDNSProviderCredentialsWithoutKey(t *testing.T) { - _, err := NewDNSProviderCredentials("") - assert.EqualError(t, err, "Linode credentials missing") -} - -func TestDNSProvider_Present(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: domain, - DomainID: 1234, - }, - }, - }, - "domain.resource.create": MockResponse{ - Response: dns.ResourceResponse{ - ResourceID: 1234, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.Present(domain, "", keyAuth) - assert.NoError(t, err) -} - -func TestDNSProvider_PresentNoDomain(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: "foobar.com", - DomainID: 1234, - }, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.Present(domain, "", keyAuth) - assert.EqualError(t, err, "dns: requested domain not found") -} - -func TestDNSProvider_PresentCreateFailed(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: domain, - DomainID: 1234, - }, - }, - }, - "domain.resource.create": MockResponse{ - Response: nil, - Errors: []linode.ResponseError{ - linode.ResponseError{ - Code: 1234, - Message: "Failed to create domain resource", - }, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.Present(domain, "", keyAuth) - assert.EqualError(t, err, "Failed to create domain resource") -} - -func TestDNSProvider_PresentLive(t *testing.T) { - if !isTestLive { - t.Skip("Skipping live test") - } -} - -func TestDNSProvider_CleanUp(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: domain, - DomainID: 1234, - }, - }, - }, - "domain.resource.list": MockResponse{ - Response: []dns.Resource{ - dns.Resource{ - DomainID: 1234, - Name: 
"_acme-challenge", - ResourceID: 1234, - Target: "ElbOJKOkFWiZLQeoxf-wb3IpOsQCdvoM0y_wn0TEkxM", - Type: "TXT", - }, - }, - }, - "domain.resource.delete": MockResponse{ - Response: dns.ResourceResponse{ - ResourceID: 1234, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.CleanUp(domain, "", keyAuth) - assert.NoError(t, err) -} - -func TestDNSProvider_CleanUpNoDomain(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: "foobar.com", - DomainID: 1234, - }, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.CleanUp(domain, "", keyAuth) - assert.EqualError(t, err, "dns: requested domain not found") -} - -func TestDNSProvider_CleanUpDeleteFailed(t *testing.T) { - os.Setenv("LINODE_API_KEY", "testing") - defer restoreEnv() - p, err := NewDNSProvider() - assert.NoError(t, err) - - domain := "example.com" - keyAuth := "dGVzdGluZw==" - mockResponses := MockResponseMap{ - "domain.list": MockResponse{ - Response: []dns.Domain{ - dns.Domain{ - Domain: domain, - DomainID: 1234, - }, - }, - }, - "domain.resource.list": MockResponse{ - Response: []dns.Resource{ - dns.Resource{ - DomainID: 1234, - Name: "_acme-challenge", - ResourceID: 1234, - Target: "ElbOJKOkFWiZLQeoxf-wb3IpOsQCdvoM0y_wn0TEkxM", - Type: "TXT", - }, - }, - }, - "domain.resource.delete": MockResponse{ - Response: nil, - Errors: []linode.ResponseError{ - linode.ResponseError{ - Code: 1234, - Message: "Failed to delete domain resource", - }, - }, - }, - } - mockSrv := newMockServer(t, mockResponses) - defer mockSrv.Close() - p.linode.ToLinode().SetEndpoint(mockSrv.URL) - - err = p.CleanUp(domain, "", keyAuth) - assert.EqualError(t, err, "Failed to delete domain resource") -} - -func TestDNSProvider_CleanUpLive(t *testing.T) { - if !isTestLive { - t.Skip("Skipping live test") - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go deleted file mode 100644 index d7eb40935..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go +++ /dev/null @@ -1,416 +0,0 @@ -// Package namecheap implements a DNS provider for solving the DNS-01 -// challenge using namecheap DNS. -package namecheap - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/xenolf/lego/acme" -) - -// Notes about namecheap's tool API: -// 1. Using the API requires registration. Once registered, use your account -// name and API key to access the API. -// 2. There is no API to add or modify a single DNS record. Instead you must -// read the entire list of records, make modifications, and then write the -// entire updated list of records. (Yuck.) -// 3. Namecheap's DNS updates can be slow to propagate. I've seen them take -// as long as an hour. -// 4. Namecheap requires you to whitelist the IP address from which you call -// its APIs. It also requires all API calls to include the whitelisted IP -// address as a form or query string value. This code uses a namecheap -// service to query the client's IP address. 
- -var ( - debug = false - defaultBaseURL = "https://api.namecheap.com/xml.response" - getIPURL = "https://dynamicdns.park-your-domain.com/getip" - httpClient = http.Client{Timeout: 60 * time.Second} -) - -// DNSProvider is an implementation of the ChallengeProviderTimeout interface -// that uses Namecheap's tool API to manage TXT records for a domain. -type DNSProvider struct { - baseURL string - apiUser string - apiKey string - clientIP string -} - -// NewDNSProvider returns a DNSProvider instance configured for namecheap. -// Credentials must be passed in the environment variables: NAMECHEAP_API_USER -// and NAMECHEAP_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - apiUser := os.Getenv("NAMECHEAP_API_USER") - apiKey := os.Getenv("NAMECHEAP_API_KEY") - return NewDNSProviderCredentials(apiUser, apiKey) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for namecheap. -func NewDNSProviderCredentials(apiUser, apiKey string) (*DNSProvider, error) { - if apiUser == "" || apiKey == "" { - return nil, fmt.Errorf("Namecheap credentials missing") - } - - clientIP, err := getClientIP() - if err != nil { - return nil, err - } - - return &DNSProvider{ - baseURL: defaultBaseURL, - apiUser: apiUser, - apiKey: apiKey, - clientIP: clientIP, - }, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Namecheap can sometimes take a long time to complete an -// update, so wait up to 60 minutes for the update to propagate. -func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 60 * time.Minute, 15 * time.Second -} - -// host describes a DNS record returned by the Namecheap DNS gethosts API. -// Namecheap uses the term "host" to refer to all DNS records that include -// a host field (A, AAAA, CNAME, NS, TXT, URL). -type host struct { - Type string `xml:",attr"` - Name string `xml:",attr"` - Address string `xml:",attr"` - MXPref string `xml:",attr"` - TTL string `xml:",attr"` -} - -// apierror describes an error record in a namecheap API response. -type apierror struct { - Number int `xml:",attr"` - Description string `xml:",innerxml"` -} - -// getClientIP returns the client's public IP address. It uses namecheap's -// IP discovery service to perform the lookup. -func getClientIP() (addr string, err error) { - resp, err := httpClient.Get(getIPURL) - if err != nil { - return "", err - } - defer resp.Body.Close() - - clientIP, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - if debug { - fmt.Println("Client IP:", string(clientIP)) - } - return string(clientIP), nil -} - -// A challenge repesents all the data needed to specify a dns-01 challenge -// to lets-encrypt. -type challenge struct { - domain string - key string - keyFqdn string - keyValue string - tld string - sld string - host string -} - -// newChallenge builds a challenge record from a domain name, a challenge -// authentication key, and a map of available TLDs. -func newChallenge(domain, keyAuth string, tlds map[string]string) (*challenge, error) { - domain = acme.UnFqdn(domain) - parts := strings.Split(domain, ".") - - // Find the longest matching TLD. 
- longest := -1 - for i := len(parts); i > 0; i-- { - t := strings.Join(parts[i-1:], ".") - if _, found := tlds[t]; found { - longest = i - 1 - } - } - if longest < 1 { - return nil, fmt.Errorf("Invalid domain name '%s'", domain) - } - - tld := strings.Join(parts[longest:], ".") - sld := parts[longest-1] - - var host string - if longest >= 1 { - host = strings.Join(parts[:longest-1], ".") - } - - key, keyValue, _ := acme.DNS01Record(domain, keyAuth) - - return &challenge{ - domain: domain, - key: "_acme-challenge." + host, - keyFqdn: key, - keyValue: keyValue, - tld: tld, - sld: sld, - host: host, - }, nil -} - -// setGlobalParams adds the namecheap global parameters to the provided url -// Values record. -func (d *DNSProvider) setGlobalParams(v *url.Values, cmd string) { - v.Set("ApiUser", d.apiUser) - v.Set("ApiKey", d.apiKey) - v.Set("UserName", d.apiUser) - v.Set("ClientIp", d.clientIP) - v.Set("Command", cmd) -} - -// getTLDs requests the list of available TLDs from namecheap. -func (d *DNSProvider) getTLDs() (tlds map[string]string, err error) { - values := make(url.Values) - d.setGlobalParams(&values, "namecheap.domains.getTldList") - - reqURL, _ := url.Parse(d.baseURL) - reqURL.RawQuery = values.Encode() - - resp, err := httpClient.Get(reqURL.String()) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("getHosts HTTP error %d", resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - type GetTldsResponse struct { - XMLName xml.Name `xml:"ApiResponse"` - Errors []apierror `xml:"Errors>Error"` - Result []struct { - Name string `xml:",attr"` - } `xml:"CommandResponse>Tlds>Tld"` - } - - var gtr GetTldsResponse - if err := xml.Unmarshal(body, >r); err != nil { - return nil, err - } - if len(gtr.Errors) > 0 { - return nil, fmt.Errorf("Namecheap error: %s [%d]", - gtr.Errors[0].Description, gtr.Errors[0].Number) - } - - tlds = make(map[string]string) - for _, t := range gtr.Result { - tlds[t.Name] = t.Name - } - return tlds, nil -} - -// getHosts reads the full list of DNS host records using the Namecheap API. -func (d *DNSProvider) getHosts(ch *challenge) (hosts []host, err error) { - values := make(url.Values) - d.setGlobalParams(&values, "namecheap.domains.dns.getHosts") - values.Set("SLD", ch.sld) - values.Set("TLD", ch.tld) - - reqURL, _ := url.Parse(d.baseURL) - reqURL.RawQuery = values.Encode() - - resp, err := httpClient.Get(reqURL.String()) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("getHosts HTTP error %d", resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - type GetHostsResponse struct { - XMLName xml.Name `xml:"ApiResponse"` - Status string `xml:"Status,attr"` - Errors []apierror `xml:"Errors>Error"` - Hosts []host `xml:"CommandResponse>DomainDNSGetHostsResult>host"` - } - - var ghr GetHostsResponse - if err = xml.Unmarshal(body, &ghr); err != nil { - return nil, err - } - if len(ghr.Errors) > 0 { - return nil, fmt.Errorf("Namecheap error: %s [%d]", - ghr.Errors[0].Description, ghr.Errors[0].Number) - } - - return ghr.Hosts, nil -} - -// setHosts writes the full list of DNS host records using the Namecheap API. 
-func (d *DNSProvider) setHosts(ch *challenge, hosts []host) error { - values := make(url.Values) - d.setGlobalParams(&values, "namecheap.domains.dns.setHosts") - values.Set("SLD", ch.sld) - values.Set("TLD", ch.tld) - - for i, h := range hosts { - ind := fmt.Sprintf("%d", i+1) - values.Add("HostName"+ind, h.Name) - values.Add("RecordType"+ind, h.Type) - values.Add("Address"+ind, h.Address) - values.Add("MXPref"+ind, h.MXPref) - values.Add("TTL"+ind, h.TTL) - } - - resp, err := httpClient.PostForm(d.baseURL, values) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return fmt.Errorf("setHosts HTTP error %d", resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - type SetHostsResponse struct { - XMLName xml.Name `xml:"ApiResponse"` - Status string `xml:"Status,attr"` - Errors []apierror `xml:"Errors>Error"` - Result struct { - IsSuccess string `xml:",attr"` - } `xml:"CommandResponse>DomainDNSSetHostsResult"` - } - - var shr SetHostsResponse - if err := xml.Unmarshal(body, &shr); err != nil { - return err - } - if len(shr.Errors) > 0 { - return fmt.Errorf("Namecheap error: %s [%d]", - shr.Errors[0].Description, shr.Errors[0].Number) - } - if shr.Result.IsSuccess != "true" { - return fmt.Errorf("Namecheap setHosts failed.") - } - - return nil -} - -// addChallengeRecord adds a DNS challenge TXT record to a list of namecheap -// host records. -func (d *DNSProvider) addChallengeRecord(ch *challenge, hosts *[]host) { - host := host{ - Name: ch.key, - Type: "TXT", - Address: ch.keyValue, - MXPref: "10", - TTL: "120", - } - - // If there's already a TXT record with the same name, replace it. - for i, h := range *hosts { - if h.Name == ch.key && h.Type == "TXT" { - (*hosts)[i] = host - return - } - } - - // No record was replaced, so add a new one. - *hosts = append(*hosts, host) -} - -// removeChallengeRecord removes a DNS challenge TXT record from a list of -// namecheap host records. Return true if a record was removed. -func (d *DNSProvider) removeChallengeRecord(ch *challenge, hosts *[]host) bool { - // Find the challenge TXT record and remove it if found. - for i, h := range *hosts { - if h.Name == ch.key && h.Type == "TXT" { - *hosts = append((*hosts)[:i], (*hosts)[i+1:]...) - return true - } - } - - return false -} - -// Present installs a TXT record for the DNS challenge. -func (d *DNSProvider) Present(domain, token, keyAuth string) error { - tlds, err := d.getTLDs() - if err != nil { - return err - } - - ch, err := newChallenge(domain, keyAuth, tlds) - if err != nil { - return err - } - - hosts, err := d.getHosts(ch) - if err != nil { - return err - } - - d.addChallengeRecord(ch, &hosts) - - if debug { - for _, h := range hosts { - fmt.Printf( - "%-5.5s %-30.30s %-6s %-70.70s\n", - h.Type, h.Name, h.TTL, h.Address) - } - } - - return d.setHosts(ch, hosts) -} - -// CleanUp removes a TXT record used for a previous DNS challenge. 
-func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - tlds, err := d.getTLDs() - if err != nil { - return err - } - - ch, err := newChallenge(domain, keyAuth, tlds) - if err != nil { - return err - } - - hosts, err := d.getHosts(ch) - if err != nil { - return err - } - - if removed := d.removeChallengeRecord(ch, &hosts); !removed { - return nil - } - - return d.setHosts(ch, hosts) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go deleted file mode 100644 index 0631d4a3e..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package namecheap - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" -) - -var ( - fakeUser = "foo" - fakeKey = "bar" - fakeClientIP = "10.0.0.1" - - tlds = map[string]string{ - "com.au": "com.au", - "com": "com", - "co.uk": "co.uk", - "uk": "uk", - "edu": "edu", - "co.com": "co.com", - "za.com": "za.com", - } -) - -func assertEq(t *testing.T, variable, got, want string) { - if got != want { - t.Errorf("Expected %s to be '%s' but got '%s'", variable, want, got) - } -} - -func assertHdr(tc *testcase, t *testing.T, values *url.Values) { - ch, _ := newChallenge(tc.domain, "", tlds) - - assertEq(t, "ApiUser", values.Get("ApiUser"), fakeUser) - assertEq(t, "ApiKey", values.Get("ApiKey"), fakeKey) - assertEq(t, "UserName", values.Get("UserName"), fakeUser) - assertEq(t, "ClientIp", values.Get("ClientIp"), fakeClientIP) - assertEq(t, "SLD", values.Get("SLD"), ch.sld) - assertEq(t, "TLD", values.Get("TLD"), ch.tld) -} - -func mockServer(tc *testcase, t *testing.T, w http.ResponseWriter, r *http.Request) { - switch r.Method { - - case "GET": - values := r.URL.Query() - cmd := values.Get("Command") - switch cmd { - case "namecheap.domains.dns.getHosts": - assertHdr(tc, t, &values) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, tc.getHostsResponse) - case "namecheap.domains.getTldList": - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, responseGetTlds) - default: - t.Errorf("Unexpected GET command: %s", cmd) - } - - case "POST": - r.ParseForm() - values := r.Form - cmd := values.Get("Command") - switch cmd { - case "namecheap.domains.dns.setHosts": - assertHdr(tc, t, &values) - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, tc.setHostsResponse) - default: - t.Errorf("Unexpected POST command: %s", cmd) - } - - default: - t.Errorf("Unexpected http method: %s", r.Method) - - } -} - -func testGetHosts(tc *testcase, t *testing.T) { - mock := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - mockServer(tc, t, w, r) - })) - defer mock.Close() - - prov := &DNSProvider{ - baseURL: mock.URL, - apiUser: fakeUser, - apiKey: fakeKey, - clientIP: fakeClientIP, - } - - ch, _ := newChallenge(tc.domain, "", tlds) - hosts, err := prov.getHosts(ch) - if tc.errString != "" { - if err == nil || err.Error() != tc.errString { - t.Errorf("Namecheap getHosts case %s expected error", tc.name) - } - } else { - if err != nil { - t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err) - } - } - -next1: - for _, h := range hosts { - for _, th := range tc.hosts { - if h == th { - continue next1 - } - } - t.Errorf("getHosts case %s unexpected record [%s:%s:%s]", - tc.name, h.Type, h.Name, h.Address) - } - -next2: - for _, th := range tc.hosts { - for _, h := range hosts { - if h == th { - continue next2 - } - } - t.Errorf("getHosts case %s missing 
record [%s:%s:%s]", - tc.name, th.Type, th.Name, th.Address) - } -} - -func mockDNSProvider(url string) *DNSProvider { - return &DNSProvider{ - baseURL: url, - apiUser: fakeUser, - apiKey: fakeKey, - clientIP: fakeClientIP, - } -} - -func testSetHosts(tc *testcase, t *testing.T) { - mock := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - mockServer(tc, t, w, r) - })) - defer mock.Close() - - prov := mockDNSProvider(mock.URL) - ch, _ := newChallenge(tc.domain, "", tlds) - hosts, err := prov.getHosts(ch) - if tc.errString != "" { - if err == nil || err.Error() != tc.errString { - t.Errorf("Namecheap getHosts case %s expected error", tc.name) - } - } else { - if err != nil { - t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err) - } - } - if err != nil { - return - } - - err = prov.setHosts(ch, hosts) - if err != nil { - t.Errorf("Namecheap setHosts case %s failed", tc.name) - } -} - -func testPresent(tc *testcase, t *testing.T) { - mock := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - mockServer(tc, t, w, r) - })) - defer mock.Close() - - prov := mockDNSProvider(mock.URL) - err := prov.Present(tc.domain, "", "dummyKey") - if tc.errString != "" { - if err == nil || err.Error() != tc.errString { - t.Errorf("Namecheap Present case %s expected error", tc.name) - } - } else { - if err != nil { - t.Errorf("Namecheap Present case %s failed\n%v", tc.name, err) - } - } -} - -func testCleanUp(tc *testcase, t *testing.T) { - mock := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - mockServer(tc, t, w, r) - })) - defer mock.Close() - - prov := mockDNSProvider(mock.URL) - err := prov.CleanUp(tc.domain, "", "dummyKey") - if tc.errString != "" { - if err == nil || err.Error() != tc.errString { - t.Errorf("Namecheap CleanUp case %s expected error", tc.name) - } - } else { - if err != nil { - t.Errorf("Namecheap CleanUp case %s failed\n%v", tc.name, err) - } - } -} - -func TestNamecheap(t *testing.T) { - for _, tc := range testcases { - testGetHosts(&tc, t) - testSetHosts(&tc, t) - testPresent(&tc, t) - testCleanUp(&tc, t) - } -} - -func TestNamecheapDomainSplit(t *testing.T) { - tests := []struct { - domain string - valid bool - tld string - sld string - host string - }{ - {"a.b.c.test.co.uk", true, "co.uk", "test", "a.b.c"}, - {"test.co.uk", true, "co.uk", "test", ""}, - {"test.com", true, "com", "test", ""}, - {"test.co.com", true, "co.com", "test", ""}, - {"www.test.com.au", true, "com.au", "test", "www"}, - {"www.za.com", true, "za.com", "www", ""}, - {"", false, "", "", ""}, - {"a", false, "", "", ""}, - {"com", false, "", "", ""}, - {"co.com", false, "", "", ""}, - {"co.uk", false, "", "", ""}, - {"test.au", false, "", "", ""}, - {"za.com", false, "", "", ""}, - {"www.za", false, "", "", ""}, - {"www.test.au", false, "", "", ""}, - {"www.test.unk", false, "", "", ""}, - } - - for _, test := range tests { - valid := true - ch, err := newChallenge(test.domain, "", tlds) - if err != nil { - valid = false - } - - if test.valid && !valid { - t.Errorf("Expected '%s' to split", test.domain) - } else if !test.valid && valid { - t.Errorf("Expected '%s' to produce error", test.domain) - } - - if test.valid && valid { - assertEq(t, "domain", ch.domain, test.domain) - assertEq(t, "tld", ch.tld, test.tld) - assertEq(t, "sld", ch.sld, test.sld) - assertEq(t, "host", ch.host, test.host) - } - } -} - -type testcase struct { - name string - domain string - hosts []host - errString string 
- getHostsResponse string - setHostsResponse string -} - -var testcases = []testcase{ - { - "Test:Success:1", - "test.example.com", - []host{ - {"A", "home", "10.0.0.1", "10", "1799"}, - {"A", "www", "10.0.0.2", "10", "1200"}, - {"AAAA", "a", "::0", "10", "1799"}, - {"CNAME", "*", "example.com.", "10", "1799"}, - {"MXE", "example.com", "10.0.0.5", "10", "1800"}, - {"URL", "xyz", "https://google.com", "10", "1799"}, - }, - "", - responseGetHostsSuccess1, - responseSetHostsSuccess1, - }, - { - "Test:Success:2", - "example.com", - []host{ - {"A", "@", "10.0.0.2", "10", "1200"}, - {"A", "www", "10.0.0.3", "10", "60"}, - }, - "", - responseGetHostsSuccess2, - responseSetHostsSuccess2, - }, - { - "Test:Error:BadApiKey:1", - "test.example.com", - nil, - "Namecheap error: API Key is invalid or API access has not been enabled [1011102]", - responseGetHostsErrorBadAPIKey1, - "", - }, -} - -var responseGetHostsSuccess1 = ` - - - - namecheap.domains.dns.getHosts - - - - - - - - - - - PHX01SBAPI01 - --5:00 - 3.338 -` - -var responseSetHostsSuccess1 = ` - - - - namecheap.domains.dns.setHosts - - - - - - PHX01SBAPI01 - --5:00 - 2.347 -` - -var responseGetHostsSuccess2 = ` - - - - namecheap.domains.dns.getHosts - - - - - - - PHX01SBAPI01 - --5:00 - 3.338 -` - -var responseSetHostsSuccess2 = ` - - - - namecheap.domains.dns.setHosts - - - - - - PHX01SBAPI01 - --5:00 - 2.347 -` - -var responseGetHostsErrorBadAPIKey1 = ` - - - API Key is invalid or API access has not been enabled - - - - PHX01SBAPI01 - --5:00 - 0 -` - -var responseGetTlds = ` - - - - namecheap.domains.getTldList - - - Most recognized top level domain - - - PHX01SBAPI01 - --5:00 - 0.004 -` diff --git a/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1.go b/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1.go deleted file mode 100644 index 105d73f89..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package ns1 implements a DNS provider for solving the DNS-01 challenge -// using NS1 DNS. -package ns1 - -import ( - "fmt" - "net/http" - "os" - "time" - - "github.com/xenolf/lego/acme" - "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/dns" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface. -type DNSProvider struct { - client *rest.Client -} - -// NewDNSProvider returns a DNSProvider instance configured for NS1. -// Credentials must be passed in the environment variables: NS1_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - key := os.Getenv("NS1_API_KEY") - if key == "" { - return nil, fmt.Errorf("NS1 credentials missing") - } - return NewDNSProviderCredentials(key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for NS1. -func NewDNSProviderCredentials(key string) (*DNSProvider, error) { - if key == "" { - return nil, fmt.Errorf("NS1 credentials missing") - } - - httpClient := &http.Client{Timeout: time.Second * 10} - client := rest.NewClient(httpClient, rest.SetAPIKey(key)) - - return &DNSProvider{client}, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. 
-func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZone(domain) - if err != nil { - return err - } - - record := c.newTxtRecord(zone, fqdn, value, ttl) - _, err = c.client.Records.Create(record) - if err != nil && err != rest.ErrRecordExists { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZone(domain) - if err != nil { - return err - } - - name := acme.UnFqdn(fqdn) - _, err = c.client.Records.Delete(zone.Zone, name, "TXT") - return err -} - -func (c *DNSProvider) getHostedZone(domain string) (*dns.Zone, error) { - zone, _, err := c.client.Zones.Get(domain) - if err != nil { - return nil, err - } - - return zone, nil -} - -func (c *DNSProvider) newTxtRecord(zone *dns.Zone, fqdn, value string, ttl int) *dns.Record { - name := acme.UnFqdn(fqdn) - - return &dns.Record{ - Type: "TXT", - Zone: zone.Zone, - Domain: name, - TTL: ttl, - Answers: []*dns.Answer{ - {Rdata: []string{value}}, - }, - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1_test.go b/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1_test.go deleted file mode 100644 index eb9150dde..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/ns1/ns1_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package ns1 - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - liveTest bool - apiKey string - domain string -) - -func init() { - apiKey = os.Getenv("NS1_API_KEY") - domain = os.Getenv("NS1_DOMAIN") - if len(apiKey) > 0 && len(domain) > 0 { - liveTest = true - } -} - -func restoreNS1Env() { - os.Setenv("NS1_API_KEY", apiKey) -} - -func TestNewDNSProviderValid(t *testing.T) { - os.Setenv("NS1_API_KEY", "") - _, err := NewDNSProviderCredentials("123") - assert.NoError(t, err) - restoreNS1Env() -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("NS1_API_KEY", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "NS1 credentials missing") - restoreNS1Env() -} - -func TestLivePresent(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(apiKey) - assert.NoError(t, err) - - err = provider.Present(domain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveCleanUp(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProviderCredentials(apiKey) - assert.NoError(t, err) - - err = provider.CleanUp(domain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go b/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go deleted file mode 100644 index 0f2acb4b4..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/otc/mock.go +++ /dev/null @@ -1,152 +0,0 @@ -package otc - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" -) - -var fakeOTCUserName = "test" -var fakeOTCPassword = "test" -var fakeOTCDomainName = "test" -var fakeOTCProjectName = "test" -var fakeOTCToken = "62244bc21da68d03ebac94e6636ff01f" - -type DNSMock struct { - t *testing.T - Server *httptest.Server - Mux *http.ServeMux -} - -func NewDNSMock(t *testing.T) *DNSMock { - return &DNSMock{ - t: t, - } -} - -// Setup creates the 
mock server -func (m *DNSMock) Setup() { - m.Mux = http.NewServeMux() - m.Server = httptest.NewServer(m.Mux) -} - -// ShutdownServer creates the mock server -func (m *DNSMock) ShutdownServer() { - m.Server.Close() -} - -func (m *DNSMock) HandleAuthSuccessfully() { - m.Mux.HandleFunc("/v3/auth/token", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-Subject-Token", fakeOTCToken) - - fmt.Fprintf(w, `{ - "token": { - "catalog": [ - { - "type": "dns", - "id": "56cd81db1f8445d98652479afe07c5ba", - "name": "", - "endpoints": [ - { - "url": "%s", - "region": "eu-de", - "region_id": "eu-de", - "interface": "public", - "id": "0047a06690484d86afe04877074efddf" - } - ] - } - ] - }}`, m.Server.URL) - }) -} - -func (m *DNSMock) HandleListZonesSuccessfully() { - m.Mux.HandleFunc("/v2/zones", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, `{ - "zones":[{ - "id":"123123" - }]} - `) - - assert.Equal(m.t, r.Method, "GET") - assert.Equal(m.t, r.URL.Path, "/v2/zones") - assert.Equal(m.t, r.URL.RawQuery, "name=example.com.") - assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") - }) -} - -func (m *DNSMock) HandleListZonesEmpty() { - m.Mux.HandleFunc("/v2/zones", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, `{ - "zones":[ - ]} - `) - - assert.Equal(m.t, r.Method, "GET") - assert.Equal(m.t, r.URL.Path, "/v2/zones") - assert.Equal(m.t, r.URL.RawQuery, "name=example.com.") - assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") - }) -} - -func (m *DNSMock) HandleDeleteRecordsetsSuccessfully() { - m.Mux.HandleFunc("/v2/zones/123123/recordsets/321321", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, `{ - "zones":[{ - "id":"123123" - }]} - `) - - assert.Equal(m.t, r.Method, "DELETE") - assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets/321321") - assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") - }) -} - -func (m *DNSMock) HandleListRecordsetsEmpty() { - m.Mux.HandleFunc("/v2/zones/123123/recordsets", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, `{ - "recordsets":[ - ]} - `) - - assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets") - assert.Equal(m.t, r.URL.RawQuery, "type=TXT&name=_acme-challenge.example.com.") - }) -} -func (m *DNSMock) HandleListRecordsetsSuccessfully() { - m.Mux.HandleFunc("/v2/zones/123123/recordsets", func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" { - fmt.Fprintf(w, `{ - "recordsets":[{ - "id":"321321" - }]} - `) - - assert.Equal(m.t, r.URL.Path, "/v2/zones/123123/recordsets") - assert.Equal(m.t, r.URL.RawQuery, "type=TXT&name=_acme-challenge.example.com.") - - } else if r.Method == "POST" { - body, err := ioutil.ReadAll(r.Body) - - assert.Nil(m.t, err) - exceptedString := "{\"name\":\"_acme-challenge.example.com.\",\"description\":\"Added TXT record for ACME dns-01 challenge using lego client\",\"type\":\"TXT\",\"ttl\":300,\"records\":[\"\\\"w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI\\\"\"]}" - assert.Equal(m.t, string(body), exceptedString) - - fmt.Fprintf(w, `{ - "recordsets":[{ - "id":"321321" - }]} - `) - - } else { - m.t.Errorf("Expected method to be 'GET' or 'POST' but got '%s'", r.Method) - } - - assert.Equal(m.t, r.Header.Get("Content-Type"), "application/json") - }) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go b/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go deleted file mode 100644 index 86bcaa9b7..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/otc/otc.go +++ 
/dev/null @@ -1,388 +0,0 @@ -// Package otc implements a DNS provider for solving the DNS-01 challenge -// using Open Telekom Cloud Managed DNS. -package otc - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "time" - - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses -// OTC's Managed DNS API to manage TXT records for a domain. -type DNSProvider struct { - identityEndpoint string - otcBaseURL string - domainName string - projectName string - userName string - password string - token string -} - -// NewDNSProvider returns a DNSProvider instance configured for OTC DNS. -// Credentials must be passed in the environment variables: OTC_USER_NAME, -// OTC_DOMAIN_NAME, OTC_PASSWORD OTC_PROJECT_NAME and OTC_IDENTITY_ENDPOINT. -func NewDNSProvider() (*DNSProvider, error) { - domainName := os.Getenv("OTC_DOMAIN_NAME") - userName := os.Getenv("OTC_USER_NAME") - password := os.Getenv("OTC_PASSWORD") - projectName := os.Getenv("OTC_PROJECT_NAME") - identityEndpoint := os.Getenv("OTC_IDENTITY_ENDPOINT") - return NewDNSProviderCredentials(domainName, userName, password, projectName, identityEndpoint) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for OTC DNS. -func NewDNSProviderCredentials(domainName, userName, password, projectName, identityEndpoint string) (*DNSProvider, error) { - if domainName == "" || userName == "" || password == "" || projectName == "" { - return nil, fmt.Errorf("OTC credentials missing") - } - - if identityEndpoint == "" { - identityEndpoint = "https://iam.eu-de.otc.t-systems.com:443/v3/auth/tokens" - } - - return &DNSProvider{ - identityEndpoint: identityEndpoint, - domainName: domainName, - userName: userName, - password: password, - projectName: projectName, - }, nil -} - -func (d *DNSProvider) SendRequest(method, resource string, payload interface{}) (io.Reader, error) { - url := fmt.Sprintf("%s/%s", d.otcBaseURL, resource) - - body, err := json.Marshal(payload) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(method, url, bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - if len(d.token) > 0 { - req.Header.Set("X-Auth-Token", d.token) - } - - // Workaround for keep alive bug in otc api - tr := http.DefaultTransport.(*http.Transport) - tr.DisableKeepAlives = true - - client := &http.Client{ - Timeout: time.Duration(10 * time.Second), - Transport: tr, - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("OTC API request %s failed with HTTP status code %d", url, resp.StatusCode) - } - - body1, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return bytes.NewReader(body1), nil -} - -func (d *DNSProvider) loginRequest() error { - type nameResponse struct { - Name string `json:"name"` - } - - type userResponse struct { - Name string `json:"name"` - Password string `json:"password"` - Domain nameResponse `json:"domain"` - } - - type passwordResponse struct { - User userResponse `json:"user"` - } - type identityResponse struct { - Methods []string `json:"methods"` - Password passwordResponse `json:"password"` - } - - type scopeResponse struct { - Project nameResponse `json:"project"` - } - - type authResponse struct { - Identity identityResponse `json:"identity"` - Scope scopeResponse 
`json:"scope"` - } - - type loginResponse struct { - Auth authResponse `json:"auth"` - } - - userResp := userResponse{ - Name: d.userName, - Password: d.password, - Domain: nameResponse{ - Name: d.domainName, - }, - } - - loginResp := loginResponse{ - Auth: authResponse{ - Identity: identityResponse{ - Methods: []string{"password"}, - Password: passwordResponse{ - User: userResp, - }, - }, - Scope: scopeResponse{ - Project: nameResponse{ - Name: d.projectName, - }, - }, - }, - } - - body, err := json.Marshal(loginResp) - if err != nil { - return err - } - req, err := http.NewRequest("POST", d.identityEndpoint, bytes.NewReader(body)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{Timeout: time.Duration(10 * time.Second)} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return fmt.Errorf("OTC API request failed with HTTP status code %d", resp.StatusCode) - } - - d.token = resp.Header.Get("X-Subject-Token") - - if d.token == "" { - return fmt.Errorf("unable to get auth token") - } - - type endpointResponse struct { - Token struct { - Catalog []struct { - Type string `json:"type"` - Endpoints []struct { - URL string `json:"url"` - } `json:"endpoints"` - } `json:"catalog"` - } `json:"token"` - } - var endpointResp endpointResponse - - err = json.NewDecoder(resp.Body).Decode(&endpointResp) - if err != nil { - return err - } - - for _, v := range endpointResp.Token.Catalog { - if v.Type == "dns" { - for _, endpoint := range v.Endpoints { - d.otcBaseURL = fmt.Sprintf("%s/v2", endpoint.URL) - continue - } - } - } - - if d.otcBaseURL == "" { - return fmt.Errorf("unable to get dns endpoint") - } - - return nil -} - -// Starts a new OTC API Session. Authenticates using userName, password -// and receives a token to be used in for subsequent requests. 
-func (d *DNSProvider) login() error { - err := d.loginRequest() - if err != nil { - return err - } - - return nil -} - -func (d *DNSProvider) getZoneID(zone string) (string, error) { - type zoneItem struct { - ID string `json:"id"` - } - - type zonesResponse struct { - Zones []zoneItem `json:"zones"` - } - - resource := fmt.Sprintf("zones?name=%s", zone) - resp, err := d.SendRequest("GET", resource, nil) - if err != nil { - return "", err - } - - var zonesRes zonesResponse - err = json.NewDecoder(resp).Decode(&zonesRes) - if err != nil { - return "", err - } - - if len(zonesRes.Zones) < 1 { - return "", fmt.Errorf("zone %s not found", zone) - } - - if len(zonesRes.Zones) > 1 { - return "", fmt.Errorf("to many zones found") - } - - if zonesRes.Zones[0].ID == "" { - return "", fmt.Errorf("id not found") - } - - return zonesRes.Zones[0].ID, nil -} - -func (d *DNSProvider) getRecordSetID(zoneID string, fqdn string) (string, error) { - type recordSet struct { - ID string `json:"id"` - } - - type recordSetsResponse struct { - RecordSets []recordSet `json:"recordsets"` - } - - resource := fmt.Sprintf("zones/%s/recordsets?type=TXT&name=%s", zoneID, fqdn) - resp, err := d.SendRequest("GET", resource, nil) - if err != nil { - return "", err - } - - var recordSetsRes recordSetsResponse - err = json.NewDecoder(resp).Decode(&recordSetsRes) - if err != nil { - return "", err - } - - if len(recordSetsRes.RecordSets) < 1 { - return "", fmt.Errorf("record not found") - } - - if len(recordSetsRes.RecordSets) > 1 { - return "", fmt.Errorf("to many records found") - } - - if recordSetsRes.RecordSets[0].ID == "" { - return "", fmt.Errorf("id not found") - } - - return recordSetsRes.RecordSets[0].ID, nil -} - -func (d *DNSProvider) deleteRecordSet(zoneID, recordID string) error { - resource := fmt.Sprintf("zones/%s/recordsets/%s", zoneID, recordID) - - _, err := d.SendRequest("DELETE", resource, nil) - if err != nil { - return err - } - return nil -} - -// Present creates a TXT record using the specified parameters -func (d *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - if ttl < 300 { - ttl = 300 // 300 is otc minimum value for ttl - } - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - err = d.login() - if err != nil { - return err - } - - zoneID, err := d.getZoneID(authZone) - if err != nil { - return fmt.Errorf("unable to get zone: %s", err) - } - - resource := fmt.Sprintf("zones/%s/recordsets", zoneID) - - type recordset struct { - Name string `json:"name"` - Description string `json:"description"` - Type string `json:"type"` - Ttl int `json:"ttl"` - Records []string `json:"records"` - } - - r1 := &recordset{ - Name: fqdn, - Description: "Added TXT record for ACME dns-01 challenge using lego client", - Type: "TXT", - Ttl: 300, - Records: []string{fmt.Sprintf("\"%s\"", value)}, - } - _, err = d.SendRequest("POST", resource, r1) - - if err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return err - } - - err = d.login() - if err != nil { - return err - } - - zoneID, err := d.getZoneID(authZone) - - if err != nil { - return err - } - - recordID, err := d.getRecordSetID(zoneID, fqdn) - if err != nil { 
- return fmt.Errorf("unable go get record %s for zone %s: %s", fqdn, domain, err) - } - return d.deleteRecordSet(zoneID, recordID) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/otc/otc_test.go b/vendor/github.com/xenolf/lego/providers/dns/otc/otc_test.go deleted file mode 100644 index 0c05334a9..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/otc/otc_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package otc - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - "os" - "testing" -) - -type OTCDNSTestSuite struct { - suite.Suite - Mock *DNSMock -} - -func (s *OTCDNSTestSuite) TearDownSuite() { - s.Mock.ShutdownServer() -} - -func (s *OTCDNSTestSuite) SetupTest() { - s.Mock = NewDNSMock(s.T()) - s.Mock.Setup() - s.Mock.HandleAuthSuccessfully() - -} - -func TestOTCDNSTestSuite(t *testing.T) { - suite.Run(t, new(OTCDNSTestSuite)) -} - -func (s *OTCDNSTestSuite) createDNSProvider() (*DNSProvider, error) { - url := fmt.Sprintf("%s/v3/auth/token", s.Mock.Server.URL) - return NewDNSProviderCredentials(fakeOTCUserName, fakeOTCPassword, fakeOTCDomainName, fakeOTCProjectName, url) -} - -func (s *OTCDNSTestSuite) TestOTCDNSLoginEnv() { - os.Setenv("OTC_DOMAIN_NAME", "unittest1") - os.Setenv("OTC_USER_NAME", "unittest2") - os.Setenv("OTC_PASSWORD", "unittest3") - os.Setenv("OTC_PROJECT_NAME", "unittest4") - os.Setenv("OTC_IDENTITY_ENDPOINT", "unittest5") - - provider, err := NewDNSProvider() - assert.Nil(s.T(), err) - assert.Equal(s.T(), provider.domainName, "unittest1") - assert.Equal(s.T(), provider.userName, "unittest2") - assert.Equal(s.T(), provider.password, "unittest3") - assert.Equal(s.T(), provider.projectName, "unittest4") - assert.Equal(s.T(), provider.identityEndpoint, "unittest5") - - os.Setenv("OTC_IDENTITY_ENDPOINT", "") - - provider, err = NewDNSProvider() - assert.Nil(s.T(), err) - assert.Equal(s.T(), provider.identityEndpoint, "https://iam.eu-de.otc.t-systems.com:443/v3/auth/tokens") - - os.Clearenv() -} - -func (s *OTCDNSTestSuite) TestOTCDNSLoginEnvEmpty() { - _, err := NewDNSProvider() - assert.Equal(s.T(), "OTC credentials missing", err.Error()) - - os.Clearenv() -} - -func (s *OTCDNSTestSuite) TestOTCDNSLogin() { - otcProvider, err := s.createDNSProvider() - - assert.Nil(s.T(), err) - err = otcProvider.loginRequest() - assert.Nil(s.T(), err) - assert.Equal(s.T(), otcProvider.otcBaseURL, fmt.Sprintf("%s/v2", s.Mock.Server.URL)) - assert.Equal(s.T(), fakeOTCToken, otcProvider.token) -} - -func (s *OTCDNSTestSuite) TestOTCDNSEmptyZone() { - s.Mock.HandleListZonesEmpty() - s.Mock.HandleListRecordsetsSuccessfully() - - otcProvider, _ := s.createDNSProvider() - err := otcProvider.Present("example.com", "", "foobar") - assert.NotNil(s.T(), err) -} - -func (s *OTCDNSTestSuite) TestOTCDNSEmptyRecordset() { - s.Mock.HandleListZonesSuccessfully() - s.Mock.HandleListRecordsetsEmpty() - - otcProvider, _ := s.createDNSProvider() - err := otcProvider.CleanUp("example.com", "", "foobar") - assert.NotNil(s.T(), err) -} - -func (s *OTCDNSTestSuite) TestOTCDNSPresent() { - s.Mock.HandleListZonesSuccessfully() - s.Mock.HandleListRecordsetsSuccessfully() - - otcProvider, _ := s.createDNSProvider() - err := otcProvider.Present("example.com", "", "foobar") - assert.Nil(s.T(), err) -} - -func (s *OTCDNSTestSuite) TestOTCDNSCleanup() { - s.Mock.HandleListZonesSuccessfully() - s.Mock.HandleListRecordsetsSuccessfully() - s.Mock.HandleDeleteRecordsetsSuccessfully() - - otcProvider, _ := s.createDNSProvider() - err := 
otcProvider.CleanUp("example.com", "", "foobar") - assert.Nil(s.T(), err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go deleted file mode 100644 index 290a8d7df..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go +++ /dev/null @@ -1,159 +0,0 @@ -// Package OVH implements a DNS provider for solving the DNS-01 -// challenge using OVH DNS. -package ovh - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/ovh/go-ovh/ovh" - "github.com/xenolf/lego/acme" -) - -// OVH API reference: https://eu.api.ovh.com/ -// Create a Token: https://eu.api.ovh.com/createToken/ - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -// that uses OVH's REST API to manage TXT records for a domain. -type DNSProvider struct { - client *ovh.Client - recordIDs map[string]int - recordIDsMu sync.Mutex -} - -// NewDNSProvider returns a DNSProvider instance configured for OVH -// Credentials must be passed in the environment variable: -// OVH_ENDPOINT : it must be ovh-eu or ovh-ca -// OVH_APPLICATION_KEY -// OVH_APPLICATION_SECRET -// OVH_CONSUMER_KEY -func NewDNSProvider() (*DNSProvider, error) { - apiEndpoint := os.Getenv("OVH_ENDPOINT") - applicationKey := os.Getenv("OVH_APPLICATION_KEY") - applicationSecret := os.Getenv("OVH_APPLICATION_SECRET") - consumerKey := os.Getenv("OVH_CONSUMER_KEY") - return NewDNSProviderCredentials(apiEndpoint, applicationKey, applicationSecret, consumerKey) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for OVH. -func NewDNSProviderCredentials(apiEndpoint, applicationKey, applicationSecret, consumerKey string) (*DNSProvider, error) { - if apiEndpoint == "" || applicationKey == "" || applicationSecret == "" || consumerKey == "" { - return nil, fmt.Errorf("OVH credentials missing") - } - - ovhClient, _ := ovh.NewClient( - apiEndpoint, - applicationKey, - applicationSecret, - consumerKey, - ) - - return &DNSProvider{ - client: ovhClient, - recordIDs: make(map[string]int), - }, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. -func (d *DNSProvider) Present(domain, token, keyAuth string) error { - - // txtRecordRequest represents the request body to DO's API to make a TXT record - type txtRecordRequest struct { - FieldType string `json:"fieldType"` - SubDomain string `json:"subDomain"` - Target string `json:"target"` - TTL int `json:"ttl"` - } - - // txtRecordResponse represents a response from DO's API after making a TXT record - type txtRecordResponse struct { - ID int `json:"id"` - FieldType string `json:"fieldType"` - SubDomain string `json:"subDomain"` - Target string `json:"target"` - TTL int `json:"ttl"` - Zone string `json:"zone"` - } - - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - // Parse domain name - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. 
%s", domain, err) - } - - authZone = acme.UnFqdn(authZone) - subDomain := d.extractRecordName(fqdn, authZone) - - reqURL := fmt.Sprintf("/domain/zone/%s/record", authZone) - reqData := txtRecordRequest{FieldType: "TXT", SubDomain: subDomain, Target: value, TTL: ttl} - var respData txtRecordResponse - - // Create TXT record - err = d.client.Post(reqURL, reqData, &respData) - if err != nil { - fmt.Printf("Error when call OVH api to add record : %q \n", err) - return err - } - - // Apply the change - reqURL = fmt.Sprintf("/domain/zone/%s/refresh", authZone) - err = d.client.Post(reqURL, nil, nil) - if err != nil { - fmt.Printf("Error when call OVH api to refresh zone : %q \n", err) - return err - } - - d.recordIDsMu.Lock() - d.recordIDs[fqdn] = respData.ID - d.recordIDsMu.Unlock() - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - // get the record's unique ID from when we created it - d.recordIDsMu.Lock() - recordID, ok := d.recordIDs[fqdn] - d.recordIDsMu.Unlock() - if !ok { - return fmt.Errorf("unknown record ID for '%s'", fqdn) - } - - authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) - if err != nil { - return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err) - } - - authZone = acme.UnFqdn(authZone) - - reqURL := fmt.Sprintf("/domain/zone/%s/record/%d", authZone, recordID) - - err = d.client.Delete(reqURL, nil) - if err != nil { - fmt.Printf("Error when call OVH api to delete challenge record : %q \n", err) - return err - } - - // Delete record ID from map - d.recordIDsMu.Lock() - delete(d.recordIDs, fqdn) - d.recordIDsMu.Unlock() - - return nil -} - -func (d *DNSProvider) extractRecordName(fqdn, domain string) string { - name := acme.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go deleted file mode 100644 index 47da60e57..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package ovh - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - liveTest bool - apiEndpoint string - applicationKey string - applicationSecret string - consumerKey string - domain string -) - -func init() { - apiEndpoint = os.Getenv("OVH_ENDPOINT") - applicationKey = os.Getenv("OVH_APPLICATION_KEY") - applicationSecret = os.Getenv("OVH_APPLICATION_SECRET") - consumerKey = os.Getenv("OVH_CONSUMER_KEY") - liveTest = len(apiEndpoint) > 0 && len(applicationKey) > 0 && len(applicationSecret) > 0 && len(consumerKey) > 0 -} - -func restoreEnv() { - os.Setenv("OVH_ENDPOINT", apiEndpoint) - os.Setenv("OVH_APPLICATION_KEY", applicationKey) - os.Setenv("OVH_APPLICATION_SECRET", applicationSecret) - os.Setenv("OVH_CONSUMER_KEY", consumerKey) -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("OVH_ENDPOINT", "ovh-eu") - os.Setenv("OVH_APPLICATION_KEY", "1234") - os.Setenv("OVH_APPLICATION_SECRET", "5678") - os.Setenv("OVH_CONSUMER_KEY", "abcde") - defer restoreEnv() - _, err := NewDNSProvider() - assert.NoError(t, err) -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("OVH_ENDPOINT", "") - os.Setenv("OVH_APPLICATION_KEY", "1234") - os.Setenv("OVH_APPLICATION_SECRET", "5678") - 
os.Setenv("OVH_CONSUMER_KEY", "abcde") - defer restoreEnv() - _, err := NewDNSProvider() - assert.EqualError(t, err, "OVH credentials missing") - - os.Setenv("OVH_ENDPOINT", "ovh-eu") - os.Setenv("OVH_APPLICATION_KEY", "") - os.Setenv("OVH_APPLICATION_SECRET", "5678") - os.Setenv("OVH_CONSUMER_KEY", "abcde") - defer restoreEnv() - _, err = NewDNSProvider() - assert.EqualError(t, err, "OVH credentials missing") - - os.Setenv("OVH_ENDPOINT", "ovh-eu") - os.Setenv("OVH_APPLICATION_KEY", "1234") - os.Setenv("OVH_APPLICATION_SECRET", "") - os.Setenv("OVH_CONSUMER_KEY", "abcde") - defer restoreEnv() - _, err = NewDNSProvider() - assert.EqualError(t, err, "OVH credentials missing") - - os.Setenv("OVH_ENDPOINT", "ovh-eu") - os.Setenv("OVH_APPLICATION_KEY", "1234") - os.Setenv("OVH_APPLICATION_SECRET", "5678") - os.Setenv("OVH_CONSUMER_KEY", "") - defer restoreEnv() - _, err = NewDNSProvider() - assert.EqualError(t, err, "OVH credentials missing") -} - -func TestLivePresent(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.Present(domain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveCleanUp(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.CleanUp(domain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md b/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md deleted file mode 100644 index 23abb7669..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## PowerDNS provider - -Tested and confirmed to work with PowerDNS authoratative server 3.4.8 and 4.0.1. Refer to [PowerDNS documentation](https://doc.powerdns.com/md/httpapi/README/) instructions on how to enable the built-in API interface. - -PowerDNS Notes: -- PowerDNS API does not currently support SSL, therefore you should take care to ensure that traffic between lego and the PowerDNS API is over a trusted network, VPN etc. -- In order to have the SOA serial automatically increment each time the `_acme-challenge` record is added/modified via the API, set `SOA-API-EDIT` to `INCEPTION-INCREMENT` for the zone in the `domainmetadata` table diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go deleted file mode 100644 index a4fd22b0c..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go +++ /dev/null @@ -1,343 +0,0 @@ -// Package pdns implements a DNS provider for solving the DNS-01 -// challenge using PowerDNS nameserver. -package pdns - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -type DNSProvider struct { - apiKey string - host *url.URL - apiVersion int -} - -// NewDNSProvider returns a DNSProvider instance configured for pdns. -// Credentials must be passed in the environment variable: -// PDNS_API_URL and PDNS_API_KEY. 
-func NewDNSProvider() (*DNSProvider, error) { - key := os.Getenv("PDNS_API_KEY") - hostUrl, err := url.Parse(os.Getenv("PDNS_API_URL")) - if err != nil { - return nil, err - } - - return NewDNSProviderCredentials(hostUrl, key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for pdns. -func NewDNSProviderCredentials(host *url.URL, key string) (*DNSProvider, error) { - if key == "" { - return nil, fmt.Errorf("PDNS API key missing") - } - - if host == nil || host.Host == "" { - return nil, fmt.Errorf("PDNS API URL missing") - } - - provider := &DNSProvider{ - host: host, - apiKey: key, - } - provider.getAPIVersion() - - return provider, nil -} - -// Timeout returns the timeout and interval to use when checking for DNS -// propagation. Adjusting here to cope with spikes in propagation times. -func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { - return 120 * time.Second, 2 * time.Second -} - -// Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zone, err := c.getHostedZone(fqdn) - if err != nil { - return err - } - - name := fqdn - - // pre-v1 API wants non-fqdn - if c.apiVersion == 0 { - name = acme.UnFqdn(fqdn) - } - - rec := pdnsRecord{ - Content: "\"" + value + "\"", - Disabled: false, - - // pre-v1 API - Type: "TXT", - Name: name, - TTL: 120, - } - - rrsets := rrSets{ - RRSets: []rrSet{ - rrSet{ - Name: name, - ChangeType: "REPLACE", - Type: "TXT", - Kind: "Master", - TTL: 120, - Records: []pdnsRecord{rec}, - }, - }, - } - - body, err := json.Marshal(rrsets) - if err != nil { - return err - } - - _, err = c.makeRequest("PATCH", zone.URL, bytes.NewReader(body)) - if err != nil { - fmt.Println("here") - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - zone, err := c.getHostedZone(fqdn) - if err != nil { - return err - } - - set, err := c.findTxtRecord(fqdn) - if err != nil { - return err - } - - rrsets := rrSets{ - RRSets: []rrSet{ - rrSet{ - Name: set.Name, - Type: set.Type, - ChangeType: "DELETE", - }, - }, - } - body, err := json.Marshal(rrsets) - if err != nil { - return err - } - - _, err = c.makeRequest("PATCH", zone.URL, bytes.NewReader(body)) - if err != nil { - return err - } - - return nil -} - -func (c *DNSProvider) getHostedZone(fqdn string) (*hostedZone, error) { - var zone hostedZone - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return nil, err - } - - url := "/servers/localhost/zones" - result, err := c.makeRequest("GET", url, nil) - if err != nil { - return nil, err - } - - zones := []hostedZone{} - err = json.Unmarshal(result, &zones) - if err != nil { - return nil, err - } - - url = "" - for _, zone := range zones { - if acme.UnFqdn(zone.Name) == acme.UnFqdn(authZone) { - url = zone.URL - } - } - - result, err = c.makeRequest("GET", url, nil) - if err != nil { - return nil, err - } - - err = json.Unmarshal(result, &zone) - if err != nil { - return nil, err - } - - // convert pre-v1 API result - if len(zone.Records) > 0 { - zone.RRSets = []rrSet{} - for _, record := range zone.Records { - set := rrSet{ - Name: record.Name, - Type: record.Type, - Records: []pdnsRecord{record}, - } - zone.RRSets = append(zone.RRSets, set) - } - } - - return 
&zone, nil -} - -func (c *DNSProvider) findTxtRecord(fqdn string) (*rrSet, error) { - zone, err := c.getHostedZone(fqdn) - if err != nil { - return nil, err - } - - _, err = c.makeRequest("GET", zone.URL, nil) - if err != nil { - return nil, err - } - - for _, set := range zone.RRSets { - if (set.Name == acme.UnFqdn(fqdn) || set.Name == fqdn) && set.Type == "TXT" { - return &set, nil - } - } - - return nil, fmt.Errorf("No existing record found for %s", fqdn) -} - -func (c *DNSProvider) getAPIVersion() { - type APIVersion struct { - URL string `json:"url"` - Version int `json:"version"` - } - - result, err := c.makeRequest("GET", "/api", nil) - if err != nil { - return - } - - var versions []APIVersion - err = json.Unmarshal(result, &versions) - if err != nil { - return - } - - latestVersion := 0 - for _, v := range versions { - if v.Version > latestVersion { - latestVersion = v.Version - } - } - c.apiVersion = latestVersion -} - -func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { - type APIError struct { - Error string `json:"error"` - } - var path = "" - if c.host.Path != "/" { - path = c.host.Path - } - if c.apiVersion > 0 { - if !strings.HasPrefix(uri, "api/v") { - uri = "/api/v" + strconv.Itoa(c.apiVersion) + uri - } else { - uri = "/" + uri - } - } - url := c.host.Scheme + "://" + c.host.Host + path + uri - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - req.Header.Set("X-API-Key", c.apiKey) - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Error talking to PDNS API -> %v", err) - } - - defer resp.Body.Close() - - if resp.StatusCode != 422 && (resp.StatusCode < 200 || resp.StatusCode >= 300) { - return nil, fmt.Errorf("Unexpected HTTP status code %d when fetching '%s'", resp.StatusCode, url) - } - - var msg json.RawMessage - err = json.NewDecoder(resp.Body).Decode(&msg) - switch { - case err == io.EOF: - // empty body - return nil, nil - case err != nil: - // other error - return nil, err - } - - // check for PowerDNS error message - if len(msg) > 0 && msg[0] == '{' { - var apiError APIError - err = json.Unmarshal(msg, &apiError) - if err != nil { - return nil, err - } - if apiError.Error != "" { - return nil, fmt.Errorf("Error talking to PDNS API -> %v", apiError.Error) - } - } - return msg, nil -} - -type pdnsRecord struct { - Content string `json:"content"` - Disabled bool `json:"disabled"` - - // pre-v1 API - Name string `json:"name"` - Type string `json:"type"` - TTL int `json:"ttl,omitempty"` -} - -type hostedZone struct { - ID string `json:"id"` - Name string `json:"name"` - URL string `json:"url"` - RRSets []rrSet `json:"rrsets"` - - // pre-v1 API - Records []pdnsRecord `json:"records"` -} - -type rrSet struct { - Name string `json:"name"` - Type string `json:"type"` - Kind string `json:"kind"` - ChangeType string `json:"changetype"` - Records []pdnsRecord `json:"records"` - TTL int `json:"ttl,omitempty"` -} - -type rrSets struct { - RRSets []rrSet `json:"rrsets"` -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go deleted file mode 100644 index 70e7670ed..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package pdns - -import ( - "net/url" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - pdnsLiveTest bool - pdnsURL *url.URL - pdnsURLStr 
string - pdnsAPIKey string - pdnsDomain string -) - -func init() { - pdnsURLStr = os.Getenv("PDNS_API_URL") - pdnsURL, _ = url.Parse(pdnsURLStr) - pdnsAPIKey = os.Getenv("PDNS_API_KEY") - pdnsDomain = os.Getenv("PDNS_DOMAIN") - if len(pdnsURLStr) > 0 && len(pdnsAPIKey) > 0 && len(pdnsDomain) > 0 { - pdnsLiveTest = true - } -} - -func restorePdnsEnv() { - os.Setenv("PDNS_API_URL", pdnsURLStr) - os.Setenv("PDNS_API_KEY", pdnsAPIKey) -} - -func TestNewDNSProviderValid(t *testing.T) { - os.Setenv("PDNS_API_URL", "") - os.Setenv("PDNS_API_KEY", "") - tmpURL, _ := url.Parse("http://localhost:8081") - _, err := NewDNSProviderCredentials(tmpURL, "123") - assert.NoError(t, err) - restorePdnsEnv() -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("PDNS_API_URL", "http://localhost:8081") - os.Setenv("PDNS_API_KEY", "123") - _, err := NewDNSProvider() - assert.NoError(t, err) - restorePdnsEnv() -} - -func TestNewDNSProviderMissingHostErr(t *testing.T) { - os.Setenv("PDNS_API_URL", "") - os.Setenv("PDNS_API_KEY", "123") - _, err := NewDNSProvider() - assert.EqualError(t, err, "PDNS API URL missing") - restorePdnsEnv() -} - -func TestNewDNSProviderMissingKeyErr(t *testing.T) { - os.Setenv("PDNS_API_URL", pdnsURLStr) - os.Setenv("PDNS_API_KEY", "") - _, err := NewDNSProvider() - assert.EqualError(t, err, "PDNS API key missing") - restorePdnsEnv() -} - -func TestPdnsPresentAndCleanup(t *testing.T) { - if !pdnsLiveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProviderCredentials(pdnsURL, pdnsAPIKey) - assert.NoError(t, err) - - err = provider.Present(pdnsDomain, "", "123d==") - assert.NoError(t, err) - - err = provider.CleanUp(pdnsDomain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go deleted file mode 100644 index 13daa8c8a..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go +++ /dev/null @@ -1,284 +0,0 @@ -// Package rackspace implements a DNS provider for solving the DNS-01 -// challenge using rackspace DNS. -package rackspace - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "time" - - "github.com/xenolf/lego/acme" -) - -// rackspaceAPIURL represents the Identity API endpoint to call -var rackspaceAPIURL = "https://identity.api.rackspacecloud.com/v2.0/tokens" - -// DNSProvider is an implementation of the acme.ChallengeProvider interface -// used to store the reusable token and DNS API endpoint -type DNSProvider struct { - token string - cloudDNSEndpoint string -} - -// NewDNSProvider returns a DNSProvider instance configured for Rackspace. -// Credentials must be passed in the environment variables: RACKSPACE_USER -// and RACKSPACE_API_KEY. -func NewDNSProvider() (*DNSProvider, error) { - user := os.Getenv("RACKSPACE_USER") - key := os.Getenv("RACKSPACE_API_KEY") - return NewDNSProviderCredentials(user, key) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for Rackspace. It authenticates against -// the API, also grabbing the DNS Endpoint. 
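// --- Editor's illustrative sketch (not part of the patch) -------------------
// A minimal example of the Rackspace provider removed here, assuming the
// RACKSPACE_USER and RACKSPACE_API_KEY environment variables are already set as
// documented above. The domain and key authorization value are placeholders.
package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/rackspace"
)

func main() {
	// NewDNSProvider reads RACKSPACE_USER and RACKSPACE_API_KEY and authenticates
	// against the Identity API to obtain a token and the Cloud DNS endpoint.
	provider, err := rackspace.NewDNSProvider()
	if err != nil {
		log.Fatal(err)
	}

	// Present and CleanUp manage the _acme-challenge TXT record for DNS-01.
	if err := provider.Present("example.com", "", "keyAuth-placeholder"); err != nil {
		log.Fatal(err)
	}
	if err := provider.CleanUp("example.com", "", "keyAuth-placeholder"); err != nil {
		log.Fatal(err)
	}
}
// -----------------------------------------------------------------------------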
-func NewDNSProviderCredentials(user, key string) (*DNSProvider, error) { - if user == "" || key == "" { - return nil, fmt.Errorf("Rackspace credentials missing") - } - - type APIKeyCredentials struct { - Username string `json:"username"` - APIKey string `json:"apiKey"` - } - - type Auth struct { - APIKeyCredentials `json:"RAX-KSKEY:apiKeyCredentials"` - } - - type RackspaceAuthData struct { - Auth `json:"auth"` - } - - type RackspaceIdentity struct { - Access struct { - ServiceCatalog []struct { - Endpoints []struct { - PublicURL string `json:"publicURL"` - TenantID string `json:"tenantId"` - } `json:"endpoints"` - Name string `json:"name"` - } `json:"serviceCatalog"` - Token struct { - ID string `json:"id"` - } `json:"token"` - } `json:"access"` - } - - authData := RackspaceAuthData{ - Auth: Auth{ - APIKeyCredentials: APIKeyCredentials{ - Username: user, - APIKey: key, - }, - }, - } - - body, err := json.Marshal(authData) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", rackspaceAPIURL, bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Error querying Rackspace Identity API: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Rackspace Authentication failed. Response code: %d", resp.StatusCode) - } - - var rackspaceIdentity RackspaceIdentity - err = json.NewDecoder(resp.Body).Decode(&rackspaceIdentity) - if err != nil { - return nil, err - } - - // Iterate through the Service Catalog to get the DNS Endpoint - var dnsEndpoint string - for _, service := range rackspaceIdentity.Access.ServiceCatalog { - if service.Name == "cloudDNS" { - dnsEndpoint = service.Endpoints[0].PublicURL - break - } - } - if dnsEndpoint == "" { - return nil, fmt.Errorf("Failed to populate DNS endpoint, check Rackspace API for changes.") - } - - return &DNSProvider{ - token: rackspaceIdentity.Access.Token.ID, - cloudDNSEndpoint: dnsEndpoint, - }, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - zoneID, err := c.getHostedZoneID(fqdn) - if err != nil { - return err - } - - rec := RackspaceRecords{ - RackspaceRecord: []RackspaceRecord{{ - Name: acme.UnFqdn(fqdn), - Type: "TXT", - Data: value, - TTL: 300, - }}, - } - - body, err := json.Marshal(rec) - if err != nil { - return err - } - - _, err = c.makeRequest("POST", fmt.Sprintf("/domains/%d/records", zoneID), bytes.NewReader(body)) - if err != nil { - return err - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - zoneID, err := c.getHostedZoneID(fqdn) - if err != nil { - return err - } - - record, err := c.findTxtRecord(fqdn, zoneID) - if err != nil { - return err - } - - _, err = c.makeRequest("DELETE", fmt.Sprintf("/domains/%d/records?id=%s", zoneID, record.ID), nil) - if err != nil { - return err - } - - return nil -} - -// getHostedZoneID performs a lookup to get the DNS zone which needs -// modifying for a given FQDN -func (c *DNSProvider) getHostedZoneID(fqdn string) (int, error) { - // HostedZones represents the response when querying Rackspace DNS zones - type ZoneSearchResponse 
struct { - TotalEntries int `json:"totalEntries"` - HostedZones []struct { - ID int `json:"id"` - Name string `json:"name"` - } `json:"domains"` - } - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return 0, err - } - - result, err := c.makeRequest("GET", fmt.Sprintf("/domains?name=%s", acme.UnFqdn(authZone)), nil) - if err != nil { - return 0, err - } - - var zoneSearchResponse ZoneSearchResponse - err = json.Unmarshal(result, &zoneSearchResponse) - if err != nil { - return 0, err - } - - // If nothing was returned, or for whatever reason more than 1 was returned (the search uses exact match, so should not occur) - if zoneSearchResponse.TotalEntries != 1 { - return 0, fmt.Errorf("Found %d zones for %s in Rackspace for domain %s", zoneSearchResponse.TotalEntries, authZone, fqdn) - } - - return zoneSearchResponse.HostedZones[0].ID, nil -} - -// findTxtRecord searches a DNS zone for a TXT record with a specific name -func (c *DNSProvider) findTxtRecord(fqdn string, zoneID int) (*RackspaceRecord, error) { - result, err := c.makeRequest("GET", fmt.Sprintf("/domains/%d/records?type=TXT&name=%s", zoneID, acme.UnFqdn(fqdn)), nil) - if err != nil { - return nil, err - } - - var records RackspaceRecords - err = json.Unmarshal(result, &records) - if err != nil { - return nil, err - } - - recordsLength := len(records.RackspaceRecord) - switch recordsLength { - case 1: - break - case 0: - return nil, fmt.Errorf("No TXT record found for %s", fqdn) - default: - return nil, fmt.Errorf("More than 1 TXT record found for %s", fqdn) - } - - return &records.RackspaceRecord[0], nil -} - -// makeRequest is a wrapper function used for making DNS API requests -func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { - url := c.cloudDNSEndpoint + uri - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - req.Header.Set("X-Auth-Token", c.token) - req.Header.Set("Content-Type", "application/json") - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Error querying DNS API: %v", err) - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { - return nil, fmt.Errorf("Request failed for %s %s. Response code: %d", method, url, resp.StatusCode) - } - - var r json.RawMessage - err = json.NewDecoder(resp.Body).Decode(&r) - if err != nil { - return nil, fmt.Errorf("JSON decode failed for %s %s. 
Response code: %d", method, url, resp.StatusCode) - } - - return r, nil -} - -// RackspaceRecords is the list of records sent/received from the DNS API -type RackspaceRecords struct { - RackspaceRecord []RackspaceRecord `json:"records"` -} - -// RackspaceRecord represents a Rackspace DNS record -type RackspaceRecord struct { - Name string `json:"name"` - Type string `json:"type"` - Data string `json:"data"` - TTL int `json:"ttl,omitempty"` - ID string `json:"id,omitempty"` -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace_test.go b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace_test.go deleted file mode 100644 index 22c979cad..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package rackspace - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - rackspaceLiveTest bool - rackspaceUser string - rackspaceAPIKey string - rackspaceDomain string - testAPIURL string -) - -func init() { - rackspaceUser = os.Getenv("RACKSPACE_USER") - rackspaceAPIKey = os.Getenv("RACKSPACE_API_KEY") - rackspaceDomain = os.Getenv("RACKSPACE_DOMAIN") - if len(rackspaceUser) > 0 && len(rackspaceAPIKey) > 0 && len(rackspaceDomain) > 0 { - rackspaceLiveTest = true - } -} - -func testRackspaceEnv() { - rackspaceAPIURL = testAPIURL - os.Setenv("RACKSPACE_USER", "testUser") - os.Setenv("RACKSPACE_API_KEY", "testKey") -} - -func liveRackspaceEnv() { - rackspaceAPIURL = "https://identity.api.rackspacecloud.com/v2.0/tokens" - os.Setenv("RACKSPACE_USER", rackspaceUser) - os.Setenv("RACKSPACE_API_KEY", rackspaceAPIKey) -} - -func startTestServers() (identityAPI, dnsAPI *httptest.Server) { - dnsAPI = httptest.NewServer(dnsMux()) - dnsEndpoint := dnsAPI.URL + "/123456" - - identityAPI = httptest.NewServer(identityHandler(dnsEndpoint)) - testAPIURL = identityAPI.URL + "/" - return -} - -func closeTestServers(identityAPI, dnsAPI *httptest.Server) { - identityAPI.Close() - dnsAPI.Close() -} - -func identityHandler(dnsEndpoint string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - reqBody, err := ioutil.ReadAll(r.Body) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - resp, found := jsonMap[string(reqBody)] - if !found { - w.WriteHeader(http.StatusBadRequest) - return - } - resp = strings.Replace(resp, "https://dns.api.rackspacecloud.com/v1.0/123456", dnsEndpoint, 1) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, resp) - }) -} - -func dnsMux() *http.ServeMux { - mux := http.NewServeMux() - - // Used by `getHostedZoneID()` finding `zoneID` "?name=example.com" - mux.HandleFunc("/123456/domains", func(w http.ResponseWriter, r *http.Request) { - if r.URL.Query().Get("name") == "example.com" { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, jsonMap["zoneDetails"]) - return - } - w.WriteHeader(http.StatusBadRequest) - return - }) - - mux.HandleFunc("/123456/domains/112233/records", func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - // Used by `Present()` creating the TXT record - case http.MethodPost: - reqBody, err := ioutil.ReadAll(r.Body) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - resp, found := jsonMap[string(reqBody)] - if !found { - w.WriteHeader(http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusAccepted) - fmt.Fprintf(w, resp) - // Used by `findTxtRecord()` finding 
`record.ID` "?type=TXT&name=_acme-challenge.example.com" - case http.MethodGet: - if r.URL.Query().Get("type") == "TXT" && r.URL.Query().Get("name") == "_acme-challenge.example.com" { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, jsonMap["recordDetails"]) - return - } - w.WriteHeader(http.StatusBadRequest) - return - // Used by `CleanUp()` deleting the TXT record "?id=445566" - case http.MethodDelete: - if r.URL.Query().Get("id") == "TXT-654321" { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, jsonMap["recordDelete"]) - return - } - w.WriteHeader(http.StatusBadRequest) - } - }) - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - fmt.Printf("Not Found for Request: (%+v)\n\n", r) - }) - - return mux -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - testRackspaceEnv() - _, err := NewDNSProviderCredentials("", "") - assert.EqualError(t, err, "Rackspace credentials missing") -} - -func TestOfflineRackspaceValid(t *testing.T) { - testRackspaceEnv() - provider, err := NewDNSProviderCredentials(os.Getenv("RACKSPACE_USER"), os.Getenv("RACKSPACE_API_KEY")) - - assert.NoError(t, err) - assert.Equal(t, provider.token, "testToken", "The token should match") -} - -func TestOfflineRackspacePresent(t *testing.T) { - testRackspaceEnv() - provider, err := NewDNSProvider() - - if assert.NoError(t, err) { - err = provider.Present("example.com", "token", "keyAuth") - assert.NoError(t, err) - } -} - -func TestOfflineRackspaceCleanUp(t *testing.T) { - testRackspaceEnv() - provider, err := NewDNSProvider() - - if assert.NoError(t, err) { - err = provider.CleanUp("example.com", "token", "keyAuth") - assert.NoError(t, err) - } -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - if !rackspaceLiveTest { - t.Skip("skipping live test") - } - - liveRackspaceEnv() - provider, err := NewDNSProvider() - assert.NoError(t, err) - assert.Contains(t, provider.cloudDNSEndpoint, "https://dns.api.rackspacecloud.com/v1.0/", "The endpoint URL should contain the base") -} - -func TestRackspacePresent(t *testing.T) { - if !rackspaceLiveTest { - t.Skip("skipping live test") - } - - liveRackspaceEnv() - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.Present(rackspaceDomain, "", "112233445566==") - assert.NoError(t, err) -} - -func TestRackspaceCleanUp(t *testing.T) { - if !rackspaceLiveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 15) - - liveRackspaceEnv() - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.CleanUp(rackspaceDomain, "", "112233445566==") - assert.NoError(t, err) -} - -func TestMain(m *testing.M) { - identityAPI, dnsAPI := startTestServers() - defer closeTestServers(identityAPI, dnsAPI) - os.Exit(m.Run()) -} - -var jsonMap = map[string]string{ - `{"auth":{"RAX-KSKEY:apiKeyCredentials":{"username":"testUser","apiKey":"testKey"}}}`: `{"access":{"token":{"id":"testToken","expires":"1970-01-01T00:00:00.000Z","tenant":{"id":"123456","name":"123456"},"RAX-AUTH:authenticatedBy":["APIKEY"]},"serviceCatalog":[{"type":"rax:dns","endpoints":[{"publicURL":"https://dns.api.rackspacecloud.com/v1.0/123456","tenantId":"123456"}],"name":"cloudDNS"}],"user":{"id":"fakeUseID","name":"testUser"}}}`, - "zoneDetails": `{"domains":[{"name":"example.com","id":112233,"emailAddress":"hostmaster@example.com","updated":"1970-01-01T00:00:00.000+0000","created":"1970-01-01T00:00:00.000+0000"}],"totalEntries":1}`, - 
`{"records":[{"name":"_acme-challenge.example.com","type":"TXT","data":"pW9ZKG0xz_PCriK-nCMOjADy9eJcgGWIzkkj2fN4uZM","ttl":300}]}`: `{"request":"{\"records\":[{\"name\":\"_acme-challenge.example.com\",\"type\":\"TXT\",\"data\":\"pW9ZKG0xz_PCriK-nCMOjADy9eJcgGWIzkkj2fN4uZM\",\"ttl\":300}]}","status":"RUNNING","verb":"POST","jobId":"00000000-0000-0000-0000-0000000000","callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/123456/status/00000000-0000-0000-0000-0000000000","requestUrl":"https://dns.api.rackspacecloud.com/v1.0/123456/domains/112233/records"}`, - "recordDetails": `{"records":[{"name":"_acme-challenge.example.com","id":"TXT-654321","type":"TXT","data":"pW9ZKG0xz_PCriK-nCMOjADy9eJcgGWIzkkj2fN4uZM","ttl":300,"updated":"1970-01-01T00:00:00.000+0000","created":"1970-01-01T00:00:00.000+0000"}]}`, - "recordDelete": `{"status":"RUNNING","verb":"DELETE","jobId":"00000000-0000-0000-0000-0000000000","callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/123456/status/00000000-0000-0000-0000-0000000000","requestUrl":"https://dns.api.rackspacecloud.com/v1.0/123456/domains/112233/recordsid=TXT-654321"}`, -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go deleted file mode 100644 index dde42ddf1..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go +++ /dev/null @@ -1,152 +0,0 @@ -// Package rfc2136 implements a DNS provider for solving the DNS-01 challenge -// using the rfc2136 dynamic update. -package rfc2136 - -import ( - "fmt" - "net" - "os" - "strings" - "time" - - "github.com/miekg/dns" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface that -// uses dynamic DNS updates (RFC 2136) to create TXT records on a nameserver. -type DNSProvider struct { - nameserver string - tsigAlgorithm string - tsigKey string - tsigSecret string - timeout time.Duration -} - -// NewDNSProvider returns a DNSProvider instance configured for rfc2136 -// dynamic update. Configured with environment variables: -// RFC2136_NAMESERVER: Network address in the form "host" or "host:port". -// RFC2136_TSIG_ALGORITHM: Defaults to hmac-md5.sig-alg.reg.int. (HMAC-MD5). -// See https://github.com/miekg/dns/blob/master/tsig.go for supported values. -// RFC2136_TSIG_KEY: Name of the secret key as defined in DNS server configuration. -// RFC2136_TSIG_SECRET: Secret key payload. -// RFC2136_TIMEOUT: DNS propagation timeout in time.ParseDuration format. (60s) -// To disable TSIG authentication, leave the RFC2136_TSIG* variables unset. -func NewDNSProvider() (*DNSProvider, error) { - nameserver := os.Getenv("RFC2136_NAMESERVER") - tsigAlgorithm := os.Getenv("RFC2136_TSIG_ALGORITHM") - tsigKey := os.Getenv("RFC2136_TSIG_KEY") - tsigSecret := os.Getenv("RFC2136_TSIG_SECRET") - timeout := os.Getenv("RFC2136_TIMEOUT") - return NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret, timeout) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a -// DNSProvider instance configured for rfc2136 dynamic update. To disable TSIG -// authentication, leave the TSIG parameters as empty strings. -// nameserver must be a network address in the form "host" or "host:port". -func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret, timeout string) (*DNSProvider, error) { - if nameserver == "" { - return nil, fmt.Errorf("RFC2136 nameserver missing") - } - - // Append the default DNS port if none is specified. 
- if _, _, err := net.SplitHostPort(nameserver); err != nil { - if strings.Contains(err.Error(), "missing port") { - nameserver = net.JoinHostPort(nameserver, "53") - } else { - return nil, err - } - } - d := &DNSProvider{ - nameserver: nameserver, - } - if tsigAlgorithm == "" { - tsigAlgorithm = dns.HmacMD5 - } - d.tsigAlgorithm = tsigAlgorithm - if len(tsigKey) > 0 && len(tsigSecret) > 0 { - d.tsigKey = tsigKey - d.tsigSecret = tsigSecret - } - - if timeout == "" { - d.timeout = 60 * time.Second - } else { - t, err := time.ParseDuration(timeout) - if err != nil { - return nil, err - } else if t < 0 { - return nil, fmt.Errorf("Invalid/negative RFC2136_TIMEOUT: %v", timeout) - } else { - d.timeout = t - } - } - - return d, nil -} - -// Returns the timeout configured with RFC2136_TIMEOUT, or 60s. -func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { - return d.timeout, 2 * time.Second -} - -// Present creates a TXT record using the specified parameters -func (r *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - return r.changeRecord("INSERT", fqdn, value, ttl) -} - -// CleanUp removes the TXT record matching the specified parameters -func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - return r.changeRecord("REMOVE", fqdn, value, ttl) -} - -func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { - // Find the zone for the given fqdn - zone, err := acme.FindZoneByFqdn(fqdn, []string{r.nameserver}) - if err != nil { - return err - } - - // Create RR - rr := new(dns.TXT) - rr.Hdr = dns.RR_Header{Name: fqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: uint32(ttl)} - rr.Txt = []string{value} - rrs := []dns.RR{rr} - - // Create dynamic update packet - m := new(dns.Msg) - m.SetUpdate(zone) - switch action { - case "INSERT": - // Always remove old challenge left over from who knows what. - m.RemoveRRset(rrs) - m.Insert(rrs) - case "REMOVE": - m.Remove(rrs) - default: - return fmt.Errorf("Unexpected action: %s", action) - } - - // Setup client - c := new(dns.Client) - c.SingleInflight = true - // TSIG authentication / msg signing - if len(r.tsigKey) > 0 && len(r.tsigSecret) > 0 { - m.SetTsig(dns.Fqdn(r.tsigKey), r.tsigAlgorithm, 300, time.Now().Unix()) - c.TsigSecret = map[string]string{dns.Fqdn(r.tsigKey): r.tsigSecret} - } - - // Send the query - reply, _, err := c.Exchange(m, r.nameserver) - if err != nil { - return fmt.Errorf("DNS update failed: %v", err) - } - if reply != nil && reply.Rcode != dns.RcodeSuccess { - return fmt.Errorf("DNS update failed. Server replied: %s", dns.RcodeToString[reply.Rcode]) - } - - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go deleted file mode 100644 index f3ca65b31..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package rfc2136 - -import ( - "bytes" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "github.com/miekg/dns" - "github.com/xenolf/lego/acme" -) - -var ( - rfc2136TestDomain = "123456789.www.example.com" - rfc2136TestKeyAuth = "123d==" - rfc2136TestValue = "Now36o-3BmlB623-0c1qCIUmgWVVmDJb88KGl24pqpo" - rfc2136TestFqdn = "_acme-challenge.123456789.www.example.com." - rfc2136TestZone = "example.com." - rfc2136TestTTL = 120 - rfc2136TestTsigKey = "example.com." 
- rfc2136TestTsigSecret = "IwBTJx9wrDp4Y1RyC3H0gA==" -) - -var reqChan = make(chan *dns.Msg, 10) - -func TestRFC2136CanaryLocalTestServer(t *testing.T) { - acme.ClearFqdnCache() - dns.HandleFunc("example.com.", serverHandlerHello) - defer dns.HandleRemove("example.com.") - - server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false) - if err != nil { - t.Fatalf("Failed to start test server: %v", err) - } - defer server.Shutdown() - - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("example.com.", dns.TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatalf("Failed to communicate with test server: %v", err) - } - txt := r.Extra[0].(*dns.TXT).Txt[0] - if txt != "Hello world" { - t.Error("Expected test server to return 'Hello world' but got: ", txt) - } -} - -func TestRFC2136ServerSuccess(t *testing.T) { - acme.ClearFqdnCache() - dns.HandleFunc(rfc2136TestZone, serverHandlerReturnSuccess) - defer dns.HandleRemove(rfc2136TestZone) - - server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false) - if err != nil { - t.Fatalf("Failed to start test server: %v", err) - } - defer server.Shutdown() - - provider, err := NewDNSProviderCredentials(addrstr, "", "", "", "") - if err != nil { - t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err) - } - if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err != nil { - t.Errorf("Expected Present() to return no error but the error was -> %v", err) - } -} - -func TestRFC2136ServerError(t *testing.T) { - acme.ClearFqdnCache() - dns.HandleFunc(rfc2136TestZone, serverHandlerReturnErr) - defer dns.HandleRemove(rfc2136TestZone) - - server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false) - if err != nil { - t.Fatalf("Failed to start test server: %v", err) - } - defer server.Shutdown() - - provider, err := NewDNSProviderCredentials(addrstr, "", "", "", "") - if err != nil { - t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err) - } - if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err == nil { - t.Errorf("Expected Present() to return an error but it did not.") - } else if !strings.Contains(err.Error(), "NOTZONE") { - t.Errorf("Expected Present() to return an error with the 'NOTZONE' rcode string but it did not.") - } -} - -func TestRFC2136TsigClient(t *testing.T) { - acme.ClearFqdnCache() - dns.HandleFunc(rfc2136TestZone, serverHandlerReturnSuccess) - defer dns.HandleRemove(rfc2136TestZone) - - server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", true) - if err != nil { - t.Fatalf("Failed to start test server: %v", err) - } - defer server.Shutdown() - - provider, err := NewDNSProviderCredentials(addrstr, "", rfc2136TestTsigKey, rfc2136TestTsigSecret, "") - if err != nil { - t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err) - } - if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err != nil { - t.Errorf("Expected Present() to return no error but the error was -> %v", err) - } -} - -func TestRFC2136ValidUpdatePacket(t *testing.T) { - acme.ClearFqdnCache() - dns.HandleFunc(rfc2136TestZone, serverHandlerPassBackRequest) - defer dns.HandleRemove(rfc2136TestZone) - - server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false) - if err != nil { - t.Fatalf("Failed to start test server: %v", err) - } - defer server.Shutdown() - - txtRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN TXT %s", 
rfc2136TestFqdn, rfc2136TestTTL, rfc2136TestValue)) - rrs := []dns.RR{txtRR} - m := new(dns.Msg) - m.SetUpdate(rfc2136TestZone) - m.RemoveRRset(rrs) - m.Insert(rrs) - expectstr := m.String() - expect, err := m.Pack() - if err != nil { - t.Fatalf("Error packing expect msg: %v", err) - } - - provider, err := NewDNSProviderCredentials(addrstr, "", "", "", "") - if err != nil { - t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err) - } - - if err := provider.Present(rfc2136TestDomain, "", "1234d=="); err != nil { - t.Errorf("Expected Present() to return no error but the error was -> %v", err) - } - - rcvMsg := <-reqChan - rcvMsg.Id = m.Id - actual, err := rcvMsg.Pack() - if err != nil { - t.Fatalf("Error packing actual msg: %v", err) - } - - if !bytes.Equal(actual, expect) { - tmp := new(dns.Msg) - if err := tmp.Unpack(actual); err != nil { - t.Fatalf("Error unpacking actual msg: %v", err) - } - t.Errorf("Expected msg:\n%s", expectstr) - t.Errorf("Actual msg:\n%v", tmp) - } -} - -func runLocalDNSTestServer(listenAddr string, tsig bool) (*dns.Server, string, error) { - pc, err := net.ListenPacket("udp", listenAddr) - if err != nil { - return nil, "", err - } - server := &dns.Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - if tsig { - server.TsigSecret = map[string]string{rfc2136TestTsigKey: rfc2136TestTsigSecret} - } - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} - -func serverHandlerHello(w dns.ResponseWriter, req *dns.Msg) { - m := new(dns.Msg) - m.SetReply(req) - m.Extra = make([]dns.RR, 1) - m.Extra[0] = &dns.TXT{ - Hdr: dns.RR_Header{Name: m.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0}, - Txt: []string{"Hello world"}, - } - w.WriteMsg(m) -} - -func serverHandlerReturnSuccess(w dns.ResponseWriter, req *dns.Msg) { - m := new(dns.Msg) - m.SetReply(req) - if req.Opcode == dns.OpcodeQuery && req.Question[0].Qtype == dns.TypeSOA && req.Question[0].Qclass == dns.ClassINET { - // Return SOA to appease findZoneByFqdn() - soaRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN SOA ns1.%s admin.%s 2016022801 28800 7200 2419200 1200", rfc2136TestZone, rfc2136TestTTL, rfc2136TestZone, rfc2136TestZone)) - m.Answer = []dns.RR{soaRR} - } - - if t := req.IsTsig(); t != nil { - if w.TsigStatus() == nil { - // Validated - m.SetTsig(rfc2136TestZone, dns.HmacMD5, 300, time.Now().Unix()) - } - } - - w.WriteMsg(m) -} - -func serverHandlerReturnErr(w dns.ResponseWriter, req *dns.Msg) { - m := new(dns.Msg) - m.SetRcode(req, dns.RcodeNotZone) - w.WriteMsg(m) -} - -func serverHandlerPassBackRequest(w dns.ResponseWriter, req *dns.Msg) { - m := new(dns.Msg) - m.SetReply(req) - if req.Opcode == dns.OpcodeQuery && req.Question[0].Qtype == dns.TypeSOA && req.Question[0].Qclass == dns.ClassINET { - // Return SOA to appease findZoneByFqdn() - soaRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN SOA ns1.%s admin.%s 2016022801 28800 7200 2419200 1200", rfc2136TestZone, rfc2136TestTTL, rfc2136TestZone, rfc2136TestZone)) - m.Answer = []dns.RR{soaRR} - } - - if t := req.IsTsig(); t != nil { - if w.TsigStatus() == nil { - // Validated - m.SetTsig(rfc2136TestZone, dns.HmacMD5, 300, time.Now().Unix()) - } - } - - w.WriteMsg(m) - if req.Opcode != dns.OpcodeQuery || req.Question[0].Qtype != dns.TypeSOA || req.Question[0].Qclass != dns.ClassINET { - // Only talk back when it is not the 
SOA RR. - reqChan <- req - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go deleted file mode 100644 index a5cc9c878..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package route53 - -var ChangeResourceRecordSetsResponse = ` - - - /change/123456 - PENDING - 2016-02-10T01:36:41.958Z - -` - -var ListHostedZonesByNameResponse = ` - - - - /hostedzone/ABCDEFG - example.com. - D2224C5B-684A-DB4A-BB9A-E09E3BAFEA7A - - Test comment - false - - 10 - - - true - example2.com - ZLT12321321124 - 1 -` - -var GetChangeResponse = ` - - - 123456 - INSYNC - 2016-02-10T01:36:41.958Z - -` diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go deleted file mode 100644 index 934f0a2d4..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package route53 implements a DNS provider for solving the DNS-01 challenge -// using AWS Route 53 DNS. -package route53 - -import ( - "fmt" - "math/rand" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/xenolf/lego/acme" -) - -const ( - maxRetries = 5 - route53TTL = 10 -) - -// DNSProvider implements the acme.ChallengeProvider interface -type DNSProvider struct { - client *route53.Route53 - hostedZoneID string -} - -// customRetryer implements the client.Retryer interface by composing the -// DefaultRetryer. It controls the logic for retrying recoverable request -// errors (e.g. when rate limits are exceeded). -type customRetryer struct { - client.DefaultRetryer -} - -// RetryRules overwrites the DefaultRetryer's method. -// It uses a basic exponential backoff algorithm that returns an initial -// delay of ~400ms with an upper limit of ~30 seconds which should prevent -// causing a high number of consecutive throttling errors. -// For reference: Route 53 enforces an account-wide(!) 5req/s query limit. -func (d customRetryer) RetryRules(r *request.Request) time.Duration { - retryCount := r.RetryCount - if retryCount > 7 { - retryCount = 7 - } - - delay := (1 << uint(retryCount)) * (rand.Intn(50) + 200) - return time.Duration(delay) * time.Millisecond -} - -// NewDNSProvider returns a DNSProvider instance configured for the AWS -// Route 53 service. -// -// AWS Credentials are automatically detected in the following locations -// and prioritized in the following order: -// 1. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, -// AWS_REGION, [AWS_SESSION_TOKEN] -// 2. Shared credentials file (defaults to ~/.aws/credentials) -// 3. Amazon EC2 IAM role -// -// If AWS_HOSTED_ZONE_ID is not set, Lego tries to determine the correct -// public hosted zone via the FQDN. 
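// --- Editor's illustrative sketch (not part of the patch) -------------------
// A minimal example of the Route 53 provider removed here. It assumes AWS
// credentials are available through one of the sources listed in the comment
// above (environment variables, shared credentials file, or an EC2 IAM role).
// The hosted zone ID is a placeholder, and setting AWS_HOSTED_ZONE_ID is
// optional; without it the provider looks the zone up from the FQDN.
package main

import (
	"log"
	"os"

	"github.com/xenolf/lego/providers/dns/route53"
)

func main() {
	os.Setenv("AWS_HOSTED_ZONE_ID", "Z1234567890") // optional; skips the zone lookup

	provider, err := route53.NewDNSProvider()
	if err != nil {
		log.Fatal(err)
	}
	_ = provider // hand the provider to the ACME client as its DNS-01 solver
}
// -----------------------------------------------------------------------------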
-// -// See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk -func NewDNSProvider() (*DNSProvider, error) { - hostedZoneID := os.Getenv("AWS_HOSTED_ZONE_ID") - - r := customRetryer{} - r.NumMaxRetries = maxRetries - config := request.WithRetryer(aws.NewConfig(), r) - client := route53.New(session.New(config)) - - return &DNSProvider{ - client: client, - hostedZoneID: hostedZoneID, - }, nil -} - -// Present creates a TXT record using the specified parameters -func (r *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - value = `"` + value + `"` - return r.changeRecord("UPSERT", fqdn, value, route53TTL) -} - -// CleanUp removes the TXT record matching the specified parameters -func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domain, keyAuth) - value = `"` + value + `"` - return r.changeRecord("DELETE", fqdn, value, route53TTL) -} - -func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { - hostedZoneID, err := r.getHostedZoneID(fqdn) - if err != nil { - return fmt.Errorf("Failed to determine Route 53 hosted zone ID: %v", err) - } - - recordSet := newTXTRecordSet(fqdn, value, ttl) - reqParams := &route53.ChangeResourceRecordSetsInput{ - HostedZoneId: aws.String(hostedZoneID), - ChangeBatch: &route53.ChangeBatch{ - Comment: aws.String("Managed by Lego"), - Changes: []*route53.Change{ - { - Action: aws.String(action), - ResourceRecordSet: recordSet, - }, - }, - }, - } - - resp, err := r.client.ChangeResourceRecordSets(reqParams) - if err != nil { - return fmt.Errorf("Failed to change Route 53 record set: %v", err) - } - - statusID := resp.ChangeInfo.Id - - return acme.WaitFor(120*time.Second, 4*time.Second, func() (bool, error) { - reqParams := &route53.GetChangeInput{ - Id: statusID, - } - resp, err := r.client.GetChange(reqParams) - if err != nil { - return false, fmt.Errorf("Failed to query Route 53 change status: %v", err) - } - if *resp.ChangeInfo.Status == route53.ChangeStatusInsync { - return true, nil - } - return false, nil - }) -} - -func (r *DNSProvider) getHostedZoneID(fqdn string) (string, error) { - if r.hostedZoneID != "" { - return r.hostedZoneID, nil - } - - authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) - if err != nil { - return "", err - } - - // .DNSName should not have a trailing dot - reqParams := &route53.ListHostedZonesByNameInput{ - DNSName: aws.String(acme.UnFqdn(authZone)), - } - resp, err := r.client.ListHostedZonesByName(reqParams) - if err != nil { - return "", err - } - - var hostedZoneID string - for _, hostedZone := range resp.HostedZones { - // .Name has a trailing dot - if !*hostedZone.Config.PrivateZone && *hostedZone.Name == authZone { - hostedZoneID = *hostedZone.Id - break - } - } - - if len(hostedZoneID) == 0 { - return "", fmt.Errorf("Zone %s not found in Route 53 for domain %s", authZone, fqdn) - } - - if strings.HasPrefix(hostedZoneID, "/hostedzone/") { - hostedZoneID = strings.TrimPrefix(hostedZoneID, "/hostedzone/") - } - - return hostedZoneID, nil -} - -func newTXTRecordSet(fqdn, value string, ttl int) *route53.ResourceRecordSet { - return &route53.ResourceRecordSet{ - Name: aws.String(fqdn), - Type: aws.String("TXT"), - TTL: aws.Int64(int64(ttl)), - ResourceRecords: []*route53.ResourceRecord{ - {Value: aws.String(value)}, - }, - } -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go 
b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go deleted file mode 100644 index 17ba4a08a..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package route53 - -import ( - "fmt" - "os" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestRoute53TTL(t *testing.T) { - - m, err := testGetAndPreCheck() - if err != nil { - t.Skip(err.Error()) - } - - provider, err := NewDNSProvider() - if err != nil { - t.Fatalf("Fatal: %s", err.Error()) - } - - err = provider.Present(m["route53Domain"], "foo", "bar") - if err != nil { - t.Fatalf("Fatal: %s", err.Error()) - } - // we need a separate R53 client here as the one in the DNS provider is - // unexported. - fqdn := "_acme-challenge." + m["route53Domain"] + "." - svc := route53.New(session.New()) - zoneID, err := provider.getHostedZoneID(fqdn) - if err != nil { - provider.CleanUp(m["route53Domain"], "foo", "bar") - t.Fatalf("Fatal: %s", err.Error()) - } - params := &route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(zoneID), - } - resp, err := svc.ListResourceRecordSets(params) - if err != nil { - provider.CleanUp(m["route53Domain"], "foo", "bar") - t.Fatalf("Fatal: %s", err.Error()) - } - - for _, v := range resp.ResourceRecordSets { - if *v.Name == fqdn && *v.Type == "TXT" && *v.TTL == 10 { - provider.CleanUp(m["route53Domain"], "foo", "bar") - return - } - } - provider.CleanUp(m["route53Domain"], "foo", "bar") - t.Fatalf("Could not find a TXT record for _acme-challenge.%s with a TTL of 10", m["route53Domain"]) -} - -func testGetAndPreCheck() (map[string]string, error) { - m := map[string]string{ - "route53Key": os.Getenv("AWS_ACCESS_KEY_ID"), - "route53Secret": os.Getenv("AWS_SECRET_ACCESS_KEY"), - "route53Region": os.Getenv("AWS_REGION"), - "route53Domain": os.Getenv("R53_DOMAIN"), - } - for _, v := range m { - if v == "" { - return nil, fmt.Errorf("AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, and R53_DOMAIN are needed to run this test") - } - } - return m, nil -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go deleted file mode 100644 index de4e28f3d..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package route53 - -import ( - "net/http/httptest" - "os" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/stretchr/testify/assert" -) - -var ( - route53Secret string - route53Key string - route53Region string - route53Zone string -) - -func init() { - route53Key = os.Getenv("AWS_ACCESS_KEY_ID") - route53Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") - route53Region = os.Getenv("AWS_REGION") - route53Zone = os.Getenv("AWS_HOSTED_ZONE_ID") -} - -func restoreRoute53Env() { - os.Setenv("AWS_ACCESS_KEY_ID", route53Key) - os.Setenv("AWS_SECRET_ACCESS_KEY", route53Secret) - os.Setenv("AWS_REGION", route53Region) - os.Setenv("AWS_HOSTED_ZONE_ID", route53Zone) -} - -func makeRoute53Provider(ts *httptest.Server) *DNSProvider { - config := &aws.Config{ - Credentials: credentials.NewStaticCredentials("abc", "123", " "), - Endpoint: aws.String(ts.URL), - Region: aws.String("mock-region"), - MaxRetries: aws.Int(1), - } - - client 
:= route53.New(session.New(config)) - return &DNSProvider{client: client} -} - -func TestCredentialsFromEnv(t *testing.T) { - os.Setenv("AWS_ACCESS_KEY_ID", "123") - os.Setenv("AWS_SECRET_ACCESS_KEY", "123") - os.Setenv("AWS_REGION", "us-east-1") - - config := &aws.Config{ - CredentialsChainVerboseErrors: aws.Bool(true), - } - - sess := session.New(config) - _, err := sess.Config.Credentials.Get() - assert.NoError(t, err, "Expected credentials to be set from environment") - - restoreRoute53Env() -} - -func TestRegionFromEnv(t *testing.T) { - os.Setenv("AWS_REGION", "us-east-1") - - sess := session.New(aws.NewConfig()) - assert.Equal(t, "us-east-1", *sess.Config.Region, "Expected Region to be set from environment") - - restoreRoute53Env() -} - -func TestHostedZoneIDFromEnv(t *testing.T) { - const testZoneID = "testzoneid" - - defer restoreRoute53Env() - os.Setenv("AWS_HOSTED_ZONE_ID", testZoneID) - - provider, err := NewDNSProvider() - assert.NoError(t, err, "Expected no error constructing DNSProvider") - - fqdn, err := provider.getHostedZoneID("whatever") - assert.NoError(t, err, "Expected FQDN to be resolved to environment variable value") - - assert.Equal(t, testZoneID, fqdn) -} - -func TestRoute53Present(t *testing.T) { - mockResponses := MockResponseMap{ - "/2013-04-01/hostedzonesbyname": MockResponse{StatusCode: 200, Body: ListHostedZonesByNameResponse}, - "/2013-04-01/hostedzone/ABCDEFG/rrset/": MockResponse{StatusCode: 200, Body: ChangeResourceRecordSetsResponse}, - "/2013-04-01/change/123456": MockResponse{StatusCode: 200, Body: GetChangeResponse}, - } - - ts := newMockServer(t, mockResponses) - defer ts.Close() - - provider := makeRoute53Provider(ts) - - domain := "example.com" - keyAuth := "123456d==" - - err := provider.Present(domain, "", keyAuth) - assert.NoError(t, err, "Expected Present to return no error") -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go deleted file mode 100644 index e448a6858..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package route53 - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// MockResponse represents a predefined response used by a mock server -type MockResponse struct { - StatusCode int - Body string -} - -// MockResponseMap maps request paths to responses -type MockResponseMap map[string]MockResponse - -func newMockServer(t *testing.T, responses MockResponseMap) *httptest.Server { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - path := r.URL.Path - resp, ok := responses[path] - if !ok { - msg := fmt.Sprintf("Requested path not found in response map: %s", path) - require.FailNow(t, msg) - } - - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(resp.StatusCode) - w.Write([]byte(resp.Body)) - })) - - time.Sleep(100 * time.Millisecond) - return ts -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go deleted file mode 100644 index bc2067579..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go +++ /dev/null @@ -1,127 +0,0 @@ -// Package vultr implements a DNS provider for solving the DNS-01 challenge using -// the vultr DNS. 
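// --- Editor's illustrative sketch (not part of the patch) -------------------
// A minimal example of the Vultr provider removed below, assuming VULTR_API_KEY
// holds a valid API key as its package documentation states. The key, domain,
// and key authorization value are placeholders.
package main

import (
	"log"
	"os"

	"github.com/xenolf/lego/providers/dns/vultr"
)

func main() {
	os.Setenv("VULTR_API_KEY", "placeholder-key")

	provider, err := vultr.NewDNSProvider()
	if err != nil {
		log.Fatal(err) // returns "Vultr credentials missing" when the key is empty
	}

	if err := provider.Present("example.com", "", "keyAuth-placeholder"); err != nil {
		log.Fatal(err)
	}
}
// -----------------------------------------------------------------------------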
-// See https://www.vultr.com/api/#dns -package vultr - -import ( - "fmt" - "os" - "strings" - - vultr "github.com/JamesClonk/vultr/lib" - "github.com/xenolf/lego/acme" -) - -// DNSProvider is an implementation of the acme.ChallengeProvider interface. -type DNSProvider struct { - client *vultr.Client -} - -// NewDNSProvider returns a DNSProvider instance with a configured Vultr client. -// Authentication uses the VULTR_API_KEY environment variable. -func NewDNSProvider() (*DNSProvider, error) { - apiKey := os.Getenv("VULTR_API_KEY") - return NewDNSProviderCredentials(apiKey) -} - -// NewDNSProviderCredentials uses the supplied credentials to return a DNSProvider -// instance configured for Vultr. -func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) { - if apiKey == "" { - return nil, fmt.Errorf("Vultr credentials missing") - } - - c := &DNSProvider{ - client: vultr.NewClient(apiKey, nil), - } - - return c, nil -} - -// Present creates a TXT record to fulfil the DNS-01 challenge. -func (c *DNSProvider) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) - - zoneDomain, err := c.getHostedZone(domain) - if err != nil { - return err - } - - name := c.extractRecordName(fqdn, zoneDomain) - - err = c.client.CreateDNSRecord(zoneDomain, name, "TXT", `"`+value+`"`, 0, ttl) - if err != nil { - return fmt.Errorf("Vultr API call failed: %v", err) - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := acme.DNS01Record(domain, keyAuth) - - zoneDomain, records, err := c.findTxtRecords(domain, fqdn) - if err != nil { - return err - } - - for _, rec := range records { - err := c.client.DeleteDNSRecord(zoneDomain, rec.RecordID) - if err != nil { - return err - } - } - return nil -} - -func (c *DNSProvider) getHostedZone(domain string) (string, error) { - domains, err := c.client.GetDNSDomains() - if err != nil { - return "", fmt.Errorf("Vultr API call failed: %v", err) - } - - var hostedDomain vultr.DNSDomain - for _, d := range domains { - if strings.HasSuffix(domain, d.Domain) { - if len(d.Domain) > len(hostedDomain.Domain) { - hostedDomain = d - } - } - } - if hostedDomain.Domain == "" { - return "", fmt.Errorf("No matching Vultr domain found for domain %s", domain) - } - - return hostedDomain.Domain, nil -} - -func (c *DNSProvider) findTxtRecords(domain, fqdn string) (string, []vultr.DNSRecord, error) { - zoneDomain, err := c.getHostedZone(domain) - if err != nil { - return "", nil, err - } - - var records []vultr.DNSRecord - result, err := c.client.GetDNSRecords(zoneDomain) - if err != nil { - return "", records, fmt.Errorf("Vultr API call has failed: %v", err) - } - - recordName := c.extractRecordName(fqdn, zoneDomain) - for _, record := range result { - if record.Type == "TXT" && record.Name == recordName { - records = append(records, record) - } - } - - return zoneDomain, records, nil -} - -func (c *DNSProvider) extractRecordName(fqdn, domain string) string { - name := acme.UnFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go deleted file mode 100644 index 7c8cdaf1e..000000000 --- a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package vultr - -import ( - "os" - 
"testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - liveTest bool - apiKey string - domain string -) - -func init() { - apiKey = os.Getenv("VULTR_API_KEY") - domain = os.Getenv("VULTR_TEST_DOMAIN") - liveTest = len(apiKey) > 0 && len(domain) > 0 -} - -func restoreEnv() { - os.Setenv("VULTR_API_KEY", apiKey) -} - -func TestNewDNSProviderValidEnv(t *testing.T) { - os.Setenv("VULTR_API_KEY", "123") - defer restoreEnv() - _, err := NewDNSProvider() - assert.NoError(t, err) -} - -func TestNewDNSProviderMissingCredErr(t *testing.T) { - os.Setenv("VULTR_API_KEY", "") - defer restoreEnv() - _, err := NewDNSProvider() - assert.EqualError(t, err, "Vultr credentials missing") -} - -func TestLivePresent(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.Present(domain, "", "123d==") - assert.NoError(t, err) -} - -func TestLiveCleanUp(t *testing.T) { - if !liveTest { - t.Skip("skipping live test") - } - - time.Sleep(time.Second * 1) - - provider, err := NewDNSProvider() - assert.NoError(t, err) - - err = provider.CleanUp(domain, "", "123d==") - assert.NoError(t, err) -} diff --git a/vendor/github.com/xenolf/lego/providers/http/memcached/README.md b/vendor/github.com/xenolf/lego/providers/http/memcached/README.md deleted file mode 100644 index f14d216df..000000000 --- a/vendor/github.com/xenolf/lego/providers/http/memcached/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Memcached http provider - -Publishes challenges into memcached where they can be retrieved by nginx. Allows -specifying multiple memcached servers and the responses will be published to all -of them, making it easier to verify when your domain is hosted on a cluster of -servers. - -Example nginx config: - -``` - location /.well-known/acme-challenge/ { - set $memcached_key "$uri"; - memcached_pass 127.0.0.1:11211; - } -``` diff --git a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go b/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go deleted file mode 100644 index 9ac8b811d..000000000 --- a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go +++ /dev/null @@ -1,60 +0,0 @@ -// Package memcached implements a HTTP provider for solving the HTTP-01 challenge using memcached -// in combination with a webserver. 
-package memcached - -import ( - "fmt" - "path" - - "github.com/rainycape/memcache" - "github.com/xenolf/lego/acme" -) - -// HTTPProvider implements ChallengeProvider for `http-01` challenge -type MemcachedProvider struct { - hosts []string -} - -// NewHTTPProvider returns a HTTPProvider instance with a configured webroot path -func NewMemcachedProvider(hosts []string) (*MemcachedProvider, error) { - if len(hosts) == 0 { - return nil, fmt.Errorf("No memcached hosts provided") - } - - c := &MemcachedProvider{ - hosts: hosts, - } - - return c, nil -} - -// Present makes the token available at `HTTP01ChallengePath(token)` by creating a file in the given webroot path -func (w *MemcachedProvider) Present(domain, token, keyAuth string) error { - var errs []error - - challengePath := path.Join("/", acme.HTTP01ChallengePath(token)) - for _, host := range w.hosts { - mc, err := memcache.New(host) - if err != nil { - errs = append(errs, err) - continue - } - mc.Add(&memcache.Item{ - Key: challengePath, - Value: []byte(keyAuth), - Expiration: 60, - }) - } - - if len(errs) == len(w.hosts) { - return fmt.Errorf("Unable to store key in any of the memcache hosts -> %v", errs) - } - - return nil -} - -// CleanUp removes the file created for the challenge -func (w *MemcachedProvider) CleanUp(domain, token, keyAuth string) error { - // Memcached will clean up itself, that's what expiration is for. - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached_test.go b/vendor/github.com/xenolf/lego/providers/http/memcached/memcached_test.go deleted file mode 100644 index 287a33304..000000000 --- a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package memcached - -import ( - "os" - "path" - "strings" - "testing" - - "github.com/rainycape/memcache" - "github.com/stretchr/testify/assert" - "github.com/xenolf/lego/acme" -) - -var ( - memcachedHosts []string -) - -const ( - domain = "lego.test" - token = "foo" - keyAuth = "bar" -) - -func init() { - memcachedHostsStr := os.Getenv("MEMCACHED_HOSTS") - if len(memcachedHostsStr) > 0 { - memcachedHosts = strings.Split(memcachedHostsStr, ",") - } -} - -func TestNewMemcachedProviderEmpty(t *testing.T) { - emptyHosts := make([]string, 0) - _, err := NewMemcachedProvider(emptyHosts) - assert.EqualError(t, err, "No memcached hosts provided") -} - -func TestNewMemcachedProviderValid(t *testing.T) { - if len(memcachedHosts) == 0 { - t.Skip("Skipping memcached tests") - } - _, err := NewMemcachedProvider(memcachedHosts) - assert.NoError(t, err) -} - -func TestMemcachedPresentSingleHost(t *testing.T) { - if len(memcachedHosts) == 0 { - t.Skip("Skipping memcached tests") - } - p, err := NewMemcachedProvider(memcachedHosts[0:1]) - assert.NoError(t, err) - - challengePath := path.Join("/", acme.HTTP01ChallengePath(token)) - - err = p.Present(domain, token, keyAuth) - assert.NoError(t, err) - mc, err := memcache.New(memcachedHosts[0]) - assert.NoError(t, err) - i, err := mc.Get(challengePath) - assert.NoError(t, err) - assert.Equal(t, i.Value, []byte(keyAuth)) -} - -func TestMemcachedPresentMultiHost(t *testing.T) { - if len(memcachedHosts) <= 1 { - t.Skip("Skipping memcached multi-host tests") - } - p, err := NewMemcachedProvider(memcachedHosts) - assert.NoError(t, err) - - challengePath := path.Join("/", acme.HTTP01ChallengePath(token)) - - err = p.Present(domain, token, keyAuth) - assert.NoError(t, err) - for _, host := range memcachedHosts { - mc, err := memcache.New(host) - 
assert.NoError(t, err) - i, err := mc.Get(challengePath) - assert.NoError(t, err) - assert.Equal(t, i.Value, []byte(keyAuth)) - } -} - -func TestMemcachedPresentPartialFailureMultiHost(t *testing.T) { - if len(memcachedHosts) == 0 { - t.Skip("Skipping memcached tests") - } - hosts := append(memcachedHosts, "5.5.5.5:11211") - p, err := NewMemcachedProvider(hosts) - assert.NoError(t, err) - - challengePath := path.Join("/", acme.HTTP01ChallengePath(token)) - - err = p.Present(domain, token, keyAuth) - assert.NoError(t, err) - for _, host := range memcachedHosts { - mc, err := memcache.New(host) - assert.NoError(t, err) - i, err := mc.Get(challengePath) - assert.NoError(t, err) - assert.Equal(t, i.Value, []byte(keyAuth)) - } -} - -func TestMemcachedCleanup(t *testing.T) { - if len(memcachedHosts) == 0 { - t.Skip("Skipping memcached tests") - } - p, err := NewMemcachedProvider(memcachedHosts) - assert.NoError(t, err) - assert.NoError(t, p.CleanUp(domain, token, keyAuth)) -} diff --git a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go deleted file mode 100644 index 4bf211f39..000000000 --- a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package webroot implements a HTTP provider for solving the HTTP-01 challenge using web server's root path. -package webroot - -import ( - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/xenolf/lego/acme" -) - -// HTTPProvider implements ChallengeProvider for `http-01` challenge -type HTTPProvider struct { - path string -} - -// NewHTTPProvider returns a HTTPProvider instance with a configured webroot path -func NewHTTPProvider(path string) (*HTTPProvider, error) { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, fmt.Errorf("Webroot path does not exist") - } - - c := &HTTPProvider{ - path: path, - } - - return c, nil -} - -// Present makes the token available at `HTTP01ChallengePath(token)` by creating a file in the given webroot path -func (w *HTTPProvider) Present(domain, token, keyAuth string) error { - var err error - - challengeFilePath := path.Join(w.path, acme.HTTP01ChallengePath(token)) - err = os.MkdirAll(path.Dir(challengeFilePath), 0755) - if err != nil { - return fmt.Errorf("Could not create required directories in webroot for HTTP challenge -> %v", err) - } - - err = ioutil.WriteFile(challengeFilePath, []byte(keyAuth), 0644) - if err != nil { - return fmt.Errorf("Could not write file in webroot for HTTP challenge -> %v", err) - } - - return nil -} - -// CleanUp removes the file created for the challenge -func (w *HTTPProvider) CleanUp(domain, token, keyAuth string) error { - var err error - err = os.Remove(path.Join(w.path, acme.HTTP01ChallengePath(token))) - if err != nil { - return fmt.Errorf("Could not remove file in webroot after HTTP challenge -> %v", err) - } - - return nil -} diff --git a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go deleted file mode 100644 index 99c930ed3..000000000 --- a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package webroot - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestHTTPProvider(t *testing.T) { - webroot := "webroot" - domain := "domain" - token := "token" - keyAuth := "keyAuth" - challengeFilePath := webroot + "/.well-known/acme-challenge/" + token - - 
os.MkdirAll(webroot+"/.well-known/acme-challenge", 0777) - defer os.RemoveAll(webroot) - - provider, err := NewHTTPProvider(webroot) - if err != nil { - t.Errorf("Webroot provider error: got %v, want nil", err) - } - - err = provider.Present(domain, token, keyAuth) - if err != nil { - t.Errorf("Webroot provider present() error: got %v, want nil", err) - } - - if _, err := os.Stat(challengeFilePath); os.IsNotExist(err) { - t.Error("Challenge file was not created in webroot") - } - - data, err := ioutil.ReadFile(challengeFilePath) - if err != nil { - t.Errorf("Webroot provider ReadFile() error: got %v, want nil", err) - } - dataStr := string(data) - if dataStr != keyAuth { - t.Errorf("Challenge file content: got %q, want %q", dataStr, keyAuth) - } - - err = provider.CleanUp(domain, token, keyAuth) - if err != nil { - t.Errorf("Webroot provider CleanUp() error: got %v, want nil", err) - } -} -- cgit v1.2.3-1-g7c22
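
The hunks above delete lego's Vultr DNS-01 provider together with its tests. As a rough usage sketch only, reconstructed from the deleted vultr.go and vultr_test.go (the API key, domain, and key authorization below are placeholders, and a real run would hit the Vultr API):

```
package main

import (
	"log"
	"os"

	"github.com/xenolf/lego/providers/dns/vultr"
)

func main() {
	// NewDNSProvider reads VULTR_API_KEY from the environment; the value
	// used here is a placeholder.
	os.Setenv("VULTR_API_KEY", "your-api-key")

	provider, err := vultr.NewDNSProvider()
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder inputs; an ACME client would normally supply these while
	// solving a DNS-01 challenge.
	domain, keyAuth := "example.com", "123d=="

	// Present creates the challenge TXT record in the matching Vultr zone.
	if err := provider.Present(domain, "", keyAuth); err != nil {
		log.Fatal(err)
	}

	// CleanUp removes the TXT record again once validation is finished.
	if err := provider.CleanUp(domain, "", keyAuth); err != nil {
		log.Fatal(err)
	}
}
```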
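
The deleted memcached provider stores each HTTP-01 response under the key path.Join("/", acme.HTTP01ChallengePath(token)), which is exactly the $uri that the README's nginx memcached_pass block looks up. A minimal sketch of reading such a stored response back, using only calls that appear in the deleted code (memcache.New, Get); the host and token values are placeholders:

```
package main

import (
	"fmt"
	"log"
	"path"

	"github.com/rainycape/memcache"
	"github.com/xenolf/lego/acme"
)

func main() {
	// Placeholders: a real deployment would use one of the configured
	// MEMCACHED_HOSTS and the token issued by the ACME server.
	host := "127.0.0.1:11211"
	token := "foo"

	// The provider stores the response under "/" + HTTP01ChallengePath(token),
	// which matches the $uri nginx hands to memcached_pass.
	key := path.Join("/", acme.HTTP01ChallengePath(token))

	mc, err := memcache.New(host)
	if err != nil {
		log.Fatal(err)
	}

	item, err := mc.Get(key)
	if err != nil {
		log.Fatal(err) // e.g. a cache miss if Present was never called
	}
	fmt.Printf("challenge response for %s: %s\n", key, item.Value)
}
```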
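
Similarly, the webroot provider removed here writes the key authorization to <webroot>/.well-known/acme-challenge/<token> in Present and deletes it in CleanUp. A small sketch mirroring its own webroot_test.go, with placeholder directory and challenge values:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"

	"github.com/xenolf/lego/acme"
	"github.com/xenolf/lego/providers/http/webroot"
)

func main() {
	// Placeholder values; the ACME client normally supplies domain, token,
	// and keyAuth, and the webroot points at the directory the web server serves.
	dir, domain, token, keyAuth := "webroot", "example.com", "token", "keyAuth"

	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	provider, err := webroot.NewHTTPProvider(dir)
	if err != nil {
		log.Fatal(err)
	}

	// Present writes keyAuth to <webroot>/.well-known/acme-challenge/<token>.
	if err := provider.Present(domain, token, keyAuth); err != nil {
		log.Fatal(err)
	}

	data, err := ioutil.ReadFile(path.Join(dir, acme.HTTP01ChallengePath(token)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // prints the key authorization

	// CleanUp deletes the challenge file again.
	if err := provider.CleanUp(domain, token, keyAuth); err != nil {
		log.Fatal(err)
	}
}
```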