author	Christopher Speller <crspeller@gmail.com>	2016-10-03 16:03:15 -0400
committer	GitHub <noreply@github.com>	2016-10-03 16:03:15 -0400
commit	8f91c777559748fa6e857d9fc1f4ae079a532813 (patch)
tree	190f7cef373764a0d47a91045fdb486ee3d6781d /vendor
parent	5f8e5c401bd96cba9a98b2db02d72f9cbacb0103 (diff)
Adding ability to serve TLS directly from Mattermost server (#4119)
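(Editorial illustration, not part of the commit: a minimal sketch of what serving TLS directly from a Go HTTP server can look like. The handler, listen address, and certificate paths are placeholders and this is not Mattermost's actual configuration; the vendored packages listed in the diffstat below, such as tylerb/graceful and rsc/letsencrypt, suggest graceful shutdown and Let's Encrypt support layered on top of the same idea.)

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("served over TLS\n"))
        })

        // Terminate TLS in the Go process itself instead of relying on an
        // external reverse proxy. Certificate and key paths are placeholders.
        log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux))
    }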
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/miekg/dns/.gitignore4
-rw-r--r--vendor/github.com/miekg/dns/.travis.yml7
-rw-r--r--vendor/github.com/miekg/dns/AUTHORS1
-rw-r--r--vendor/github.com/miekg/dns/CONTRIBUTORS9
-rw-r--r--vendor/github.com/miekg/dns/COPYRIGHT9
-rw-r--r--vendor/github.com/miekg/dns/LICENSE32
-rw-r--r--vendor/github.com/miekg/dns/README.md151
-rw-r--r--vendor/github.com/miekg/dns/client.go455
-rw-r--r--vendor/github.com/miekg/dns/client_test.go452
-rw-r--r--vendor/github.com/miekg/dns/clientconfig.go99
-rw-r--r--vendor/github.com/miekg/dns/clientconfig_test.go50
-rw-r--r--vendor/github.com/miekg/dns/defaults.go282
-rw-r--r--vendor/github.com/miekg/dns/dns.go104
-rw-r--r--vendor/github.com/miekg/dns/dns_bench_test.go211
-rw-r--r--vendor/github.com/miekg/dns/dns_test.go433
-rw-r--r--vendor/github.com/miekg/dns/dnssec.go721
-rw-r--r--vendor/github.com/miekg/dns/dnssec_keygen.go156
-rw-r--r--vendor/github.com/miekg/dns/dnssec_keyscan.go249
-rw-r--r--vendor/github.com/miekg/dns/dnssec_privkey.go85
-rw-r--r--vendor/github.com/miekg/dns/dnssec_test.go733
-rw-r--r--vendor/github.com/miekg/dns/dnsutil/util.go79
-rw-r--r--vendor/github.com/miekg/dns/dnsutil/util_test.go130
-rw-r--r--vendor/github.com/miekg/dns/doc.go251
-rw-r--r--vendor/github.com/miekg/dns/dyn_test.go3
-rw-r--r--vendor/github.com/miekg/dns/edns.go532
-rw-r--r--vendor/github.com/miekg/dns/edns_test.go32
-rw-r--r--vendor/github.com/miekg/dns/example_test.go146
-rw-r--r--vendor/github.com/miekg/dns/format.go87
-rw-r--r--vendor/github.com/miekg/dns/fuzz_test.go25
-rw-r--r--vendor/github.com/miekg/dns/generate.go159
-rw-r--r--vendor/github.com/miekg/dns/idn/code_points.go2346
-rw-r--r--vendor/github.com/miekg/dns/idn/example_test.go18
-rw-r--r--vendor/github.com/miekg/dns/idn/punycode.go373
-rw-r--r--vendor/github.com/miekg/dns/idn/punycode_test.go116
-rw-r--r--vendor/github.com/miekg/dns/issue_test.go23
-rw-r--r--vendor/github.com/miekg/dns/labels.go168
-rw-r--r--vendor/github.com/miekg/dns/labels_test.go200
-rw-r--r--vendor/github.com/miekg/dns/msg.go1231
-rw-r--r--vendor/github.com/miekg/dns/msg_generate.go340
-rw-r--r--vendor/github.com/miekg/dns/msg_helpers.go630
-rw-r--r--vendor/github.com/miekg/dns/nsecx.go119
-rw-r--r--vendor/github.com/miekg/dns/nsecx_test.go29
-rw-r--r--vendor/github.com/miekg/dns/parse_test.go1493
-rw-r--r--vendor/github.com/miekg/dns/privaterr.go149
-rw-r--r--vendor/github.com/miekg/dns/privaterr_test.go171
-rw-r--r--vendor/github.com/miekg/dns/rawmsg.go49
-rw-r--r--vendor/github.com/miekg/dns/remote_test.go19
-rw-r--r--vendor/github.com/miekg/dns/reverse.go38
-rw-r--r--vendor/github.com/miekg/dns/sanitize.go84
-rw-r--r--vendor/github.com/miekg/dns/sanitize_test.go84
-rw-r--r--vendor/github.com/miekg/dns/scan.go974
-rw-r--r--vendor/github.com/miekg/dns/scan_rr.go2143
-rw-r--r--vendor/github.com/miekg/dns/scanner.go43
-rw-r--r--vendor/github.com/miekg/dns/server.go732
-rw-r--r--vendor/github.com/miekg/dns/server_test.go679
-rw-r--r--vendor/github.com/miekg/dns/sig0.go219
-rw-r--r--vendor/github.com/miekg/dns/sig0_test.go89
-rw-r--r--vendor/github.com/miekg/dns/singleinflight.go57
-rw-r--r--vendor/github.com/miekg/dns/tlsa.go86
-rw-r--r--vendor/github.com/miekg/dns/tsig.go384
-rw-r--r--vendor/github.com/miekg/dns/tsig_test.go37
-rw-r--r--vendor/github.com/miekg/dns/types.go1249
-rw-r--r--vendor/github.com/miekg/dns/types_generate.go271
-rw-r--r--vendor/github.com/miekg/dns/types_test.go42
-rw-r--r--vendor/github.com/miekg/dns/udp.go58
-rw-r--r--vendor/github.com/miekg/dns/udp_linux.go73
-rw-r--r--vendor/github.com/miekg/dns/udp_other.go17
-rw-r--r--vendor/github.com/miekg/dns/udp_plan9.go34
-rw-r--r--vendor/github.com/miekg/dns/udp_windows.go34
-rw-r--r--vendor/github.com/miekg/dns/update.go106
-rw-r--r--vendor/github.com/miekg/dns/update_test.go145
-rw-r--r--vendor/github.com/miekg/dns/xfr.go244
-rw-r--r--vendor/github.com/miekg/dns/xfr_test.go161
-rw-r--r--vendor/github.com/miekg/dns/zmsg.go3464
-rw-r--r--vendor/github.com/miekg/dns/ztypes.go828
-rw-r--r--vendor/github.com/rsc/letsencrypt/LICENSE27
-rw-r--r--vendor/github.com/rsc/letsencrypt/README152
-rw-r--r--vendor/github.com/rsc/letsencrypt/lets.go757
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE21
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go16
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go638
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go198
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go323
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go93
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go73
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go117
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go41
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go79
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go57
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go100
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go107
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go115
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go28
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go73
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go62
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go65
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go29
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go26
-rw-r--r--vendor/github.com/rsc/letsencrypt/vendor/vendor.json31
-rw-r--r--vendor/github.com/tylerb/graceful/.gitignore23
-rw-r--r--vendor/github.com/tylerb/graceful/.travis.yml13
-rw-r--r--vendor/github.com/tylerb/graceful/LICENSE21
-rw-r--r--vendor/github.com/tylerb/graceful/README.md152
-rw-r--r--vendor/github.com/tylerb/graceful/graceful.go487
-rw-r--r--vendor/github.com/tylerb/graceful/graceful_test.go692
-rw-r--r--vendor/github.com/tylerb/graceful/http2_test.go125
-rw-r--r--vendor/github.com/tylerb/graceful/keepalive_listener.go32
-rw-r--r--vendor/github.com/tylerb/graceful/limit_listen.go77
-rw-r--r--vendor/github.com/tylerb/graceful/test-fixtures/cert.crt43
-rw-r--r--vendor/github.com/tylerb/graceful/test-fixtures/key.pem27
-rw-r--r--vendor/github.com/tylerb/graceful/tests/main.go40
-rw-r--r--vendor/github.com/xenolf/lego/.gitcookies.encbin0 -> 480 bytes
-rw-r--r--vendor/github.com/xenolf/lego/.gitignore4
-rw-r--r--vendor/github.com/xenolf/lego/.travis.yml12
-rw-r--r--vendor/github.com/xenolf/lego/CHANGELOG.md94
-rw-r--r--vendor/github.com/xenolf/lego/CONTRIBUTING.md32
-rw-r--r--vendor/github.com/xenolf/lego/Dockerfile14
-rw-r--r--vendor/github.com/xenolf/lego/LICENSE21
-rw-r--r--vendor/github.com/xenolf/lego/README.md257
-rw-r--r--vendor/github.com/xenolf/lego/account.go109
-rw-r--r--vendor/github.com/xenolf/lego/acme/challenges.go16
-rw-r--r--vendor/github.com/xenolf/lego/acme/client.go804
-rw-r--r--vendor/github.com/xenolf/lego/acme/client_test.go198
-rw-r--r--vendor/github.com/xenolf/lego/acme/crypto.go332
-rw-r--r--vendor/github.com/xenolf/lego/acme/crypto_test.go93
-rw-r--r--vendor/github.com/xenolf/lego/acme/dns_challenge.go282
-rw-r--r--vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go53
-rw-r--r--vendor/github.com/xenolf/lego/acme/dns_challenge_test.go185
-rw-r--r--vendor/github.com/xenolf/lego/acme/error.go86
-rw-r--r--vendor/github.com/xenolf/lego/acme/http.go117
-rw-r--r--vendor/github.com/xenolf/lego/acme/http_challenge.go41
-rw-r--r--vendor/github.com/xenolf/lego/acme/http_challenge_server.go79
-rw-r--r--vendor/github.com/xenolf/lego/acme/http_challenge_test.go57
-rw-r--r--vendor/github.com/xenolf/lego/acme/http_test.go100
-rw-r--r--vendor/github.com/xenolf/lego/acme/jws.go115
-rw-r--r--vendor/github.com/xenolf/lego/acme/messages.go117
-rw-r--r--vendor/github.com/xenolf/lego/acme/pop_challenge.go1
-rw-r--r--vendor/github.com/xenolf/lego/acme/provider.go28
-rw-r--r--vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go67
-rw-r--r--vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go62
-rw-r--r--vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go65
-rw-r--r--vendor/github.com/xenolf/lego/acme/utils.go29
-rw-r--r--vendor/github.com/xenolf/lego/acme/utils_test.go26
-rw-r--r--vendor/github.com/xenolf/lego/cli.go214
-rw-r--r--vendor/github.com/xenolf/lego/cli_handlers.go444
-rw-r--r--vendor/github.com/xenolf/lego/configuration.go76
-rw-r--r--vendor/github.com/xenolf/lego/crypto.go56
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go223
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go80
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go166
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go117
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go141
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go79
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go248
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go37
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go274
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go53
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go472
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go939
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go158
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go85
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/linode/linode.go131
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go317
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go416
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go402
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go159
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go103
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/pdns/README.md7
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go343
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go80
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go129
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go244
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go39
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/route53/route53.go171
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go70
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go87
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go38
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go127
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go65
-rw-r--r--vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go58
-rw-r--r--vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go46
-rw-r--r--vendor/golang.org/x/net/.gitattributes10
-rw-r--r--vendor/golang.org/x/net/.gitignore2
-rw-r--r--vendor/golang.org/x/net/AUTHORS3
-rw-r--r--vendor/golang.org/x/net/CONTRIBUTING.md31
-rw-r--r--vendor/golang.org/x/net/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/net/LICENSE27
-rw-r--r--vendor/golang.org/x/net/PATENTS22
-rw-r--r--vendor/golang.org/x/net/README3
-rw-r--r--vendor/golang.org/x/net/bpf/asm.go41
-rw-r--r--vendor/golang.org/x/net/bpf/constants.go215
-rw-r--r--vendor/golang.org/x/net/bpf/doc.go82
-rw-r--r--vendor/golang.org/x/net/bpf/instructions.go434
-rw-r--r--vendor/golang.org/x/net/bpf/instructions_test.go184
-rw-r--r--vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf1
-rw-r--r--vendor/golang.org/x/net/bpf/testdata/all_instructions.txt79
-rw-r--r--vendor/golang.org/x/net/bpf/vm.go140
-rw-r--r--vendor/golang.org/x/net/bpf/vm_aluop_test.go512
-rw-r--r--vendor/golang.org/x/net/bpf/vm_bpf_test.go192
-rw-r--r--vendor/golang.org/x/net/bpf/vm_extension_test.go49
-rw-r--r--vendor/golang.org/x/net/bpf/vm_instructions.go174
-rw-r--r--vendor/golang.org/x/net/bpf/vm_jump_test.go380
-rw-r--r--vendor/golang.org/x/net/bpf/vm_load_test.go246
-rw-r--r--vendor/golang.org/x/net/bpf/vm_ret_test.go115
-rw-r--r--vendor/golang.org/x/net/bpf/vm_scratch_test.go247
-rw-r--r--vendor/golang.org/x/net/bpf/vm_test.go144
-rw-r--r--vendor/golang.org/x/net/codereview.cfg1
-rw-r--r--vendor/golang.org/x/net/context/context.go156
-rw-r--r--vendor/golang.org/x/net/context/context_test.go577
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go74
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go28
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go147
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go79
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go105
-rw-r--r--vendor/golang.org/x/net/context/go17.go72
-rw-r--r--vendor/golang.org/x/net/context/pre_go17.go300
-rw-r--r--vendor/golang.org/x/net/context/withtimeout_test.go26
-rw-r--r--vendor/golang.org/x/net/dict/dict.go210
-rw-r--r--vendor/golang.org/x/net/html/atom/atom.go78
-rw-r--r--vendor/golang.org/x/net/html/atom/atom_test.go109
-rw-r--r--vendor/golang.org/x/net/html/atom/gen.go648
-rw-r--r--vendor/golang.org/x/net/html/atom/table.go713
-rw-r--r--vendor/golang.org/x/net/html/atom/table_test.go351
-rw-r--r--vendor/golang.org/x/net/html/charset/charset.go257
-rw-r--r--vendor/golang.org/x/net/html/charset/charset_test.go237
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html48
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html48
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html49
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html49
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html47
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/README9
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.htmlbin0 -> 2670 bytes
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.htmlbin0 -> 2682 bytes
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html49
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html48
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html48
-rw-r--r--vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html48
-rw-r--r--vendor/golang.org/x/net/html/const.go102
-rw-r--r--vendor/golang.org/x/net/html/doc.go106
-rw-r--r--vendor/golang.org/x/net/html/doctype.go156
-rw-r--r--vendor/golang.org/x/net/html/entity.go2253
-rw-r--r--vendor/golang.org/x/net/html/entity_test.go29
-rw-r--r--vendor/golang.org/x/net/html/escape.go258
-rw-r--r--vendor/golang.org/x/net/html/escape_test.go97
-rw-r--r--vendor/golang.org/x/net/html/example_test.go40
-rw-r--r--vendor/golang.org/x/net/html/foreign.go226
-rw-r--r--vendor/golang.org/x/net/html/node.go193
-rw-r--r--vendor/golang.org/x/net/html/node_test.go146
-rw-r--r--vendor/golang.org/x/net/html/parse.go2094
-rw-r--r--vendor/golang.org/x/net/html/parse_test.go388
-rw-r--r--vendor/golang.org/x/net/html/render.go271
-rw-r--r--vendor/golang.org/x/net/html/render_test.go156
-rw-r--r--vendor/golang.org/x/net/html/testdata/go1.html2237
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/README28
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat194
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat31
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/comments01.dat135
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat370
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/entities01.dat603
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/entities02.dat249
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat246
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat43
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/isindex.dat40
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.datbin0 -> 115 bytes
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat52
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.datbin0 -> 4166 bytes
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat308
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat15
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat28
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tables01.dat212
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests1.dat1952
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests10.dat799
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests11.dat482
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests12.dat62
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests14.dat74
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests15.dat208
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests16.dat2299
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests17.dat153
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests18.dat269
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests19.dat1237
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests2.dat763
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests20.dat455
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests21.dat221
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests22.dat157
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests23.dat155
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests24.dat79
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests25.dat219
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests26.dat313
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests3.dat305
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests4.dat59
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests5.dat191
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests6.dat663
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests7.dat390
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests8.dat148
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests9.dat457
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat741
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat261
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat610
-rw-r--r--vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat159
-rw-r--r--vendor/golang.org/x/net/html/token.go1219
-rw-r--r--vendor/golang.org/x/net/html/token_test.go748
-rw-r--r--vendor/golang.org/x/net/http2/.gitignore2
-rw-r--r--vendor/golang.org/x/net/http2/Dockerfile51
-rw-r--r--vendor/golang.org/x/net/http2/Makefile3
-rw-r--r--vendor/golang.org/x/net/http2/README20
-rw-r--r--vendor/golang.org/x/net/http2/client_conn_pool.go256
-rw-r--r--vendor/golang.org/x/net/http2/configure_transport.go80
-rw-r--r--vendor/golang.org/x/net/http2/errors.go130
-rw-r--r--vendor/golang.org/x/net/http2/errors_test.go24
-rw-r--r--vendor/golang.org/x/net/http2/fixed_buffer.go60
-rw-r--r--vendor/golang.org/x/net/http2/fixed_buffer_test.go128
-rw-r--r--vendor/golang.org/x/net/http2/flow.go50
-rw-r--r--vendor/golang.org/x/net/http2/flow_test.go53
-rw-r--r--vendor/golang.org/x/net/http2/frame.go1539
-rw-r--r--vendor/golang.org/x/net/http2/frame_test.go1102
-rw-r--r--vendor/golang.org/x/net/http2/go16.go43
-rw-r--r--vendor/golang.org/x/net/http2/go17.go94
-rw-r--r--vendor/golang.org/x/net/http2/go17_not18.go36
-rw-r--r--vendor/golang.org/x/net/http2/go18.go11
-rw-r--r--vendor/golang.org/x/net/http2/gotrack.go170
-rw-r--r--vendor/golang.org/x/net/http2/gotrack_test.go33
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/.gitignore5
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/Makefile8
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/README16
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/h2demo.go504
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/launch.go302
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.key27
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.pem26
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/rootCA.srl1
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/server.crt20
-rw-r--r--vendor/golang.org/x/net/http2/h2demo/server.key27
-rw-r--r--vendor/golang.org/x/net/http2/h2i/README.md97
-rw-r--r--vendor/golang.org/x/net/http2/h2i/h2i.go501
-rw-r--r--vendor/golang.org/x/net/http2/headermap.go78
-rw-r--r--vendor/golang.org/x/net/http2/hpack/encode.go251
-rw-r--r--vendor/golang.org/x/net/http2/hpack/encode_test.go330
-rw-r--r--vendor/golang.org/x/net/http2/hpack/hpack.go542
-rw-r--r--vendor/golang.org/x/net/http2/hpack/hpack_test.go854
-rw-r--r--vendor/golang.org/x/net/http2/hpack/huffman.go212
-rw-r--r--vendor/golang.org/x/net/http2/hpack/tables.go352
-rw-r--r--vendor/golang.org/x/net/http2/http2.go365
-rw-r--r--vendor/golang.org/x/net/http2/http2_test.go198
-rw-r--r--vendor/golang.org/x/net/http2/not_go16.go46
-rw-r--r--vendor/golang.org/x/net/http2/not_go17.go77
-rw-r--r--vendor/golang.org/x/net/http2/pipe.go153
-rw-r--r--vendor/golang.org/x/net/http2/pipe_test.go109
-rw-r--r--vendor/golang.org/x/net/http2/priority_test.go118
-rw-r--r--vendor/golang.org/x/net/http2/server.go2292
-rw-r--r--vendor/golang.org/x/net/http2/server_test.go3368
-rw-r--r--vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml5021
-rw-r--r--vendor/golang.org/x/net/http2/transport.go2003
-rw-r--r--vendor/golang.org/x/net/http2/transport_test.go2620
-rw-r--r--vendor/golang.org/x/net/http2/write.go264
-rw-r--r--vendor/golang.org/x/net/http2/writesched.go283
-rw-r--r--vendor/golang.org/x/net/http2/z_spec_test.go356
-rw-r--r--vendor/golang.org/x/net/icmp/dstunreach.go41
-rw-r--r--vendor/golang.org/x/net/icmp/echo.go45
-rw-r--r--vendor/golang.org/x/net/icmp/endpoint.go113
-rw-r--r--vendor/golang.org/x/net/icmp/example_test.go63
-rw-r--r--vendor/golang.org/x/net/icmp/extension.go89
-rw-r--r--vendor/golang.org/x/net/icmp/extension_test.go259
-rw-r--r--vendor/golang.org/x/net/icmp/helper.go27
-rw-r--r--vendor/golang.org/x/net/icmp/helper_posix.go75
-rw-r--r--vendor/golang.org/x/net/icmp/interface.go236
-rw-r--r--vendor/golang.org/x/net/icmp/ipv4.go56
-rw-r--r--vendor/golang.org/x/net/icmp/ipv4_test.go82
-rw-r--r--vendor/golang.org/x/net/icmp/ipv6.go23
-rw-r--r--vendor/golang.org/x/net/icmp/listen_posix.go100
-rw-r--r--vendor/golang.org/x/net/icmp/listen_stub.go33
-rw-r--r--vendor/golang.org/x/net/icmp/message.go150
-rw-r--r--vendor/golang.org/x/net/icmp/message_test.go134
-rw-r--r--vendor/golang.org/x/net/icmp/messagebody.go41
-rw-r--r--vendor/golang.org/x/net/icmp/mpls.go77
-rw-r--r--vendor/golang.org/x/net/icmp/multipart.go109
-rw-r--r--vendor/golang.org/x/net/icmp/multipart_test.go442
-rw-r--r--vendor/golang.org/x/net/icmp/packettoobig.go43
-rw-r--r--vendor/golang.org/x/net/icmp/paramprob.go63
-rw-r--r--vendor/golang.org/x/net/icmp/ping_test.go200
-rw-r--r--vendor/golang.org/x/net/icmp/sys_freebsd.go11
-rw-r--r--vendor/golang.org/x/net/icmp/timeexceeded.go39
-rw-r--r--vendor/golang.org/x/net/idna/idna.go68
-rw-r--r--vendor/golang.org/x/net/idna/idna_test.go43
-rw-r--r--vendor/golang.org/x/net/idna/punycode.go200
-rw-r--r--vendor/golang.org/x/net/idna/punycode_test.go198
-rw-r--r--vendor/golang.org/x/net/internal/iana/const.go180
-rw-r--r--vendor/golang.org/x/net/internal/iana/gen.go293
-rw-r--r--vendor/golang.org/x/net/internal/netreflect/socket.go37
-rw-r--r--vendor/golang.org/x/net/internal/netreflect/socket_posix.go30
-rw-r--r--vendor/golang.org/x/net/internal/netreflect/socket_stub.go11
-rw-r--r--vendor/golang.org/x/net/internal/netreflect/socket_test.go123
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_bsd.go48
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_nobsd.go11
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_posix.go31
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_stub.go28
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_unix.go29
-rw-r--r--vendor/golang.org/x/net/internal/nettest/helper_windows.go38
-rw-r--r--vendor/golang.org/x/net/internal/nettest/interface.go94
-rw-r--r--vendor/golang.org/x/net/internal/nettest/rlimit.go11
-rw-r--r--vendor/golang.org/x/net/internal/nettest/stack.go49
-rw-r--r--vendor/golang.org/x/net/internal/timeseries/timeseries.go525
-rw-r--r--vendor/golang.org/x/net/internal/timeseries/timeseries_test.go170
-rw-r--r--vendor/golang.org/x/net/ipv4/bpf_test.go93
-rw-r--r--vendor/golang.org/x/net/ipv4/bpfopt_linux.go28
-rw-r--r--vendor/golang.org/x/net/ipv4/bpfopt_stub.go16
-rw-r--r--vendor/golang.org/x/net/ipv4/control.go70
-rw-r--r--vendor/golang.org/x/net/ipv4/control_bsd.go40
-rw-r--r--vendor/golang.org/x/net/ipv4/control_pktinfo.go37
-rw-r--r--vendor/golang.org/x/net/ipv4/control_stub.go23
-rw-r--r--vendor/golang.org/x/net/ipv4/control_unix.go164
-rw-r--r--vendor/golang.org/x/net/ipv4/control_windows.go27
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_darwin.go77
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_dragonfly.go38
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_freebsd.go75
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_linux.go120
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_netbsd.go37
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_openbsd.go37
-rw-r--r--vendor/golang.org/x/net/ipv4/defs_solaris.go57
-rw-r--r--vendor/golang.org/x/net/ipv4/dgramopt_posix.go253
-rw-r--r--vendor/golang.org/x/net/ipv4/dgramopt_stub.go106
-rw-r--r--vendor/golang.org/x/net/ipv4/doc.go242
-rw-r--r--vendor/golang.org/x/net/ipv4/endpoint.go189
-rw-r--r--vendor/golang.org/x/net/ipv4/example_test.go224
-rw-r--r--vendor/golang.org/x/net/ipv4/gen.go208
-rw-r--r--vendor/golang.org/x/net/ipv4/genericopt_posix.go63
-rw-r--r--vendor/golang.org/x/net/ipv4/genericopt_stub.go29
-rw-r--r--vendor/golang.org/x/net/ipv4/header.go132
-rw-r--r--vendor/golang.org/x/net/ipv4/header_test.go138
-rw-r--r--vendor/golang.org/x/net/ipv4/helper.go59
-rw-r--r--vendor/golang.org/x/net/ipv4/iana.go34
-rw-r--r--vendor/golang.org/x/net/ipv4/icmp.go57
-rw-r--r--vendor/golang.org/x/net/ipv4/icmp_linux.go25
-rw-r--r--vendor/golang.org/x/net/ipv4/icmp_stub.go25
-rw-r--r--vendor/golang.org/x/net/ipv4/icmp_test.go95
-rw-r--r--vendor/golang.org/x/net/ipv4/mocktransponder_test.go21
-rw-r--r--vendor/golang.org/x/net/ipv4/multicast_test.go330
-rw-r--r--vendor/golang.org/x/net/ipv4/multicastlistener_test.go249
-rw-r--r--vendor/golang.org/x/net/ipv4/multicastsockopt_test.go195
-rw-r--r--vendor/golang.org/x/net/ipv4/packet.go97
-rw-r--r--vendor/golang.org/x/net/ipv4/payload.go15
-rw-r--r--vendor/golang.org/x/net/ipv4/payload_cmsg.go81
-rw-r--r--vendor/golang.org/x/net/ipv4/payload_nocmsg.go42
-rw-r--r--vendor/golang.org/x/net/ipv4/readwrite_test.go174
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt.go46
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_asmreq.go83
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go46
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go21
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go17
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go42
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_posix.go122
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go17
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go61
-rw-r--r--vendor/golang.org/x/net/ipv4/sockopt_stub.go11
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_bsd.go34
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_darwin.go96
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_freebsd.go73
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_linux.go55
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_openbsd.go32
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_stub.go13
-rw-r--r--vendor/golang.org/x/net/ipv4/sys_windows.go61
-rw-r--r--vendor/golang.org/x/net/ipv4/syscall_linux_386.go31
-rw-r--r--vendor/golang.org/x/net/ipv4/syscall_unix.go26
-rw-r--r--vendor/golang.org/x/net/ipv4/syscall_windows.go18
-rw-r--r--vendor/golang.org/x/net/ipv4/thunk_linux_386.s8
-rw-r--r--vendor/golang.org/x/net/ipv4/unicast_test.go246
-rw-r--r--vendor/golang.org/x/net/ipv4/unicastsockopt_test.go139
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_darwin.go99
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_dragonfly.go33
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go93
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go95
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go95
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_386.go146
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go148
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_arm.go146
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go148
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go150
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_netbsd.go30
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_openbsd.go30
-rw-r--r--vendor/golang.org/x/net/ipv4/zsys_solaris.go60
-rw-r--r--vendor/golang.org/x/net/ipv6/bpf_test.go93
-rw-r--r--vendor/golang.org/x/net/ipv6/bpfopt_linux.go28
-rw-r--r--vendor/golang.org/x/net/ipv6/bpfopt_stub.go16
-rw-r--r--vendor/golang.org/x/net/ipv6/control.go85
-rw-r--r--vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go55
-rw-r--r--vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go99
-rw-r--r--vendor/golang.org/x/net/ipv6/control_stub.go23
-rw-r--r--vendor/golang.org/x/net/ipv6/control_unix.go166
-rw-r--r--vendor/golang.org/x/net/ipv6/control_windows.go27
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_darwin.go112
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_dragonfly.go84
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_freebsd.go105
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_linux.go145
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_netbsd.go80
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_openbsd.go89
-rw-r--r--vendor/golang.org/x/net/ipv6/defs_solaris.go96
-rw-r--r--vendor/golang.org/x/net/ipv6/dgramopt_posix.go290
-rw-r--r--vendor/golang.org/x/net/ipv6/dgramopt_stub.go119
-rw-r--r--vendor/golang.org/x/net/ipv6/doc.go240
-rw-r--r--vendor/golang.org/x/net/ipv6/endpoint.go125
-rw-r--r--vendor/golang.org/x/net/ipv6/example_test.go216
-rw-r--r--vendor/golang.org/x/net/ipv6/gen.go208
-rw-r--r--vendor/golang.org/x/net/ipv6/genericopt_posix.go64
-rw-r--r--vendor/golang.org/x/net/ipv6/genericopt_stub.go30
-rw-r--r--vendor/golang.org/x/net/ipv6/header.go55
-rw-r--r--vendor/golang.org/x/net/ipv6/header_test.go55
-rw-r--r--vendor/golang.org/x/net/ipv6/helper.go53
-rw-r--r--vendor/golang.org/x/net/ipv6/iana.go82
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp.go57
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_bsd.go29
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_linux.go27
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_solaris.go24
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_stub.go23
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_test.go96
-rw-r--r--vendor/golang.org/x/net/ipv6/icmp_windows.go22
-rw-r--r--vendor/golang.org/x/net/ipv6/main_test.go27
-rw-r--r--vendor/golang.org/x/net/ipv6/mocktransponder_test.go32
-rw-r--r--vendor/golang.org/x/net/ipv6/multicast_test.go260
-rw-r--r--vendor/golang.org/x/net/ipv6/multicastlistener_test.go246
-rw-r--r--vendor/golang.org/x/net/ipv6/multicastsockopt_test.go157
-rw-r--r--vendor/golang.org/x/net/ipv6/payload.go15
-rw-r--r--vendor/golang.org/x/net/ipv6/payload_cmsg.go70
-rw-r--r--vendor/golang.org/x/net/ipv6/payload_nocmsg.go41
-rw-r--r--vendor/golang.org/x/net/ipv6/readwrite_test.go189
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt.go46
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go22
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_posix.go122
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go17
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go59
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_stub.go13
-rw-r--r--vendor/golang.org/x/net/ipv6/sockopt_test.go133
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_bsd.go56
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_darwin.go105
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_freebsd.go91
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_linux.go72
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_stub.go13
-rw-r--r--vendor/golang.org/x/net/ipv6/sys_windows.go74
-rw-r--r--vendor/golang.org/x/net/ipv6/syscall_linux_386.go31
-rw-r--r--vendor/golang.org/x/net/ipv6/syscall_unix.go26
-rw-r--r--vendor/golang.org/x/net/ipv6/syscall_windows.go18
-rw-r--r--vendor/golang.org/x/net/ipv6/thunk_linux_386.s8
-rw-r--r--vendor/golang.org/x/net/ipv6/unicast_test.go182
-rw-r--r--vendor/golang.org/x/net/ipv6/unicastsockopt_test.go111
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_darwin.go131
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_dragonfly.go90
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go122
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go124
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go124
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_386.go168
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go170
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_arm.go168
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go170
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go172
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_netbsd.go84
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_openbsd.go93
-rw-r--r--vendor/golang.org/x/net/ipv6/zsys_solaris.go105
-rw-r--r--vendor/golang.org/x/net/lex/httplex/httplex.go351
-rw-r--r--vendor/golang.org/x/net/lex/httplex/httplex_test.go119
-rw-r--r--vendor/golang.org/x/net/netutil/listen.go48
-rw-r--r--vendor/golang.org/x/net/netutil/listen_test.go101
-rw-r--r--vendor/golang.org/x/net/proxy/direct.go18
-rw-r--r--vendor/golang.org/x/net/proxy/per_host.go140
-rw-r--r--vendor/golang.org/x/net/proxy/per_host_test.go55
-rw-r--r--vendor/golang.org/x/net/proxy/proxy.go94
-rw-r--r--vendor/golang.org/x/net/proxy/proxy_test.go142
-rw-r--r--vendor/golang.org/x/net/proxy/socks5.go210
-rw-r--r--vendor/golang.org/x/net/publicsuffix/gen.go713
-rw-r--r--vendor/golang.org/x/net/publicsuffix/list.go135
-rw-r--r--vendor/golang.org/x/net/publicsuffix/list_test.go416
-rw-r--r--vendor/golang.org/x/net/publicsuffix/table.go8990
-rw-r--r--vendor/golang.org/x/net/publicsuffix/table_test.go16101
-rw-r--r--vendor/golang.org/x/net/route/address.go281
-rw-r--r--vendor/golang.org/x/net/route/address_darwin_test.go63
-rw-r--r--vendor/golang.org/x/net/route/address_test.go103
-rw-r--r--vendor/golang.org/x/net/route/binary.go90
-rw-r--r--vendor/golang.org/x/net/route/defs_darwin.go106
-rw-r--r--vendor/golang.org/x/net/route/defs_dragonfly.go105
-rw-r--r--vendor/golang.org/x/net/route/defs_freebsd.go329
-rw-r--r--vendor/golang.org/x/net/route/defs_netbsd.go104
-rw-r--r--vendor/golang.org/x/net/route/defs_openbsd.go93
-rw-r--r--vendor/golang.org/x/net/route/interface.go64
-rw-r--r--vendor/golang.org/x/net/route/interface_announce.go32
-rw-r--r--vendor/golang.org/x/net/route/interface_classic.go66
-rw-r--r--vendor/golang.org/x/net/route/interface_freebsd.go78
-rw-r--r--vendor/golang.org/x/net/route/interface_multicast.go30
-rw-r--r--vendor/golang.org/x/net/route/interface_openbsd.go90
-rw-r--r--vendor/golang.org/x/net/route/message.go76
-rw-r--r--vendor/golang.org/x/net/route/message_darwin_test.go27
-rw-r--r--vendor/golang.org/x/net/route/message_freebsd_test.go106
-rw-r--r--vendor/golang.org/x/net/route/message_test.go118
-rw-r--r--vendor/golang.org/x/net/route/route.go74
-rw-r--r--vendor/golang.org/x/net/route/route_classic.go31
-rw-r--r--vendor/golang.org/x/net/route/route_openbsd.go32
-rw-r--r--vendor/golang.org/x/net/route/route_test.go386
-rw-r--r--vendor/golang.org/x/net/route/sys.go40
-rw-r--r--vendor/golang.org/x/net/route/sys_darwin.go80
-rw-r--r--vendor/golang.org/x/net/route/sys_dragonfly.go71
-rw-r--r--vendor/golang.org/x/net/route/sys_freebsd.go150
-rw-r--r--vendor/golang.org/x/net/route/sys_netbsd.go67
-rw-r--r--vendor/golang.org/x/net/route/sys_openbsd.go72
-rw-r--r--vendor/golang.org/x/net/route/syscall.go33
-rw-r--r--vendor/golang.org/x/net/route/syscall.s8
-rw-r--r--vendor/golang.org/x/net/route/zsys_darwin.go93
-rw-r--r--vendor/golang.org/x/net/route/zsys_dragonfly.go92
-rw-r--r--vendor/golang.org/x/net/route/zsys_freebsd_386.go120
-rw-r--r--vendor/golang.org/x/net/route/zsys_freebsd_amd64.go117
-rw-r--r--vendor/golang.org/x/net/route/zsys_freebsd_arm.go117
-rw-r--r--vendor/golang.org/x/net/route/zsys_netbsd.go91
-rw-r--r--vendor/golang.org/x/net/route/zsys_openbsd.go80
-rw-r--r--vendor/golang.org/x/net/trace/events.go524
-rw-r--r--vendor/golang.org/x/net/trace/histogram.go356
-rw-r--r--vendor/golang.org/x/net/trace/histogram_test.go325
-rw-r--r--vendor/golang.org/x/net/trace/trace.go1063
-rw-r--r--vendor/golang.org/x/net/trace/trace_test.go71
-rw-r--r--vendor/golang.org/x/net/webdav/file.go794
-rw-r--r--vendor/golang.org/x/net/webdav/file_test.go1169
-rw-r--r--vendor/golang.org/x/net/webdav/if.go173
-rw-r--r--vendor/golang.org/x/net/webdav/if_test.go322
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/README11
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/atom_test.go56
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/example_test.go151
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/marshal.go1223
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go1939
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/read.go692
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/read_test.go744
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go371
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/xml.go1998
-rw-r--r--vendor/golang.org/x/net/webdav/internal/xml/xml_test.go752
-rw-r--r--vendor/golang.org/x/net/webdav/litmus_test_server.go94
-rw-r--r--vendor/golang.org/x/net/webdav/lock.go445
-rw-r--r--vendor/golang.org/x/net/webdav/lock_test.go731
-rw-r--r--vendor/golang.org/x/net/webdav/prop.go395
-rw-r--r--vendor/golang.org/x/net/webdav/prop_test.go610
-rw-r--r--vendor/golang.org/x/net/webdav/webdav.go689
-rw-r--r--vendor/golang.org/x/net/webdav/webdav_test.go285
-rw-r--r--vendor/golang.org/x/net/webdav/xml.go519
-rw-r--r--vendor/golang.org/x/net/webdav/xml_test.go906
-rw-r--r--vendor/golang.org/x/net/websocket/client.go113
-rw-r--r--vendor/golang.org/x/net/websocket/exampledial_test.go31
-rw-r--r--vendor/golang.org/x/net/websocket/examplehandler_test.go26
-rw-r--r--vendor/golang.org/x/net/websocket/hybi.go583
-rw-r--r--vendor/golang.org/x/net/websocket/hybi_test.go608
-rw-r--r--vendor/golang.org/x/net/websocket/server.go113
-rw-r--r--vendor/golang.org/x/net/websocket/websocket.go411
-rw-r--r--vendor/golang.org/x/net/websocket/websocket_test.go587
-rw-r--r--vendor/golang.org/x/net/xsrftoken/xsrf.go88
-rw-r--r--vendor/golang.org/x/net/xsrftoken/xsrf_test.go83
-rw-r--r--vendor/golang.org/x/time/AUTHORS3
-rw-r--r--vendor/golang.org/x/time/CONTRIBUTING.md31
-rw-r--r--vendor/golang.org/x/time/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/time/LICENSE27
-rw-r--r--vendor/golang.org/x/time/PATENTS22
-rw-r--r--vendor/golang.org/x/time/README1
-rw-r--r--vendor/golang.org/x/time/rate/rate.go370
-rw-r--r--vendor/golang.org/x/time/rate/rate_test.go445
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc1
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/.gitignore7
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/.travis.yml45
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md10
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md14
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/LICENSE202
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/README.md212
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/asymmetric.go520
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go468
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go196
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go498
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go75
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go150
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go62
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go115
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go109
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go133
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/crypter.go416
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/crypter_test.go785
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/doc.go26
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/doc_test.go226
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/encoding.go193
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/encoding_test.go173
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jose-util/README.md59
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t94
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jose-util/main.go189
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/LICENSE27
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/README.md13
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/bench_test.go223
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/decode.go1183
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/decode_test.go1474
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/encode.go1197
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/encode_test.go538
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/indent.go141
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/number_test.go133
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/scanner.go623
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go316
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/stream.go480
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/stream_test.go354
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go115
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/tags.go44
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/tags_test.go28
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gzbin0 -> 120432 bytes
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/json_fork_test.go116
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jwe.go280
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jwe_test.go537
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jwk.go457
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jwk_test.go662
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jws.go272
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/jws_test.go312
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/shared.go224
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/signing.go258
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/signing_test.go451
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/symmetric.go349
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/symmetric_test.go131
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/utils.go74
-rw-r--r--vendor/gopkg.in/square/go-jose.v1/utils_test.go225
-rw-r--r--vendor/gopkg.in/yaml.v2/decode.go2
-rw-r--r--vendor/gopkg.in/yaml.v2/decode_test.go1
722 files changed, 189862 insertions, 1 deletion
diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore
new file mode 100644
index 000000000..776cd950c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/.gitignore
@@ -0,0 +1,4 @@
+*.6
+tags
+test.out
+a.out
diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml
new file mode 100644
index 000000000..1f056ab7c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+go:
+ - 1.5
+ - 1.6
+script:
+ - go test -race -v -bench=.
diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS
new file mode 100644
index 000000000..196568352
--- /dev/null
+++ b/vendor/github.com/miekg/dns/AUTHORS
@@ -0,0 +1 @@
+Miek Gieben <miek@miek.nl>
diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS
new file mode 100644
index 000000000..f77e8a895
--- /dev/null
+++ b/vendor/github.com/miekg/dns/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Alex A. Skinner
+Andrew Tunnell-Jones
+Ask Bjørn Hansen
+Dave Cheney
+Dusty Wilson
+Marek Majkowski
+Peter van Dijk
+Omri Bahumi
+Alex Sergeyev
diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT
new file mode 100644
index 000000000..35702b10e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/COPYRIGHT
@@ -0,0 +1,9 @@
+Copyright 2009 The Go Authors. All rights reserved. Use of this source code
+is governed by a BSD-style license that can be found in the LICENSE file.
+Extensions of the original work are copyright (c) 2011 Miek Gieben
+
+Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
+
+Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE
new file mode 100644
index 000000000..5763fa7fe
--- /dev/null
+++ b/vendor/github.com/miekg/dns/LICENSE
@@ -0,0 +1,32 @@
+Extensions of the original work are copyright (c) 2011 Miek Gieben
+
+As this is fork of the official Go code the same license applies:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
new file mode 100644
index 000000000..83b4183eb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/README.md
@@ -0,0 +1,151 @@
+[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
+
+# Alternative (more granular) approach to a DNS library
+
+> Less is more.
+
+Complete and usable DNS library. All widely used Resource Records are
+supported, including the DNSSEC types. It follows a lean and mean philosophy.
+If there is stuff you should know as a DNS programmer, there isn't a convenience
+function for it.
+can build servers and resolvers with it.
+
+We try to keep the "master" branch as sane as possible and at the bleeding edge
+of standards, avoiding breaking changes wherever reasonable. We support the last
+two versions of Go, currently: 1.5 and 1.6.
+
+# Goals
+
+* KISS;
+* Fast;
+* Small API; if it's easy to code in Go, don't make a function for it.
+
+# Users
+
+A not-so-up-to-date-list-that-may-be-actually-current:
+
+* https://cloudflare.com
+* https://github.com/abh/geodns
+* http://www.statdns.com/
+* http://www.dnsinspect.com/
+* https://github.com/chuangbo/jianbing-dictionary-dns
+* http://www.dns-lg.com/
+* https://github.com/fcambus/rrda
+* https://github.com/kenshinx/godns
+* https://github.com/skynetservices/skydns
+* https://github.com/hashicorp/consul
+* https://github.com/DevelopersPL/godnsagent
+* https://github.com/duedil-ltd/discodns
+* https://github.com/StalkR/dns-reverse-proxy
+* https://github.com/tianon/rawdns
+* https://mesosphere.github.io/mesos-dns/
+* https://pulse.turbobytes.com/
+* https://play.google.com/store/apps/details?id=com.turbobytes.dig
+* https://github.com/fcambus/statzone
+* https://github.com/benschw/dns-clb-go
+* https://github.com/corny/dnscheck for http://public-dns.info/
+* https://namesmith.io
+* https://github.com/miekg/unbound
+* https://github.com/miekg/exdns
+* https://dnslookup.org
+* https://github.com/looterz/grimd
+* https://github.com/phamhongviet/serf-dns
+
+Send pull request if you want to be listed here.
+
+# Features
+
+* UDP/TCP queries, IPv4 and IPv6;
+* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
+* Fast:
+ * Reply speed around ~ 80K qps (faster hardware results in more qps);
+ * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
+* Server side programming (mimicking the net/http package);
+* Client side programming;
+* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
+* EDNS0, NSID, Cookies;
+* AXFR/IXFR;
+* TSIG, SIG(0);
+* DNS over TLS: optional encrypted connection between client and server;
+* DNS name compression;
+* Depends only on the standard library.
+
+Have fun!
+
+Miek Gieben - 2010-2012 - <miek@miek.nl>
+
+# Building
+
+Building is done with the `go` tool. If you have set up your GOPATH
+correctly, the following should work:
+
+ go get github.com/miekg/dns
+ go build github.com/miekg/dns
+
+## Examples
+
+A short "how to use the API" is at the beginning of doc.go (this also will show
+when you call `godoc github.com/miekg/dns`).
+
+Example programs can be found in the `github.com/miekg/exdns` repository.
+
+## Supported RFCs
+
+*all of them*
+
+* 103{4,5} - DNS standard
+* 1348 - NSAP record (removed the record)
+* 1982 - Serial Arithmetic
+* 1876 - LOC record
+* 1995 - IXFR
+* 1996 - DNS notify
+* 2136 - DNS Update (dynamic updates)
+* 2181 - RRset definition - there is no RRset type though, just []RR
+* 2537 - RSAMD5 DNS keys
+* 2065 - DNSSEC (updated in later RFCs)
+* 2671 - EDNS record
+* 2782 - SRV record
+* 2845 - TSIG record
+* 2915 - NAPTR record
+* 2929 - DNS IANA Considerations
+* 3110 - RSASHA1 DNS keys
+* 3225 - DO bit (DNSSEC OK)
+* 340{1,2,3} - NAPTR record
+* 3445 - Limiting the scope of (DNS)KEY
+* 3597 - Unknown RRs
+* 403{3,4,5} - DNSSEC + validation functions
+* 4255 - SSHFP record
+* 4343 - Case insensitivity
+* 4408 - SPF record
+* 4509 - SHA256 Hash in DS
+* 4592 - Wildcards in the DNS
+* 4635 - HMAC SHA TSIG
+* 4701 - DHCID
+* 4892 - id.server
+* 5001 - NSID
+* 5155 - NSEC3 record
+* 5205 - HIP record
+* 5702 - SHA2 in the DNS
+* 5936 - AXFR
+* 5966 - TCP implementation recommendations
+* 6605 - ECDSA
+* 6725 - IANA Registry Update
+* 6742 - ILNP DNS
+* 6840 - Clarifications and Implementation Notes for DNS Security
+* 6844 - CAA record
+* 6891 - EDNS0 update
+* 6895 - DNS IANA considerations
+* 6975 - Algorithm Understanding in DNSSEC
+* 7043 - EUI48/EUI64 records
+* 7314 - DNS (EDNS) EXPIRE Option
+* 7553 - URI record
+* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
+* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
+* xxxx - EDNS0 DNS Update Lease (draft)
+
+## Loosely based upon
+
+* `ldns`
+* `NSD`
+* `Net::DNS`
+* `GRONG`
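(Editorial illustration, not part of the vendored README: a minimal sketch of the client API that the README's Examples section points to in doc.go, assuming only the package's documented Msg.SetQuestion and Client.Exchange calls; the resolver address is a placeholder.)

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA) // query names must be fully qualified

        c := new(dns.Client)
        in, rtt, err := c.Exchange(m, "192.0.2.1:53") // placeholder resolver address
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(rtt, in.Answer)
    }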
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
new file mode 100644
index 000000000..1302e4e04
--- /dev/null
+++ b/vendor/github.com/miekg/dns/client.go
@@ -0,0 +1,455 @@
+package dns
+
+// A client implementation.
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/binary"
+ "io"
+ "net"
+ "time"
+)
+
+const dnsTimeout time.Duration = 2 * time.Second
+const tcpIdleTimeout time.Duration = 8 * time.Second
+
+// A Conn represents a connection to a DNS server.
+type Conn struct {
+ net.Conn // a net.Conn holding the connection
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
+ rtt time.Duration
+ t time.Time
+ tsigRequestMAC string
+}
+
+// A Client defines parameters for a DNS client.
+type Client struct {
+ Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TLSConfig *tls.Config // TLS connection configuration
+ Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
+ SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
+ group singleflight
+}
+
+// Exchange performs a synchronous UDP query. It sends the message m to the address
+// contained in a and waits for a reply. Exchange does not retry a failed query, nor
+// will it fall back to TCP in case of truncation.
+// See client.Exchange for more information on setting larger buffer sizes.
+func Exchange(m *Msg, a string) (r *Msg, err error) {
+ var co *Conn
+ co, err = DialTimeout("udp", a, dnsTimeout)
+ if err != nil {
+ return nil, err
+ }
+
+ defer co.Close()
+
+ opt := m.IsEdns0()
+ // If EDNS0 is used use that for size.
+ if opt != nil && opt.UDPSize() >= MinMsgSize {
+ co.UDPSize = opt.UDPSize()
+ }
+
+ co.SetWriteDeadline(time.Now().Add(dnsTimeout))
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+
+ co.SetReadDeadline(time.Now().Add(dnsTimeout))
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
+// ExchangeConn performs a synchronous query. It sends the message m via the connection
+// c and waits for a reply. The connection c is not closed by ExchangeConn.
+// This function is going away, but can easily be mimicked:
+//
+// co := &dns.Conn{Conn: c} // c is your net.Conn
+// co.WriteMsg(m)
+// in, _ := co.ReadMsg()
+// co.Close()
+//
+func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
+ println("dns: this function is deprecated")
+ co := new(Conn)
+ co.Conn = c
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
+// Exchange performs a synchronous query. It sends the message m to the address
+// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
+//
+// c := new(dns.Client)
+// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
+//
+// Exchange does not retry a failed query, nor will it fall back to TCP in
+// case of truncation.
+// It is up to the caller to create a message that allows for larger responses to be
+// returned. Specifically, this means adding an EDNS0 OPT RR that will advertise a larger
+// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
+// of 512 bytes.
+func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ if !c.SingleInflight {
+ return c.exchange(m, a)
+ }
+ // This adds a bunch of garbage, TODO(miek).
+ t := "nop"
+ if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
+ t = t1
+ }
+ cl := "nop"
+ if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
+ cl = cl1
+ }
+ r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
+ return c.exchange(m, a)
+ })
+ if err != nil {
+ return r, rtt, err
+ }
+ if shared {
+ return r.Copy(), rtt, nil
+ }
+ return r, rtt, nil
+}
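+
+// exchangeTLSExample is a minimal illustrative sketch of querying over DNS
+// over TLS by setting Net to "tcp-tls"; the resolver address and ServerName
+// below are placeholders, not defaults of this package.
+func exchangeTLSExample() (*Msg, time.Duration, error) {
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := &Client{
+ Net: "tcp-tls",
+ TLSConfig: &tls.Config{ServerName: "dns.example.net"}, // placeholder name
+ Timeout: 5 * time.Second,
+ }
+ // Port 853 is the conventional DNS over TLS port (RFC 7858).
+ return c.Exchange(m, "dns.example.net:853")
+}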
+
+func (c *Client) dialTimeout() time.Duration {
+ if c.Timeout != 0 {
+ return c.Timeout
+ }
+ if c.DialTimeout != 0 {
+ return c.DialTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) readTimeout() time.Duration {
+ if c.ReadTimeout != 0 {
+ return c.ReadTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) writeTimeout() time.Duration {
+ if c.WriteTimeout != 0 {
+ return c.WriteTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ var co *Conn
+ network := "udp"
+ tls := false
+
+ switch c.Net {
+ case "tcp-tls":
+ network = "tcp"
+ tls = true
+ case "tcp4-tls":
+ network = "tcp4"
+ tls = true
+ case "tcp6-tls":
+ network = "tcp6"
+ tls = true
+ default:
+ if c.Net != "" {
+ network = c.Net
+ }
+ }
+
+ var deadline time.Time
+ if c.Timeout != 0 {
+ deadline = time.Now().Add(c.Timeout)
+ }
+
+ if tls {
+ co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout())
+ } else {
+ co, err = DialTimeout(network, a, c.dialTimeout())
+ }
+
+ if err != nil {
+ return nil, 0, err
+ }
+ defer co.Close()
+
+ opt := m.IsEdns0()
+ // If EDNS0 is used use that for size.
+ if opt != nil && opt.UDPSize() >= MinMsgSize {
+ co.UDPSize = opt.UDPSize()
+ }
+ // Otherwise use the client's configured UDP size.
+ if opt == nil && c.UDPSize >= MinMsgSize {
+ co.UDPSize = c.UDPSize
+ }
+
+ co.TsigSecret = c.TsigSecret
+ co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout()))
+ if err = co.WriteMsg(m); err != nil {
+ return nil, 0, err
+ }
+
+ co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout()))
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, co.rtt, err
+}
+
+// ReadMsg reads a message from the connection co.
+// If the received message contains a TSIG record the transaction
+// signature is verified.
+func (co *Conn) ReadMsg() (*Msg, error) {
+ p, err := co.ReadMsgHeader(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ m := new(Msg)
+ if err := m.Unpack(p); err != nil {
+ // If ErrTruncated was returned, we still want to allow the user to use
+ // the message; callers that do not want to work with a truncated message
+ // can simply check err.
+ if err == ErrTruncated {
+ return m, err
+ }
+ return nil, err
+ }
+ if t := m.IsTsig(); t != nil {
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ }
+ return m, err
+}
+
+// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
+// Returns message as a byte slice to be parsed with Msg.Unpack later on.
+// Note that error handling on the message body is not possible as only the header is parsed.
+func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
+ var (
+ p []byte
+ n int
+ err error
+ )
+
+ switch t := co.Conn.(type) {
+ case *net.TCPConn, *tls.Conn:
+ r := t.(io.Reader)
+
+ // First two bytes specify the length of the entire message.
+ l, err := tcpMsgLen(r)
+ if err != nil {
+ return nil, err
+ }
+ p = make([]byte, l)
+ n, err = tcpRead(r, p)
+ co.rtt = time.Since(co.t)
+ default:
+ if co.UDPSize > MinMsgSize {
+ p = make([]byte, co.UDPSize)
+ } else {
+ p = make([]byte, MinMsgSize)
+ }
+ n, err = co.Read(p)
+ co.rtt = time.Since(co.t)
+ }
+
+ if err != nil {
+ return nil, err
+ } else if n < headerSize {
+ return nil, ErrShortRead
+ }
+
+ p = p[:n]
+ if hdr != nil {
+ dh, _, err := unpackMsgHdr(p, 0)
+ if err != nil {
+ return nil, err
+ }
+ *hdr = dh
+ }
+ return p, err
+}
+
+// tcpMsgLen is a helper func to read the first two bytes of the stream as a uint16 packet length.
+func tcpMsgLen(t io.Reader) (int, error) {
+ p := []byte{0, 0}
+ n, err := t.Read(p)
+ if err != nil {
+ return 0, err
+ }
+ if n != 2 {
+ return 0, ErrShortRead
+ }
+ l := binary.BigEndian.Uint16(p)
+ if l == 0 {
+ return 0, ErrShortRead
+ }
+ return int(l), nil
+}
+
+// tcpRead calls TCPConn.Read enough times to fill the allocated buffer.
+func tcpRead(t io.Reader, p []byte) (int, error) {
+ n, err := t.Read(p)
+ if err != nil {
+ return n, err
+ }
+ for n < len(p) {
+ j, err := t.Read(p[n:])
+ if err != nil {
+ return n, err
+ }
+ n += j
+ }
+ return n, err
+}
+
+// Read implements the net.Conn read method.
+func (co *Conn) Read(p []byte) (n int, err error) {
+ if co.Conn == nil {
+ return 0, ErrConnEmpty
+ }
+ if len(p) < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ switch t := co.Conn.(type) {
+ case *net.TCPConn, *tls.Conn:
+ r := t.(io.Reader)
+
+ l, err := tcpMsgLen(r)
+ if err != nil {
+ return 0, err
+ }
+ if l > len(p) {
+ return int(l), io.ErrShortBuffer
+ }
+ return tcpRead(r, p[:l])
+ }
+ // UDP connection
+ n, err = co.Conn.Read(p)
+ if err != nil {
+ return n, err
+ }
+ return n, err
+}
+
+// WriteMsg sends a message through the connection co.
+// If the message m contains a TSIG record the transaction
+// signature is calculated.
+func (co *Conn) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if t := m.IsTsig(); t != nil {
+ mac := ""
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ // Set for the next read, although only used in zone transfers
+ co.tsigRequestMAC = mac
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ co.t = time.Now()
+ if _, err = co.Write(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Write implements the net.Conn Write method.
+func (co *Conn) Write(p []byte) (n int, err error) {
+ switch t := co.Conn.(type) {
+ case *net.TCPConn, *tls.Conn:
+ w := t.(io.Writer)
+
+ lp := len(p)
+ if lp < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ if lp > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+ l := make([]byte, 2, lp+2)
+ binary.BigEndian.PutUint16(l, uint16(lp))
+ p = append(l, p...)
+ n, err := io.Copy(w, bytes.NewReader(p))
+ return int(n), err
+ }
+ n, err = co.Conn.(*net.UDPConn).Write(p)
+ return n, err
+}
+
+// Dial connects to the address on the named network.
+func Dial(network, address string) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// DialTimeout acts like Dial but takes a timeout.
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = net.DialTimeout(network, address, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// DialWithTLS connects to the address on the named network with TLS.
+func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = tls.Dial(network, address, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
+func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
+ var dialer net.Dialer
+ dialer.Timeout = timeout
+
+ conn = new(Conn)
+ conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
+ if deadline.IsZero() {
+ return time.Now().Add(timeout)
+ }
+ return deadline
+}
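+
+// connRoundTripExample is a minimal illustrative sketch of the lower level
+// Conn API: dial, write a query, read the reply, with explicit deadlines.
+// The server address and query name are placeholders.
+func connRoundTripExample() (*Msg, error) {
+ co, err := DialTimeout("udp", "192.0.2.53:53", dnsTimeout) // placeholder server
+ if err != nil {
+ return nil, err
+ }
+ defer co.Close()
+
+ m := new(Msg)
+ m.SetQuestion("example.org.", TypeA)
+
+ co.SetWriteDeadline(time.Now().Add(dnsTimeout))
+ if err := co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ co.SetReadDeadline(time.Now().Add(dnsTimeout))
+ return co.ReadMsg()
+}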
diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go
new file mode 100644
index 000000000..850bcfcda
--- /dev/null
+++ b/vendor/github.com/miekg/dns/client_test.go
@@ -0,0 +1,452 @@
+package dns
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "strconv"
+ "testing"
+ "time"
+)
+
+func TestClientSync(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+ // And now with plain Exchange().
+ r, err = Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ if r == nil || r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+}
+
+func TestClientTLSSync(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock)
+ if err != nil {
+ t.Fatalf("unable to build certificate: %v", err)
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config)
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+ c.Net = "tcp-tls"
+ c.TLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+}
+
+func TestClientSyncBadId(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServerBadId)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+ if _, _, err := c.Exchange(m, addrstr); err != ErrId {
+ t.Errorf("did not find a bad Id")
+ }
+ // And now with plain Exchange().
+ if _, err := Exchange(m, addrstr); err != ErrId {
+ t.Errorf("did not find a bad Id")
+ }
+}
+
+func TestClientEDNS0(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeDNSKEY)
+
+ m.SetEdns0(2048, true)
+
+ c := new(Client)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+}
+
+// Validates the transmission and parsing of local EDNS0 options.
+func TestClientEDNS0Local(t *testing.T) {
+ optStr1 := "1979:0x0707"
+ optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601"
+
+ handler := func(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+
+ m.Extra = make([]RR, 1, 2)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}}
+
+ // If the local options are what we expect, then reflect them back.
+ ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String()
+ ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String()
+ if ec1 == optStr1 && ec2 == optStr2 {
+ m.Extra = append(m.Extra, req.Extra[0])
+ }
+
+ w.WriteMsg(m)
+ }
+
+ HandleFunc("miek.nl.", handler)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %s", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+
+ // Add two local edns options to the query.
+ ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}}
+ ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}}
+ o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}}
+ m.Extra = append(m.Extra, o)
+
+ c := new(Client)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %s", err)
+ }
+
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Error("failed to get a valid answer")
+ t.Logf("%v\n", r)
+ }
+
+ txt := r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello local edns" {
+ t.Error("Unexpected result for miek.nl", txt, "!= Hello local edns")
+ }
+
+ // Validate the local options in the reply.
+ got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String()
+ if got != optStr1 {
+ t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr1)
+ t.Logf("%v\n", r)
+ }
+
+ got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String()
+ if got != optStr2 {
+ t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr2)
+ t.Logf("%v\n", r)
+ }
+}
+
+// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG
+func ExampleTsigSecret_updateLeaseTSIG() {
+ m := new(Msg)
+ m.SetUpdate("t.local.ip6.io.")
+ rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1")
+ rrs := make([]RR, 1)
+ rrs[0] = rr
+ m.Insert(rrs)
+
+ leaseRr := new(OPT)
+ leaseRr.Hdr.Name = "."
+ leaseRr.Hdr.Rrtype = TypeOPT
+ e := new(EDNS0_UL)
+ e.Code = EDNS0UL
+ e.Lease = 120
+ leaseRr.Option = append(leaseRr.Option, e)
+ m.Extra = append(m.Extra, leaseRr)
+
+ c := new(Client)
+ m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix())
+ c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="}
+
+ _, _, err := c.Exchange(m, "127.0.0.1:53")
+ if err != nil {
+ panic(err)
+ }
+}
+
+func TestClientConn(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ // This uses TCP just to make it slightly different than TestClientSync
+ s, addrstr, err := RunLocalTCPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ cn, err := Dial("tcp", addrstr)
+ if err != nil {
+ t.Errorf("failed to dial %s: %v", addrstr, err)
+ }
+
+ err = cn.WriteMsg(m)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ r, err := cn.ReadMsg()
+ if r == nil || r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+
+ err = cn.WriteMsg(m)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ h := new(Header)
+ buf, err := cn.ReadMsgHeader(h)
+ if buf == nil {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+ if int(h.Bits&0xF) != RcodeSuccess {
+ t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r)
+ }
+ if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 {
+ t.Errorf("expected to have question and additional in response; got something else: %+v", h)
+ }
+ if err = r.Unpack(buf); err != nil {
+ t.Errorf("unable to unpack message fully: %v", err)
+ }
+}
+
+func TestTruncatedMsg(t *testing.T) {
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSRV)
+ cnt := 10
+ for i := 0; i < cnt; i++ {
+ r := &SRV{
+ Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeSRV, Class: ClassINET, Ttl: 0},
+ Port: uint16(i + 8000),
+ Target: "target.miek.nl.",
+ }
+ m.Answer = append(m.Answer, r)
+
+ re := &A{
+ Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeA, Class: ClassINET, Ttl: 0},
+ A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i)).To4(),
+ }
+ m.Extra = append(m.Extra, re)
+ }
+ buf, err := m.Pack()
+ if err != nil {
+ t.Errorf("failed to pack: %v", err)
+ }
+
+ r := new(Msg)
+ if err = r.Unpack(buf); err != nil {
+ t.Errorf("unable to unpack message: %v", err)
+ }
+ if len(r.Answer) != cnt {
+ t.Errorf("answer count after regular unpack doesn't match: %d", len(r.Answer))
+ }
+ if len(r.Extra) != cnt {
+ t.Errorf("extra count after regular unpack doesn't match: %d", len(r.Extra))
+ }
+
+ m.Truncated = true
+ buf, err = m.Pack()
+ if err != nil {
+ t.Errorf("failed to pack truncated: %v", err)
+ }
+
+ r = new(Msg)
+ if err = r.Unpack(buf); err != nil && err != ErrTruncated {
+ t.Errorf("unable to unpack truncated message: %v", err)
+ }
+ if !r.Truncated {
+ t.Errorf("truncated message wasn't unpacked as truncated")
+ }
+ if len(r.Answer) != cnt {
+ t.Errorf("answer count after truncated unpack doesn't match: %d", len(r.Answer))
+ }
+ if len(r.Extra) != cnt {
+ t.Errorf("extra count after truncated unpack doesn't match: %d", len(r.Extra))
+ }
+
+ // Now we want to remove almost all of the extra records
+ // We're going to loop over the extra records to get the total size of
+ // all of them
+ off := 0
+ buf1 := make([]byte, m.Len())
+ for i := 0; i < len(m.Extra); i++ {
+ off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress)
+ if err != nil {
+ t.Errorf("failed to pack extra: %v", err)
+ }
+ }
+
+ // Remove all of the extra bytes but 10 bytes from the end of buf
+ off -= 10
+ buf1 = buf[:len(buf)-off]
+
+ r = new(Msg)
+ if err = r.Unpack(buf1); err != nil && err != ErrTruncated {
+ t.Errorf("unable to unpack cutoff message: %v", err)
+ }
+ if !r.Truncated {
+ t.Error("truncated cutoff message wasn't unpacked as truncated")
+ }
+ if len(r.Answer) != cnt {
+ t.Errorf("answer count after cutoff unpack doesn't match: %d", len(r.Answer))
+ }
+ if len(r.Extra) != 0 {
+ t.Errorf("extra count after cutoff unpack is not zero: %d", len(r.Extra))
+ }
+
+ // Now we want to remove almost all of the answer records too
+ buf1 = make([]byte, m.Len())
+ as := 0
+ for i := 0; i < len(m.Extra); i++ {
+ off1 := off
+ off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress)
+ as = off - off1
+ if err != nil {
+ t.Errorf("failed to pack extra: %v", err)
+ }
+ }
+
+ // Keep exactly one answer left
+ // This should still cause Answer to be nil
+ off -= as
+ buf1 = buf[:len(buf)-off]
+
+ r = new(Msg)
+ if err = r.Unpack(buf1); err != nil && err != ErrTruncated {
+ t.Errorf("unable to unpack cutoff message: %v", err)
+ }
+ if !r.Truncated {
+ t.Error("truncated cutoff message wasn't unpacked as truncated")
+ }
+ if len(r.Answer) != 0 {
+ t.Errorf("answer count after second cutoff unpack is not zero: %d", len(r.Answer))
+ }
+
+ // Now leave only 1 byte of the question
+ // Since the header is always 12 bytes, we just need to keep 13
+ buf1 = buf[:13]
+
+ r = new(Msg)
+ err = r.Unpack(buf1)
+ if err == nil || err == ErrTruncated {
+ t.Errorf("error should not be ErrTruncated from question cutoff unpack: %v", err)
+ }
+
+ // Finally, if we only have the header, we should still return an error
+ buf1 = buf[:12]
+
+ r = new(Msg)
+ if err = r.Unpack(buf1); err == nil || err != ErrTruncated {
+ t.Errorf("error not ErrTruncated from header-only unpack: %v", err)
+ }
+}
+
+func TestTimeout(t *testing.T) {
+ // Set up a dummy UDP server that won't respond
+ addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to resolve local udp address: %v", err)
+ }
+ conn, err := net.ListenUDP("udp", addr)
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer conn.Close()
+ addrstr := conn.LocalAddr().String()
+
+ // Message to send
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+
+ // Use a channel + timeout to ensure we don't get stuck if the
+ // Client Timeout is not working properly
+ done := make(chan struct{})
+
+ timeout := time.Millisecond
+ allowable := timeout + (10 * time.Millisecond)
+ abortAfter := timeout + (100 * time.Millisecond)
+
+ start := time.Now()
+
+ go func() {
+ c := &Client{Timeout: timeout}
+ _, _, err := c.Exchange(m, addrstr)
+ if err == nil {
+ t.Error("no timeout using Client")
+ }
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(abortAfter):
+ }
+
+ length := time.Since(start)
+
+ if length > allowable {
+ t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
new file mode 100644
index 000000000..cfa9ad0b2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/clientconfig.go
@@ -0,0 +1,99 @@
+package dns
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ClientConfig wraps the contents of the /etc/resolv.conf file.
+type ClientConfig struct {
+ Servers []string // servers to use
+ Search []string // suffixes to append to local name
+ Port string // what port to use
+ Ndots int // number of dots in name to trigger absolute lookup
+ Timeout int // seconds before giving up on packet
+ Attempts int // lost packets before giving up on server, not used in the package dns
+}
+
+// ClientConfigFromFile parses a resolv.conf(5) like file and returns
+// a *ClientConfig.
+func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
+ file, err := os.Open(resolvconf)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ c := new(ClientConfig)
+ scanner := bufio.NewScanner(file)
+ c.Servers = make([]string, 0)
+ c.Search = make([]string, 0)
+ c.Port = "53"
+ c.Ndots = 1
+ c.Timeout = 5
+ c.Attempts = 2
+
+ for scanner.Scan() {
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ line := scanner.Text()
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ continue
+ }
+ switch f[0] {
+ case "nameserver": // add one name server
+ if len(f) > 1 {
+ // One more check: make sure server name is
+ // just an IP address. Otherwise we need DNS
+ // to look it up.
+ name := f[1]
+ c.Servers = append(c.Servers, name)
+ }
+
+ case "domain": // set search path to just this domain
+ if len(f) > 1 {
+ c.Search = make([]string, 1)
+ c.Search[0] = f[1]
+ } else {
+ c.Search = make([]string, 0)
+ }
+
+ case "search": // set search path to given servers
+ c.Search = make([]string, len(f)-1)
+ for i := 0; i < len(c.Search); i++ {
+ c.Search[i] = f[i+1]
+ }
+
+ case "options": // magic options
+ for i := 1; i < len(f); i++ {
+ s := f[i]
+ switch {
+ case len(s) >= 6 && s[:6] == "ndots:":
+ n, _ := strconv.Atoi(s[6:])
+ if n < 1 {
+ n = 1
+ }
+ c.Ndots = n
+ case len(s) >= 8 && s[:8] == "timeout:":
+ n, _ := strconv.Atoi(s[8:])
+ if n < 1 {
+ n = 1
+ }
+ c.Timeout = n
+ case len(s) >= 8 && s[:9] == "attempts:":
+ n, _ := strconv.Atoi(s[9:])
+ if n < 1 {
+ n = 1
+ }
+ c.Attempts = n
+ case s == "rotate":
+ /* not imp */
+ }
+ }
+ }
+ }
+ return c, nil
+}
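+
+// clientConfigExample is a minimal illustrative sketch of combining a parsed
+// resolv.conf with the package level Exchange; the path and query name are
+// placeholders and the naive host:port join does not handle IPv6 servers.
+func clientConfigExample() (*Msg, error) {
+ conf, err := ClientConfigFromFile("/etc/resolv.conf")
+ if err != nil {
+ return nil, err
+ }
+ if len(conf.Servers) == 0 {
+ return nil, &Error{err: "no nameservers configured"}
+ }
+ m := new(Msg)
+ m.SetQuestion(Fqdn("www.example.com"), TypeA)
+ return Exchange(m, conf.Servers[0]+":"+conf.Port)
+}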
diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go
new file mode 100644
index 000000000..63bc5c814
--- /dev/null
+++ b/vendor/github.com/miekg/dns/clientconfig_test.go
@@ -0,0 +1,50 @@
+package dns
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+const normal string = `
+# Comment
+domain somedomain.com
+nameserver 10.28.10.2
+nameserver 11.28.10.1
+`
+
+const missingNewline string = `
+domain somedomain.com
+nameserver 10.28.10.2
+nameserver 11.28.10.1` // <- NOTE: NO newline.
+
+func testConfig(t *testing.T, data string) {
+ tempDir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("tempDir: %v", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ path := filepath.Join(tempDir, "resolv.conf")
+ if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil {
+ t.Fatalf("writeFile: %v", err)
+ }
+ cc, err := ClientConfigFromFile(path)
+ if err != nil {
+ t.Errorf("error parsing resolv.conf: %v", err)
+ }
+ if l := len(cc.Servers); l != 2 {
+ t.Errorf("incorrect number of nameservers detected: %d", l)
+ }
+ if l := len(cc.Search); l != 1 {
+ t.Errorf("domain directive not parsed correctly: %v", cc.Search)
+ } else {
+ if cc.Search[0] != "somedomain.com" {
+ t.Errorf("domain is unexpected: %v", cc.Search[0])
+ }
+ }
+}
+
+func TestNameserver(t *testing.T) { testConfig(t, normal) }
+func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) }
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
new file mode 100644
index 000000000..cf456165f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -0,0 +1,282 @@
+package dns
+
+import (
+ "errors"
+ "net"
+ "strconv"
+)
+
+const hexDigit = "0123456789abcdef"
+
+// Everything is assumed to be in ClassINET.
+
+// SetReply creates a reply message from a request message.
+func (dns *Msg) SetReply(request *Msg) *Msg {
+ dns.Id = request.Id
+ dns.RecursionDesired = request.RecursionDesired // Copy rd bit
+ dns.Response = true
+ dns.Opcode = OpcodeQuery
+ dns.Rcode = RcodeSuccess
+ if len(request.Question) > 0 {
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = request.Question[0]
+ }
+ return dns
+}
+
+// SetQuestion creates a question message: it sets the Question
+// section, generates an Id and sets the RecursionDesired (RD)
+// bit to true.
+func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
+ dns.Id = Id()
+ dns.RecursionDesired = true
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, t, ClassINET}
+ return dns
+}
+
+// SetNotify creates a notify message: it sets the Question
+// section, generates an Id and sets the Authoritative (AA)
+// bit to true.
+func (dns *Msg) SetNotify(z string) *Msg {
+ dns.Opcode = OpcodeNotify
+ dns.Authoritative = true
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetRcode creates an error message suitable for the request.
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
+ dns.SetReply(request)
+ dns.Rcode = rcode
+ return dns
+}
+
+// SetRcodeFormatError creates a message with FormError set.
+func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
+ dns.Rcode = RcodeFormatError
+ dns.Opcode = OpcodeQuery
+ dns.Response = true
+ dns.Authoritative = false
+ dns.Id = request.Id
+ return dns
+}
+
+// SetUpdate makes the message a dynamic update message. It
+// sets the ZONE section to: z, TypeSOA, ClassINET.
+func (dns *Msg) SetUpdate(z string) *Msg {
+ dns.Id = Id()
+ dns.Response = false
+ dns.Opcode = OpcodeUpdate
+ dns.Compress = false // BIND9 cannot handle compression
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetIxfr creates a message for requesting an IXFR.
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Ns = make([]RR, 1)
+ s := new(SOA)
+ s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
+ s.Serial = serial
+ s.Ns = ns
+ s.Mbox = mbox
+ dns.Question[0] = Question{z, TypeIXFR, ClassINET}
+ dns.Ns[0] = s
+ return dns
+}
+
+// SetAxfr creates a message for requesting an AXFR.
+func (dns *Msg) SetAxfr(z string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeAXFR, ClassINET}
+ return dns
+}
+
+// SetTsig appends a TSIG RR to the message.
+// This is only a skeleton TSIG RR that is added as the last RR in the
+// additional section. The Tsig is calculated when the message is being sent.
+func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
+ t := new(TSIG)
+ t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
+ t.Algorithm = algo
+ t.Fudge = 300
+ t.TimeSigned = uint64(timesigned)
+ t.OrigId = dns.Id
+ dns.Extra = append(dns.Extra, t)
+ return dns
+}
+
+// SetEdns0 appends an EDNS0 OPT RR to the message.
+// TSIG should always be the last RR in a message.
+func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
+ e := new(OPT)
+ e.Hdr.Name = "."
+ e.Hdr.Rrtype = TypeOPT
+ e.SetUDPSize(udpsize)
+ if do {
+ e.SetDo()
+ }
+ dns.Extra = append(dns.Extra, e)
+ return dns
+}
+
+// IsTsig checks if the message has a TSIG record as the last record
+// in the additional section. It returns the TSIG record found or nil.
+func (dns *Msg) IsTsig() *TSIG {
+ if len(dns.Extra) > 0 {
+ if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
+ return dns.Extra[len(dns.Extra)-1].(*TSIG)
+ }
+ }
+ return nil
+}
+
+// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
+// record in the additional section will do. It returns the OPT record
+// found or nil.
+func (dns *Msg) IsEdns0() *OPT {
+ // EDNS0 is at the end of the additional section, start there.
+ // We might want to change this to *only* look at the last two
+// records. So we see TSIG and/or OPT - this is a slightly bigger
+ // change though.
+ for i := len(dns.Extra) - 1; i >= 0; i-- {
+ if dns.Extra[i].Header().Rrtype == TypeOPT {
+ return dns.Extra[i].(*OPT)
+ }
+ }
+ return nil
+}
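+
+// edns0SizeExample is a minimal illustrative sketch pairing SetEdns0 with
+// IsEdns0: the query advertises a 4096 byte UDP buffer and the advertised
+// size is read back. The owner name is a placeholder.
+func edns0SizeExample() uint16 {
+ m := new(Msg)
+ m.SetQuestion("example.org.", TypeDNSKEY)
+ m.SetEdns0(4096, true) // 4096 byte buffer, DO bit set
+
+ if opt := m.IsEdns0(); opt != nil {
+ return opt.UDPSize()
+ }
+ return MinMsgSize
+}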
+
+// IsDomainName checks if s is a valid domain name; it returns the number of
+// labels and true when the domain name is valid. Note that a non fully qualified
+// domain name is considered valid; in that case the last label is counted in
+// the number of labels. When false is returned the number of labels is not
+// defined. Also note that this function is extremely liberal; almost any
+// string is a valid domain name as the DNS is an 8-bit protocol. It checks if each
+// label fits in 63 characters, but there is no length check for the entire
+// string s, i.e. a domain name longer than 255 characters is considered valid.
+func IsDomainName(s string) (labels int, ok bool) {
+ _, labels, err := packDomainName(s, nil, 0, nil, false)
+ return labels, err == nil
+}
+
+// IsSubDomain checks if child is indeed a child of the parent. If child and parent
+// are the same domain, true is returned as well.
+func IsSubDomain(parent, child string) bool {
+ // Entire child is contained in parent
+ return CompareDomainName(parent, child) == CountLabel(parent)
+}
+
+// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
+// The checking is performed on the binary payload.
+func IsMsg(buf []byte) error {
+ // Header
+ if len(buf) < 12 {
+ return errors.New("dns: bad message header")
+ }
+ // Header: Opcode
+ // TODO(miek): more checks here, e.g. check all header bits.
+ return nil
+}
+
+// IsFqdn checks if a domain name is fully qualified.
+func IsFqdn(s string) bool {
+ l := len(s)
+ if l == 0 {
+ return false
+ }
+ return s[l-1] == '.'
+}
+
+// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
+// This means the RRs need to have the same type, name, and class. Returns true
+// if the RR set is valid, otherwise false.
+func IsRRset(rrset []RR) bool {
+ if len(rrset) == 0 {
+ return false
+ }
+ if len(rrset) == 1 {
+ return true
+ }
+ rrHeader := rrset[0].Header()
+ rrType := rrHeader.Rrtype
+ rrClass := rrHeader.Class
+ rrName := rrHeader.Name
+
+ for _, rr := range rrset[1:] {
+ curRRHeader := rr.Header()
+ if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
+ // Mismatch between the records, so this is not a valid rrset for
+ // signing/verifying
+ return false
+ }
+ }
+
+ return true
+}
+
+// Fqdn returns the fully qualified domain name from s.
+// If s is already fully qualified, it behaves as the identity function.
+func Fqdn(s string) string {
+ if IsFqdn(s) {
+ return s
+ }
+ return s + "."
+}
+
+// Copied from the official Go code.
+
+// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
+// address suitable for reverse DNS (PTR) record lookups or an error if it fails
+// to parse the IP address.
+func ReverseAddr(addr string) (arpa string, err error) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", &Error{err: "unrecognized address: " + addr}
+ }
+ if ip.To4() != nil {
+ return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
+ strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
+ }
+ // Must be IPv6
+ buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
+ // Add it, in reverse, to the buffer
+ for i := len(ip) - 1; i >= 0; i-- {
+ v := ip[i]
+ buf = append(buf, hexDigit[v&0xF])
+ buf = append(buf, '.')
+ buf = append(buf, hexDigit[v>>4])
+ buf = append(buf, '.')
+ }
+ // Append "ip6.arpa." and return (buf already has the final .)
+ buf = append(buf, "ip6.arpa."...)
+ return string(buf), nil
+}
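+
+// reverseLookupExample is a minimal illustrative sketch of feeding ReverseAddr
+// into a PTR question; the address is a documentation-prefix placeholder.
+func reverseLookupExample() (*Msg, error) {
+ arpa, err := ReverseAddr("192.0.2.10")
+ if err != nil {
+ return nil, err
+ }
+ m := new(Msg)
+ m.SetQuestion(arpa, TypePTR) // arpa is "10.2.0.192.in-addr.arpa."
+ return m, nil
+}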
+
+// String returns the string representation for the type t.
+func (t Type) String() string {
+ if t1, ok := TypeToString[uint16(t)]; ok {
+ return t1
+ }
+ return "TYPE" + strconv.Itoa(int(t))
+}
+
+// String returns the string representation for the class c.
+func (c Class) String() string {
+ if c1, ok := ClassToString[uint16(c)]; ok {
+ return c1
+ }
+ return "CLASS" + strconv.Itoa(int(c))
+}
+
+// String returns the string representation for the name n.
+func (n Name) String() string {
+ return sprintName(string(n))
+}
diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
new file mode 100644
index 000000000..b3292287c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns.go
@@ -0,0 +1,104 @@
+package dns
+
+import "strconv"
+
+const (
+ year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
+ defaultTtl = 3600 // Default internal TTL.
+
+ DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes.
+ MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet.
+ MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet.
+)
+
+// Error represents a DNS error.
+type Error struct{ err string }
+
+func (e *Error) Error() string {
+ if e == nil {
+ return "dns: <nil>"
+ }
+ return "dns: " + e.err
+}
+
+// An RR represents a resource record.
+type RR interface {
+ // Header returns the header of a resource record. The header contains
+ // everything up to the rdata.
+ Header() *RR_Header
+ // String returns the text representation of the resource record.
+ String() string
+
+ // copy returns a copy of the RR
+ copy() RR
+ // len returns the length (in octets) of the uncompressed RR in wire format.
+ len() int
+ // pack packs an RR into wire format.
+ pack([]byte, int, map[string]int, bool) (int, error)
+}
+
+// RR_Header is the header all DNS resource records share.
+type RR_Header struct {
+ Name string `dns:"cdomain-name"`
+ Rrtype uint16
+ Class uint16
+ Ttl uint32
+ Rdlength uint16 // Length of data after header.
+}
+
+// Header returns itself. This is here to make RR_Header implement the RR interface.
+func (h *RR_Header) Header() *RR_Header { return h }
+
+// Just to implement the RR interface.
+func (h *RR_Header) copy() RR { return nil }
+
+func (h *RR_Header) copyHeader() *RR_Header {
+ r := new(RR_Header)
+ r.Name = h.Name
+ r.Rrtype = h.Rrtype
+ r.Class = h.Class
+ r.Ttl = h.Ttl
+ r.Rdlength = h.Rdlength
+ return r
+}
+
+func (h *RR_Header) String() string {
+ var s string
+
+ if h.Rrtype == TypeOPT {
+ s = ";"
+ // and maybe other things
+ }
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += Class(h.Class).String() + "\t"
+ s += Type(h.Rrtype).String() + "\t"
+ return s
+}
+
+func (h *RR_Header) len() int {
+ l := len(h.Name) + 1
+ l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
+ return l
+}
+
+// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
+func (rr *RFC3597) ToRFC3597(r RR) error {
+ buf := make([]byte, r.len()*2)
+ off, err := PackRR(r, buf, 0, nil, false)
+ if err != nil {
+ return err
+ }
+ buf = buf[:off]
+ if int(r.Header().Rdlength) > off {
+ return ErrBuf
+ }
+
+ rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
+ if err != nil {
+ return err
+ }
+ *rr = *rfc3597.(*RFC3597)
+ return nil
+}
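+
+// toRFC3597Example is a minimal illustrative sketch of rendering a known RR in
+// the RFC 3597 unknown-type form; the owner name and address are placeholders.
+func toRFC3597Example() (string, error) {
+ a, err := NewRR("example.org. 3600 IN A 192.0.2.1")
+ if err != nil {
+ return "", err
+ }
+ unknown := new(RFC3597)
+ if err := unknown.ToRFC3597(a); err != nil {
+ return "", err
+ }
+ // Renders as: example.org. 3600 CLASS1 TYPE1 \# 4 c0000201
+ return unknown.String(), nil
+}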
diff --git a/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/miekg/dns/dns_bench_test.go
new file mode 100644
index 000000000..bccc3d540
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns_bench_test.go
@@ -0,0 +1,211 @@
+package dns
+
+import (
+ "net"
+ "testing"
+)
+
+func BenchmarkMsgLength(b *testing.B) {
+ b.StopTimer()
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ msg.Compress = true
+ return msg
+ }
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ msg.Len()
+ }
+}
+
+func BenchmarkMsgLengthPack(b *testing.B) {
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ msg.Compress = true
+ return msg
+ }
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = msg.Pack()
+ }
+}
+
+func BenchmarkPackDomainName(b *testing.B) {
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ buf := make([]byte, len(name1)+1)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = PackDomainName(name1, buf, 0, nil, false)
+ }
+}
+
+func BenchmarkUnpackDomainName(b *testing.B) {
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ buf := make([]byte, len(name1)+1)
+ _, _ = PackDomainName(name1, buf, 0, nil, false)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = UnpackDomainName(buf, 0)
+ }
+}
+
+func BenchmarkUnpackDomainNameUnprintable(b *testing.B) {
+ name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123."
+ buf := make([]byte, len(name1)+1)
+ _, _ = PackDomainName(name1, buf, 0, nil, false)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = UnpackDomainName(buf, 0)
+ }
+}
+
+func BenchmarkCopy(b *testing.B) {
+ b.ReportAllocs()
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeA)
+ rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1")
+ m.Answer = []RR{rr}
+ rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1")
+ m.Ns = []RR{rr}
+ rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.1")
+ m.Extra = []RR{rr}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Copy()
+ }
+}
+
+func BenchmarkPackA(b *testing.B) {
+ a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)}
+
+ buf := make([]byte, a.len())
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = PackRR(a, buf, 0, nil, false)
+ }
+}
+
+func BenchmarkUnpackA(b *testing.B) {
+ a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)}
+
+ buf := make([]byte, a.len())
+ PackRR(a, buf, 0, nil, false)
+ a = nil
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = UnpackRR(buf, 0)
+ }
+}
+
+func BenchmarkPackMX(b *testing.B) {
+ m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."}
+
+ buf := make([]byte, m.len())
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = PackRR(m, buf, 0, nil, false)
+ }
+}
+
+func BenchmarkUnpackMX(b *testing.B) {
+ m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."}
+
+ buf := make([]byte, m.len())
+ PackRR(m, buf, 0, nil, false)
+ m = nil
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = UnpackRR(buf, 0)
+ }
+}
+
+func BenchmarkPackAAAAA(b *testing.B) {
+ aaaa, _ := NewRR(". IN A ::1")
+
+ buf := make([]byte, aaaa.len())
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = PackRR(aaaa, buf, 0, nil, false)
+ }
+}
+
+func BenchmarkUnpackAAAA(b *testing.B) {
+ aaaa, _ := NewRR(". IN A ::1")
+
+ buf := make([]byte, aaaa.len())
+ PackRR(aaaa, buf, 0, nil, false)
+ aaaa = nil
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = UnpackRR(buf, 0)
+ }
+}
+
+func BenchmarkPackMsg(b *testing.B) {
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ msg.Compress = true
+ return msg
+ }
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
+ buf := make([]byte, 512)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = msg.PackBuffer(buf)
+ }
+}
+
+func BenchmarkUnpackMsg(b *testing.B) {
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ msg.Compress = true
+ return msg
+ }
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
+ msgBuf, _ := msg.Pack()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = msg.Unpack(msgBuf)
+ }
+}
+
+func BenchmarkIdGeneration(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _ = id()
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go
new file mode 100644
index 000000000..ad68533fd
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns_test.go
@@ -0,0 +1,433 @@
+package dns
+
+import (
+ "encoding/hex"
+ "net"
+ "testing"
+)
+
+func TestPackUnpack(t *testing.T) {
+ out := new(Msg)
+ out.Answer = make([]RR, 1)
+ key := new(DNSKEY)
+ key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1}
+ key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
+ key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"
+
+ out.Answer[0] = key
+ msg, err := out.Pack()
+ if err != nil {
+ t.Error("failed to pack msg with DNSKEY")
+ }
+ in := new(Msg)
+ if in.Unpack(msg) != nil {
+ t.Error("failed to unpack msg with DNSKEY")
+ }
+
+ sig := new(RRSIG)
+ sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2,
+ OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.",
+ Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"}
+ sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600}
+
+ out.Answer[0] = sig
+ msg, err = out.Pack()
+ if err != nil {
+ t.Error("failed to pack msg with RRSIG")
+ }
+
+ if in.Unpack(msg) != nil {
+ t.Error("failed to unpack msg with RRSIG")
+ }
+}
+
+func TestPackUnpack2(t *testing.T) {
+ m := new(Msg)
+ m.Extra = make([]RR, 1)
+ m.Answer = make([]RR, 1)
+ dom := "miek.nl."
+ rr := new(A)
+ rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0}
+ rr.A = net.IPv4(127, 0, 0, 1)
+
+ x := new(TXT)
+ x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
+ x.Txt = []string{"heelalaollo"}
+
+ m.Extra[0] = x
+ m.Answer[0] = rr
+ _, err := m.Pack()
+ if err != nil {
+ t.Error("Packing failed: ", err)
+ return
+ }
+}
+
+func TestPackUnpack3(t *testing.T) {
+ m := new(Msg)
+ m.Extra = make([]RR, 2)
+ m.Answer = make([]RR, 1)
+ dom := "miek.nl."
+ rr := new(A)
+ rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0}
+ rr.A = net.IPv4(127, 0, 0, 1)
+
+ x1 := new(TXT)
+ x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
+ x1.Txt = []string{}
+
+ x2 := new(TXT)
+ x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
+ x2.Txt = []string{"heelalaollo"}
+
+ m.Extra[0] = x1
+ m.Extra[1] = x2
+ m.Answer[0] = rr
+ b, err := m.Pack()
+ if err != nil {
+ t.Error("packing failed: ", err)
+ return
+ }
+
+ var unpackMsg Msg
+ err = unpackMsg.Unpack(b)
+ if err != nil {
+ t.Error("unpacking failed")
+ return
+ }
+}
+
+func TestBailiwick(t *testing.T) {
+ yes := map[string]string{
+ "miek1.nl": "miek1.nl",
+ "miek.nl": "ns.miek.nl",
+ ".": "miek.nl",
+ }
+ for parent, child := range yes {
+ if !IsSubDomain(parent, child) {
+ t.Errorf("%s should be child of %s", child, parent)
+ t.Errorf("comparelabels %d", CompareDomainName(parent, child))
+ t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
+ }
+ }
+ no := map[string]string{
+ "www.miek.nl": "ns.miek.nl",
+ "m\\.iek.nl": "ns.miek.nl",
+ "w\\.iek.nl": "w.iek.nl",
+ "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name
+ "miek.nl": ".",
+ }
+ for parent, child := range no {
+ if IsSubDomain(parent, child) {
+ t.Errorf("%s should not be child of %s", child, parent)
+ t.Errorf("comparelabels %d", CompareDomainName(parent, child))
+ t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
+ }
+ }
+}
+
+func TestPack(t *testing.T) {
+ rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"}
+ m := new(Msg)
+ var err error
+ m.Answer = make([]RR, 1)
+ for _, r := range rr {
+ m.Answer[0], err = NewRR(r)
+ if err != nil {
+ t.Errorf("failed to create RR: %v", err)
+ continue
+ }
+ if _, err := m.Pack(); err != nil {
+ t.Errorf("packing failed: %v", err)
+ }
+ }
+ x := new(Msg)
+ ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org")
+ ns.(*NS).Ns = "a.ntpns.org"
+ x.Ns = append(m.Ns, ns)
+ x.Ns = append(m.Ns, ns)
+ x.Ns = append(m.Ns, ns)
+ // This crashes due to the fact that a.ntpns.org isn't a FQDN
+ // How to recover() from a remove panic()?
+ if _, err := x.Pack(); err == nil {
+ t.Error("packing should fail")
+ }
+ x.Answer = make([]RR, 1)
+ x.Answer[0], err = NewRR(rr[0])
+ if _, err := x.Pack(); err == nil {
+ t.Error("packing should fail")
+ }
+ x.Question = make([]Question, 1)
+ x.Question[0] = Question{";sd#eddddsé›↙èµÂ‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET}
+ if _, err := x.Pack(); err == nil {
+ t.Error("packing should fail")
+ }
+}
+
+func TestPackNAPTR(t *testing.T) {
+ for _, n := range []string{
+ `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`,
+ `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`,
+ `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`,
+ } {
+ rr, _ := NewRR(n)
+ msg := make([]byte, rr.len())
+ if off, err := PackRR(rr, msg, 0, nil, false); err != nil {
+ t.Errorf("packing failed: %v", err)
+ t.Errorf("length %d, need more than %d", rr.len(), off)
+ } else {
+ t.Logf("buf size needed: %d", off)
+ }
+ }
+}
+
+func TestCompressLength(t *testing.T) {
+ m := new(Msg)
+ m.SetQuestion("miek.nl", TypeMX)
+ ul := m.Len()
+ m.Compress = true
+ if ul != m.Len() {
+ t.Fatalf("should be equal")
+ }
+}
+
+// Does the predicted length match final packed length?
+func TestMsgCompressLength(t *testing.T) {
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ msg.Compress = true
+ return msg
+ }
+
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1")
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ tests := []*Msg{
+ makeMsg(name1, []RR{rrA}, nil, nil),
+ makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)}
+
+ for _, msg := range tests {
+ predicted := msg.Len()
+ buf, err := msg.Pack()
+ if err != nil {
+ t.Error(err)
+ }
+ if predicted < len(buf) {
+ t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d",
+ msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
+ }
+ }
+}
+
+func TestMsgLength(t *testing.T) {
+ makeMsg := func(question string, ans, ns, e []RR) *Msg {
+ msg := new(Msg)
+ msg.SetQuestion(Fqdn(question), TypeANY)
+ msg.Answer = append(msg.Answer, ans...)
+ msg.Ns = append(msg.Ns, ns...)
+ msg.Extra = append(msg.Extra, e...)
+ return msg
+ }
+
+ name1 := "12345678901234567890123456789012345.12345678.123."
+ rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1")
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
+ tests := []*Msg{
+ makeMsg(name1, []RR{rrA}, nil, nil),
+ makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)}
+
+ for _, msg := range tests {
+ predicted := msg.Len()
+ buf, err := msg.Pack()
+ if err != nil {
+ t.Error(err)
+ }
+ if predicted < len(buf) {
+ t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d",
+ msg.Question[0].Name, predicted, len(buf))
+ }
+ }
+}
+
+func TestMsgLength2(t *testing.T) {
+ // Serialized replies
+ var testMessages = []string{
+ // google.com. IN A?
+ "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000",
+ // amazon.com. IN A? (reply has no EDNS0 record)
+ // TODO(miek): this one is off-by-one, need to find out why
+ //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001",
+ // yahoo.com. IN A?
+ "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000",
+ // microsoft.com. IN A?
+ "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000",
+ // google.com. IN MX?
+ "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000",
+ // reddit.com. IN A?
+ "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000",
+ }
+
+ for i, hexData := range testMessages {
+ // we won't fail the decoding of the hex
+ input, _ := hex.DecodeString(hexData)
+
+ m := new(Msg)
+ m.Unpack(input)
+ m.Compress = true
+ lenComp := m.Len()
+ b, _ := m.Pack()
+ pacComp := len(b)
+ m.Compress = false
+ lenUnComp := m.Len()
+ b, _ = m.Pack()
+ pacUnComp := len(b)
+ if pacComp+1 != lenComp {
+ t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i)
+ }
+ if pacUnComp+1 != lenUnComp {
+ t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i)
+ }
+ }
+}
+
+func TestMsgLengthCompressionMalformed(t *testing.T) {
+ // SOA with empty hostmaster, which is illegal
+ soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345},
+ Ns: ".",
+ Mbox: "",
+ Serial: 0,
+ Refresh: 28800,
+ Retry: 7200,
+ Expire: 604800,
+ Minttl: 60}
+ m := new(Msg)
+ m.Compress = true
+ m.Ns = []RR{soa}
+ m.Len() // Should not crash.
+}
+
+func TestToRFC3597(t *testing.T) {
+ a, _ := NewRR("miek.nl. IN A 10.0.1.1")
+ x := new(RFC3597)
+ x.ToRFC3597(a)
+ if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` {
+ t.Errorf("string mismatch, got: %s", x)
+ }
+
+ b, _ := NewRR("miek.nl. IN MX 10 mx.miek.nl.")
+ x.ToRFC3597(b)
+ if x.String() != `miek.nl. 3600 CLASS1 TYPE15 \# 14 000a026d78046d69656b026e6c00` {
+ t.Errorf("string mismatch, got: %s", x)
+ }
+}
+
+func TestNoRdataPack(t *testing.T) {
+ data := make([]byte, 1024)
+ for typ, fn := range TypeToRR {
+ r := fn()
+ *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16}
+ _, err := PackRR(r, data, 0, nil, false)
+ if err != nil {
+ t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err)
+ }
+ }
+}
+
+func TestNoRdataUnpack(t *testing.T) {
+ data := make([]byte, 1024)
+ for typ, fn := range TypeToRR {
+ if typ == TypeSOA || typ == TypeTSIG {
+ // SOA, TSIG will not be seen (like this) in dyn. updates?
+ continue
+ }
+ r := fn()
+ *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16}
+ off, err := PackRR(r, data, 0, nil, false)
+ if err != nil {
+ // Should always work; TestNoRdataPack should have caught this
+ t.Errorf("failed to pack RR: %v", err)
+ continue
+ }
+ rr, _, err := UnpackRR(data[:off], 0)
+ if err != nil {
+ t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err)
+ }
+ t.Log(rr)
+ }
+}
+
+func TestRdataOverflow(t *testing.T) {
+ rr := new(RFC3597)
+ rr.Hdr.Name = "."
+ rr.Hdr.Class = ClassINET
+ rr.Hdr.Rrtype = 65280
+ rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF))
+ buf := make([]byte, 0xFFFF*2)
+ if _, err := PackRR(rr, buf, 0, nil, false); err != nil {
+ t.Fatalf("maximum size rrdata pack failed: %v", err)
+ }
+ rr.Rdata += "00"
+ if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata {
+ t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err)
+ }
+}
+
+func TestCopy(t *testing.T) {
+ rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL
+ rr1 := Copy(rr)
+ if rr.String() != rr1.String() {
+ t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String())
+ }
+}
+
+func TestMsgCopy(t *testing.T) {
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeA)
+ rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1")
+ m.Answer = []RR{rr}
+ rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1")
+ m.Ns = []RR{rr}
+
+ m1 := m.Copy()
+ if m.String() != m1.String() {
+ t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String())
+ }
+
+ m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1")
+ if m.String() == m1.String() {
+ t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String())
+ }
+
+ rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2")
+ m1.Answer = append(m1.Answer, rr)
+ if m1.Ns[0].String() == m1.Answer[1].String() {
+ t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String())
+ }
+}
+
+func TestMsgPackBuffer(t *testing.T) {
+ var testMessages = []string{
+ // news.ycombinator.com.in.escapemg.com. IN A, response
+ "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10",
+
+ // news.ycombinator.com.in.escapemg.com. IN A, question
+ "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001",
+
+ "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001",
+ }
+
+ for i, hexData := range testMessages {
+		// the hex fixtures are known-good, so ignore any decode error
+ input, _ := hex.DecodeString(hexData)
+ m := new(Msg)
+ if err := m.Unpack(input); err != nil {
+ t.Errorf("packet %d failed to unpack", i)
+ continue
+ }
+ t.Logf("packet %d %s", i, m.String())
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
new file mode 100644
index 000000000..f5f3fbdd8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -0,0 +1,721 @@
+package dns
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/asn1"
+ "encoding/binary"
+ "encoding/hex"
+ "math/big"
+ "sort"
+ "strings"
+ "time"
+)
+
+// DNSSEC signing algorithm codes.
+const (
+ _ uint8 = iota
+ RSAMD5
+ DH
+ DSA
+ _ // Skip 4, RFC 6725, section 2.1
+ RSASHA1
+ DSANSEC3SHA1
+ RSASHA1NSEC3SHA1
+ RSASHA256
+ _ // Skip 9, RFC 6725, section 2.1
+ RSASHA512
+ _ // Skip 11, RFC 6725, section 2.1
+ ECCGOST
+ ECDSAP256SHA256
+ ECDSAP384SHA384
+ INDIRECT uint8 = 252
+ PRIVATEDNS uint8 = 253 // Private (experimental keys)
+ PRIVATEOID uint8 = 254
+)
+
+// Map for algorithm names.
+var AlgorithmToString = map[uint8]string{
+ RSAMD5: "RSAMD5",
+ DH: "DH",
+ DSA: "DSA",
+ RSASHA1: "RSASHA1",
+ DSANSEC3SHA1: "DSA-NSEC3-SHA1",
+ RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
+ RSASHA256: "RSASHA256",
+ RSASHA512: "RSASHA512",
+ ECCGOST: "ECC-GOST",
+ ECDSAP256SHA256: "ECDSAP256SHA256",
+ ECDSAP384SHA384: "ECDSAP384SHA384",
+ INDIRECT: "INDIRECT",
+ PRIVATEDNS: "PRIVATEDNS",
+ PRIVATEOID: "PRIVATEOID",
+}
+
+// Map of algorithm strings.
+var StringToAlgorithm = reverseInt8(AlgorithmToString)
+
+// Map of algorithm crypto hashes.
+var AlgorithmToHash = map[uint8]crypto.Hash{
+ RSAMD5: crypto.MD5, // Deprecated in RFC 6725
+ RSASHA1: crypto.SHA1,
+ RSASHA1NSEC3SHA1: crypto.SHA1,
+ RSASHA256: crypto.SHA256,
+ ECDSAP256SHA256: crypto.SHA256,
+ ECDSAP384SHA384: crypto.SHA384,
+ RSASHA512: crypto.SHA512,
+}
+
+// DNSSEC hashing algorithm codes.
+const (
+ _ uint8 = iota
+ SHA1 // RFC 4034
+ SHA256 // RFC 4509
+ GOST94 // RFC 5933
+ SHA384 // Experimental
+ SHA512 // Experimental
+)
+
+// Map for hash names.
+var HashToString = map[uint8]string{
+ SHA1: "SHA1",
+ SHA256: "SHA256",
+ GOST94: "GOST94",
+ SHA384: "SHA384",
+ SHA512: "SHA512",
+}
+
+// Map of hash strings.
+var StringToHash = reverseInt8(HashToString)
+
+// DNSKEY flag values.
+const (
+ SEP = 1
+ REVOKE = 1 << 7
+ ZONE = 1 << 8
+)
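As an aside, these bits combine into the two DNSKEY flag values commonly seen in signed zones; a tiny sketch (editor's illustration, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.ZONE)           // 256: the usual flags of a zone-signing key (ZSK)
	fmt.Println(dns.ZONE | dns.SEP) // 257: zone key with the Secure Entry Point bit, i.e. a KSK
}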
+
+// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
+type rrsigWireFmt struct {
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ /* No Signature */
+}
+
+// Used for converting DNSKEY's rdata to wirefmt.
+type dnskeyWireFmt struct {
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+ /* Nothing is left out */
+}
+
+func divRoundUp(a, b int) int {
+ return (a + b - 1) / b
+}
+
+// KeyTag calculates the keytag (or key-id) of the DNSKEY.
+func (k *DNSKEY) KeyTag() uint16 {
+ if k == nil {
+ return 0
+ }
+ var keytag int
+ switch k.Algorithm {
+ case RSAMD5:
+		// Look at the bottom two bytes of the modulus, which is the last
+		// item in the pubkey. We could do this faster by looking directly
+		// at the base64 values. But I'm lazy.
+ modulus, _ := fromBase64([]byte(k.PublicKey))
+ if len(modulus) > 1 {
+ x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
+ keytag = int(x)
+ }
+ default:
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := packKeyWire(keywire, wire)
+ if err != nil {
+ return 0
+ }
+ wire = wire[:n]
+ for i, v := range wire {
+ if i&1 != 0 {
+ keytag += int(v) // must be larger than uint32
+ } else {
+ keytag += int(v) << 8
+ }
+ }
+ keytag += (keytag >> 16) & 0xFFFF
+ keytag &= 0xFFFF
+ }
+ return uint16(keytag)
+}
+
+// ToDS converts a DNSKEY record to a DS record.
+func (k *DNSKEY) ToDS(h uint8) *DS {
+ if k == nil {
+ return nil
+ }
+ ds := new(DS)
+ ds.Hdr.Name = k.Hdr.Name
+ ds.Hdr.Class = k.Hdr.Class
+ ds.Hdr.Rrtype = TypeDS
+ ds.Hdr.Ttl = k.Hdr.Ttl
+ ds.Algorithm = k.Algorithm
+ ds.DigestType = h
+ ds.KeyTag = k.KeyTag()
+
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := packKeyWire(keywire, wire)
+ if err != nil {
+ return nil
+ }
+ wire = wire[:n]
+
+ owner := make([]byte, 255)
+ off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
+ if err1 != nil {
+ return nil
+ }
+ owner = owner[:off]
+ // RFC4034:
+ // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
+ // "|" denotes concatenation
+ // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
+
+ // digest buffer
+ digest := append(owner, wire...) // another copy
+
+ var hash crypto.Hash
+ switch h {
+ case SHA1:
+ hash = crypto.SHA1
+ case SHA256:
+ hash = crypto.SHA256
+ case SHA384:
+ hash = crypto.SHA384
+ case SHA512:
+ hash = crypto.SHA512
+ default:
+ return nil
+ }
+
+ s := hash.New()
+ s.Write(digest)
+ ds.Digest = hex.EncodeToString(s.Sum(nil))
+ return ds
+}
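A short sketch of KeyTag and ToDS in use (editor's illustration, not part of the vendored code). The DNSKEY rdata is the RSASHA256 test key from dnssec_test.go below, whose key tag is 12051 (see TestTag):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("miek.nl. 3600 IN DNSKEY 256 3 8 AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz")
	if err != nil {
		panic(err)
	}
	key := rr.(*dns.DNSKEY)

	fmt.Println(key.KeyTag()) // 12051 for this key (compare TestTag below)

	ds := key.ToDS(dns.SHA256)
	if ds == nil {
		panic("unsupported digest type")
	}
	fmt.Println(ds.String()) // the derived DS record, digest type 2 (SHA-256)
}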
+
+// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
+func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
+ c := &CDNSKEY{DNSKEY: *k}
+ c.Hdr = *k.Hdr.copyHeader()
+ c.Hdr.Rrtype = TypeCDNSKEY
+ return c
+}
+
+// ToCDS converts a DS record to a CDS record.
+func (d *DS) ToCDS() *CDS {
+ c := &CDS{DS: *d}
+ c.Hdr = *d.Hdr.copyHeader()
+ c.Hdr.Rrtype = TypeCDS
+ return c
+}
+
+// Sign signs an RRSet. The signature needs to be filled in with the values:
+// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
+// from the RRset. Sign returns a non-nil error if the signing fails.
+// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
+// zero, it is used as-is, otherwise the TTL of the RRset is used as the
+// OrigTTL.
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
+ if k == nil {
+ return ErrPrivKey
+ }
+ // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ rr.Hdr.Rrtype = TypeRRSIG
+ rr.Hdr.Name = rrset[0].Header().Name
+ rr.Hdr.Class = rrset[0].Header().Class
+ if rr.OrigTtl == 0 { // If set don't override
+ rr.OrigTtl = rrset[0].Header().Ttl
+ }
+ rr.TypeCovered = rrset[0].Header().Rrtype
+ rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
+
+ if strings.HasPrefix(rrset[0].Header().Name, "*") {
+ rr.Labels-- // wildcard, remove from label count
+ }
+
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ // For signing, lowercase this name
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+
+ // Create the desired binary blob
+ signdata := make([]byte, DefaultMsgSize)
+ n, err := packSigWire(sigwire, signdata)
+ if err != nil {
+ return err
+ }
+ signdata = signdata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+ signdata = append(signdata, wire...)
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ h := hash.New()
+ h.Write(signdata)
+
+ signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return err
+ }
+
+ rr.Signature = toBase64(signature)
+
+ return nil
+}
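A sketch of the five fields a caller fills in before Sign, as listed in the doc comment above (editor's illustration, not part of the vendored code; the zone and record names are placeholders):

package main

import (
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// Throwaway RSASHA256 key; in practice it would be read from disk.
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 256
	key.Protocol = 3
	key.Algorithm = dns.RSASHA256
	priv, err := key.Generate(1024)
	if err != nil {
		panic(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}

	// Only these fields need to be set; Sign copies the rest from the RRset.
	sig := new(dns.RRSIG)
	sig.Inception = uint32(time.Now().Unix())
	sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
	sig.KeyTag = key.KeyTag()
	sig.SignerName = key.Hdr.Name
	sig.Algorithm = key.Algorithm

	if err := sig.Sign(priv.(*rsa.PrivateKey), []dns.RR{a}); err != nil {
		panic(err)
	}
	fmt.Println(sig.String())
}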
+
+func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
+ signature, err := k.Sign(rand.Reader, hashed, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ switch alg {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
+ return signature, nil
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ ecdsaSignature := &struct {
+ R, S *big.Int
+ }{}
+ if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
+ return nil, err
+ }
+
+ var intlen int
+ switch alg {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+
+ signature := intToBytes(ecdsaSignature.R, intlen)
+ signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
+ return signature, nil
+
+ // There is no defined interface for what a DSA backed crypto.Signer returns
+ case DSA, DSANSEC3SHA1:
+ // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
+ // signature := []byte{byte(t)}
+ // signature = append(signature, intToBytes(r1, 20)...)
+ // signature = append(signature, intToBytes(s1, 20)...)
+ // rr.Signature = signature
+ }
+
+ return nil, ErrAlg
+}
+
+// Verify validates an RRSet with the signature and key. This is only the
+// cryptographic test, the signature validity period must be checked separately.
+// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
+func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
+ // First the easy checks
+ if !IsRRset(rrset) {
+ return ErrRRset
+ }
+ if rr.KeyTag != k.KeyTag() {
+ return ErrKey
+ }
+ if rr.Hdr.Class != k.Hdr.Class {
+ return ErrKey
+ }
+ if rr.Algorithm != k.Algorithm {
+ return ErrKey
+ }
+ if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
+ return ErrKey
+ }
+ if k.Protocol != 3 {
+ return ErrKey
+ }
+
+ // IsRRset checked that we have at least one RR and that the RRs in
+ // the set have consistent type, class, and name. Also check that type and
+ // class matches the RRSIG record.
+ if rrset[0].Header().Class != rr.Hdr.Class {
+ return ErrRRset
+ }
+ if rrset[0].Header().Rrtype != rr.TypeCovered {
+ return ErrRRset
+ }
+
+ // RFC 4035 5.3.2. Reconstructing the Signed Data
+ // Copy the sig, except the rrsig data
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+ // Create the desired binary blob
+ signeddata := make([]byte, DefaultMsgSize)
+ n, err := packSigWire(sigwire, signeddata)
+ if err != nil {
+ return err
+ }
+ signeddata = signeddata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+ signeddata = append(signeddata, wire...)
+
+ sigbuf := rr.sigBuf() // Get the binary signature data
+ if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+ // TODO(miek)
+ // remove the domain name and assume its ours?
+ }
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ switch rr.Algorithm {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
+ // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
+ pubkey := k.publicKeyRSA() // Get the key
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ h := hash.New()
+ h.Write(signeddata)
+ return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pubkey := k.publicKeyECDSA()
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ // Split sigbuf into the r and s coordinates
+ r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
+ s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
+
+ h := hash.New()
+ h.Write(signeddata)
+ if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
+ return nil
+ }
+ return ErrSig
+
+ default:
+ return ErrAlg
+ }
+}
+
+// ValidityPeriod uses RFC1982 serial arithmetic to calculate
+// if a signature period is valid. If t is the zero time, the
+// current time is used; otherwise t is. Returns true if the signature
+// is valid at the given time, otherwise returns false.
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
+ var utc int64
+ if t.IsZero() {
+ utc = time.Now().UTC().Unix()
+ } else {
+ utc = t.UTC().Unix()
+ }
+ modi := (int64(rr.Inception) - utc) / year68
+ mode := (int64(rr.Expiration) - utc) / year68
+ ti := int64(rr.Inception) + (modi * year68)
+ te := int64(rr.Expiration) + (mode * year68)
+ return ti <= utc && utc <= te
+}
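A sketch of the two checks a validator performs: the cryptographic Verify plus the separate time-window check it deliberately skips (editor's illustration, not part of the vendored code). The records are the RFC 6605, Section 6.1 vectors also used in TestRFC6605P256 below; parse errors are ignored because the vectors are known-good:

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key, _ := dns.NewRR(`example.net. 3600 IN DNSKEY 257 3 13 (
	                GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb
	                krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )`)
	a, _ := dns.NewRR(`www.example.net. 3600 IN A 192.0.2.1`)
	sig, _ := dns.NewRR(`www.example.net. 3600 IN RRSIG A 13 3 3600 (
	                20100909100439 20100812100439 55648 example.net.
	                qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA
	                yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )`)

	rrsig := sig.(*dns.RRSIG)
	if err := rrsig.Verify(key.(*dns.DNSKEY), []dns.RR{a}); err != nil {
		panic(err) // the cryptographic check failed
	}
	// Verify does not check the validity period; do that separately.
	// The zero time means "evaluate against the current time", so this
	// prints false for these 2010-dated vectors.
	fmt.Println(rrsig.ValidityPeriod(time.Time{}))
}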
+
+// Return the signature's sigdata, decoded from base64, as a byte slice.
+func (rr *RRSIG) sigBuf() []byte {
+ sigbuf, err := fromBase64([]byte(rr.Signature))
+ if err != nil {
+ return nil
+ }
+ return sigbuf
+}
+
+// publicKeyRSA returns the RSA public key from a DNSKEY record.
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+
+ // RFC 2537/3110, section 2. RSA Public KEY Resource Records
+	// Length is in the 0th byte, unless it's zero, then it
+	// is in bytes 1 and 2 and it's a 16-bit number.
+ explen := uint16(keybuf[0])
+ keyoff := 1
+ if explen == 0 {
+ explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
+ keyoff = 3
+ }
+ pubkey := new(rsa.PublicKey)
+
+ pubkey.N = big.NewInt(0)
+ shift := uint64((explen - 1) * 8)
+ expo := uint64(0)
+ for i := int(explen - 1); i > 0; i-- {
+ expo += uint64(keybuf[keyoff+i]) << shift
+ shift -= 8
+ }
+ // Remainder
+ expo += uint64(keybuf[keyoff])
+ if expo > 2<<31 {
+ // Larger expo than supported.
+ // println("dns: F5 primes (or larger) are not supported")
+ return nil
+ }
+ pubkey.E = int(expo)
+
+ pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
+ return pubkey
+}
+
+// publicKeyECDSA returns the Curve public key from the DNSKEY record.
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ pubkey := new(ecdsa.PublicKey)
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ pubkey.Curve = elliptic.P256()
+ if len(keybuf) != 64 {
+ // wrongly encoded key
+ return nil
+ }
+ case ECDSAP384SHA384:
+ pubkey.Curve = elliptic.P384()
+ if len(keybuf) != 96 {
+ // Wrongly encoded key
+ return nil
+ }
+ }
+ pubkey.X = big.NewInt(0)
+ pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
+ pubkey.Y = big.NewInt(0)
+ pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
+ return pubkey
+}
+
+func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ if len(keybuf) < 22 {
+ return nil
+ }
+ t, keybuf := int(keybuf[0]), keybuf[1:]
+ size := 64 + t*8
+ q, keybuf := keybuf[:20], keybuf[20:]
+ if len(keybuf) != 3*size {
+ return nil
+ }
+ p, keybuf := keybuf[:size], keybuf[size:]
+ g, y := keybuf[:size], keybuf[size:]
+ pubkey := new(dsa.PublicKey)
+ pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
+ pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
+ pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
+ pubkey.Y = big.NewInt(0).SetBytes(y)
+ return pubkey
+}
+
+type wireSlice [][]byte
+
+func (p wireSlice) Len() int { return len(p) }
+func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p wireSlice) Less(i, j int) bool {
+ _, ioff, _ := UnpackDomainName(p[i], 0)
+ _, joff, _ := UnpackDomainName(p[j], 0)
+ return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
+}
+
+// Return the raw signature data.
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
+ wires := make(wireSlice, len(rrset))
+ for i, r := range rrset {
+ r1 := r.copy()
+ r1.Header().Ttl = s.OrigTtl
+ labels := SplitDomainName(r1.Header().Name)
+ // 6.2. Canonical RR Form. (4) - wildcards
+ if len(labels) > int(s.Labels) {
+ // Wildcard
+ r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
+ }
+ // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
+ r1.Header().Name = strings.ToLower(r1.Header().Name)
+ // 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
+ // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
+ // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
+ // SRV, DNAME, A6
+ //
+ // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
+ // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
+ // that needs conversion to lowercase, and twice at that. Since HINFO
+ // records contain no domain names, they are not subject to case
+ // conversion.
+ switch x := r1.(type) {
+ case *NS:
+ x.Ns = strings.ToLower(x.Ns)
+ case *CNAME:
+ x.Target = strings.ToLower(x.Target)
+ case *SOA:
+ x.Ns = strings.ToLower(x.Ns)
+ x.Mbox = strings.ToLower(x.Mbox)
+ case *MB:
+ x.Mb = strings.ToLower(x.Mb)
+ case *MG:
+ x.Mg = strings.ToLower(x.Mg)
+ case *MR:
+ x.Mr = strings.ToLower(x.Mr)
+ case *PTR:
+ x.Ptr = strings.ToLower(x.Ptr)
+ case *MINFO:
+ x.Rmail = strings.ToLower(x.Rmail)
+ x.Email = strings.ToLower(x.Email)
+ case *MX:
+ x.Mx = strings.ToLower(x.Mx)
+ case *NAPTR:
+ x.Replacement = strings.ToLower(x.Replacement)
+ case *KX:
+ x.Exchanger = strings.ToLower(x.Exchanger)
+ case *SRV:
+ x.Target = strings.ToLower(x.Target)
+ case *DNAME:
+ x.Target = strings.ToLower(x.Target)
+ }
+ // 6.2. Canonical RR Form. (5) - origTTL
+ wire := make([]byte, r1.len()+1) // +1 to be safe(r)
+ off, err1 := PackRR(r1, wire, 0, nil, false)
+ if err1 != nil {
+ return nil, err1
+ }
+ wire = wire[:off]
+ wires[i] = wire
+ }
+ sort.Sort(wires)
+ for i, wire := range wires {
+ if i > 0 && bytes.Equal(wire, wires[i-1]) {
+ continue
+ }
+ buf = append(buf, wire...)
+ }
+ return buf, nil
+}
+
+func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go RRSIG packing
+ off, err := packUint16(sw.TypeCovered, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(sw.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(sw.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(sw.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go DNSKEY packing
+ off, err := packUint16(dw.Flags, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(dw.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(dw.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(dw.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
new file mode 100644
index 000000000..229a07937
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
@@ -0,0 +1,156 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "math/big"
+)
+
+// Generate generates a DNSKEY of the given bit size.
+// The public part is put inside the DNSKEY record.
+// The Algorithm in the key must be set as this will define
+// what kind of DNSKEY will be generated.
+// The ECDSA algorithms imply a fixed key size; in that case
+// bits should be set to the size of the algorithm.
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
+ switch k.Algorithm {
+ case DSA, DSANSEC3SHA1:
+ if bits != 1024 {
+ return nil, ErrKeySize
+ }
+ case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
+ if bits < 512 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case RSASHA512:
+ if bits < 1024 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP256SHA256:
+ if bits != 256 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP384SHA384:
+ if bits != 384 {
+ return nil, ErrKeySize
+ }
+ }
+
+ switch k.Algorithm {
+ case DSA, DSANSEC3SHA1:
+ params := new(dsa.Parameters)
+ if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
+ return nil, err
+ }
+ priv := new(dsa.PrivateKey)
+ priv.PublicKey.Parameters = *params
+ err := dsa.GenerateKey(priv, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
+ return priv, nil
+ case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
+ priv, err := rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
+ return priv, nil
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ var c elliptic.Curve
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ c = elliptic.P256()
+ case ECDSAP384SHA384:
+ c = elliptic.P384()
+ }
+ priv, err := ecdsa.GenerateKey(c, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
+ return priv, nil
+ default:
+ return nil, ErrAlg
+ }
+}
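A sketch of generating an ECDSA P-256 key pair as described above (editor's illustration, not part of the vendored code; the zone name is a placeholder):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 257 // ZONE|SEP, i.e. a key-signing key
	key.Protocol = 3
	key.Algorithm = dns.ECDSAP256SHA256

	priv, err := key.Generate(256) // P-256 implies a 256-bit key
	if err != nil {
		panic(err)
	}
	fmt.Println(key.String()) // DNSKEY RR with PublicKey filled in by Generate
	_ = priv                  // the matching crypto.PrivateKey (here an *ecdsa.PrivateKey)
}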
+
+// Set the public key (the values E and N) for RSA.
+func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
+ if _E == 0 || _N == nil {
+ return false
+ }
+ buf := exponentToBuf(_E)
+ buf = append(buf, _N.Bytes()...)
+ k.PublicKey = toBase64(buf)
+ return true
+}
+
+// Set the public key for Elliptic Curves
+func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
+ if _X == nil || _Y == nil {
+ return false
+ }
+ var intlen int
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
+ return true
+}
+
+// Set the public key for DSA
+func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
+ if _Q == nil || _P == nil || _G == nil || _Y == nil {
+ return false
+ }
+ buf := dsaToBuf(_Q, _P, _G, _Y)
+ k.PublicKey = toBase64(buf)
+ return true
+}
+
+// exponentToBuf returns a length-prefixed buffer holding the RSA public exponent,
+// as described in RFC 3110, Section 2. RSA Public KEY Resource Records.
+func exponentToBuf(_E int) []byte {
+ var buf []byte
+ i := big.NewInt(int64(_E))
+ if len(i.Bytes()) < 256 {
+ buf = make([]byte, 1)
+ buf[0] = uint8(len(i.Bytes()))
+ } else {
+ buf = make([]byte, 3)
+ buf[0] = 0
+ buf[1] = uint8(len(i.Bytes()) >> 8)
+ buf[2] = uint8(len(i.Bytes()))
+ }
+ buf = append(buf, i.Bytes()...)
+ return buf
+}
+
+// Marshal the curve points X and Y for ECDSA. The two
+// values are just concatenated.
+func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
+ buf := intToBytes(_X, intlen)
+ buf = append(buf, intToBytes(_Y, intlen)...)
+ return buf
+}
+
+// Marshal the DSA public key values into their DNSKEY wire form: the size
+// octet T, followed by Q, P, G and Y.
+func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
+ t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
+ buf := []byte{byte(t)}
+ buf = append(buf, intToBytes(_Q, 20)...)
+ buf = append(buf, intToBytes(_P, 64+t*8)...)
+ buf = append(buf, intToBytes(_G, 64+t*8)...)
+ buf = append(buf, intToBytes(_Y, 64+t*8)...)
+ return buf
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
new file mode 100644
index 000000000..c0b54dc76
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
@@ -0,0 +1,249 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// NewPrivateKey returns a PrivateKey by parsing the string s.
+// s should be in the same form as the BIND private key files.
+func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
+ if s[len(s)-1] != '\n' { // We need a closing newline
+ return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
+ }
+ return k.ReadPrivateKey(strings.NewReader(s), "")
+}
+
+// ReadPrivateKey reads a private key from the io.Reader q. The string file is
+// only used in error reporting.
+// The public key must be known, because some cryptographic algorithms embed
+// the public key inside the private key.
+func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
+ m, err := parseKey(q, file)
+ if m == nil {
+ return nil, err
+ }
+ if _, ok := m["private-key-format"]; !ok {
+ return nil, ErrPrivKey
+ }
+ if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
+ return nil, ErrPrivKey
+ }
+ // TODO(mg): check if the pubkey matches the private key
+ algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
+ if err != nil {
+ return nil, ErrPrivKey
+ }
+ switch uint8(algo) {
+ case DSA:
+ priv, err := readPrivateKeyDSA(m)
+ if err != nil {
+ return nil, err
+ }
+ pub := k.publicKeyDSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, nil
+ case RSAMD5:
+ fallthrough
+ case RSASHA1:
+ fallthrough
+ case RSASHA1NSEC3SHA1:
+ fallthrough
+ case RSASHA256:
+ fallthrough
+ case RSASHA512:
+ priv, err := readPrivateKeyRSA(m)
+ if err != nil {
+ return nil, err
+ }
+ pub := k.publicKeyRSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, nil
+ case ECCGOST:
+ return nil, ErrPrivKey
+ case ECDSAP256SHA256:
+ fallthrough
+ case ECDSAP384SHA384:
+ priv, err := readPrivateKeyECDSA(m)
+ if err != nil {
+ return nil, err
+ }
+ pub := k.publicKeyECDSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, nil
+ default:
+ return nil, ErrPrivKey
+ }
+}
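A sketch of loading a BIND-format key pair from disk with ReadRR and ReadPrivateKey (editor's illustration, not part of the vendored code; the file names are hypothetical, following the K<zone>+<alg>+<keytag> convention seen in the tests):

package main

import (
	"fmt"
	"os"

	"github.com/miekg/dns"
)

func main() {
	pub, err := os.Open("Kexample.org.+008+12345.key")
	if err != nil {
		panic(err)
	}
	defer pub.Close()
	rr, err := dns.ReadRR(pub, "Kexample.org.+008+12345.key")
	if err != nil {
		panic(err)
	}
	key := rr.(*dns.DNSKEY) // the public half is required, see the comment above

	priv, err := os.Open("Kexample.org.+008+12345.private")
	if err != nil {
		panic(err)
	}
	defer priv.Close()
	secret, err := key.ReadPrivateKey(priv, "Kexample.org.+008+12345.private")
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded a %T for %s\n", secret, key.Hdr.Name)
}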
+
+// Read a private key (file) string and create a public key. Return the private key.
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
+ p := new(rsa.PrivateKey)
+ p.Primes = []*big.Int{nil, nil}
+ for k, v := range m {
+ switch k {
+ case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ switch k {
+ case "modulus":
+ p.PublicKey.N = big.NewInt(0)
+ p.PublicKey.N.SetBytes(v1)
+ case "publicexponent":
+ i := big.NewInt(0)
+ i.SetBytes(v1)
+ p.PublicKey.E = int(i.Int64()) // int64 should be large enough
+ case "privateexponent":
+ p.D = big.NewInt(0)
+ p.D.SetBytes(v1)
+ case "prime1":
+ p.Primes[0] = big.NewInt(0)
+ p.Primes[0].SetBytes(v1)
+ case "prime2":
+ p.Primes[1] = big.NewInt(0)
+ p.Primes[1].SetBytes(v1)
+ }
+ case "exponent1", "exponent2", "coefficient":
+ // not used in Go (yet)
+ case "created", "publish", "activate":
+ // not used in Go (yet)
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
+ p := new(dsa.PrivateKey)
+ p.X = big.NewInt(0)
+ for k, v := range m {
+ switch k {
+ case "private_value(x)":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ p.X.SetBytes(v1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
+ p := new(ecdsa.PrivateKey)
+ p.D = big.NewInt(0)
+ // TODO: validate that the required flags are present
+ for k, v := range m {
+ switch k {
+ case "privatekey":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ p.D.SetBytes(v1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+// parseKey reads a private key from r. It returns a map[string]string,
+// with the key-value pairs, or an error when the file is not correct.
+func parseKey(r io.Reader, file string) (map[string]string, error) {
+ s := scanInit(r)
+ m := make(map[string]string)
+ c := make(chan lex)
+ k := ""
+ // Start the lexer
+ go klexer(s, c)
+ for l := range c {
+ // It should alternate
+ switch l.value {
+ case zKey:
+ k = l.token
+ case zValue:
+ if k == "" {
+ return nil, &ParseError{file, "no private key seen", l}
+ }
+ //println("Setting", strings.ToLower(k), "to", l.token, "b")
+ m[strings.ToLower(k)] = l.token
+ k = ""
+ }
+ }
+ return m, nil
+}
+
+// klexer scans the sourcefile and returns tokens on the channel c.
+func klexer(s *scan, c chan lex) {
+ var l lex
+ str := "" // Hold the current read text
+ commt := false
+ key := true
+ x, err := s.tokenText()
+ defer close(c)
+ for err == nil {
+ l.column = s.position.Column
+ l.line = s.position.Line
+ switch x {
+ case ':':
+ if commt {
+ break
+ }
+ l.token = str
+ if key {
+ l.value = zKey
+ c <- l
+ // Next token is a space, eat it
+ s.tokenText()
+ key = false
+ str = ""
+ } else {
+ l.value = zValue
+ }
+ case ';':
+ commt = true
+ case '\n':
+ if commt {
+ // Reset a comment
+ commt = false
+ }
+ l.value = zValue
+ l.token = str
+ c <- l
+ str = ""
+ commt = false
+ key = true
+ default:
+ if commt {
+ break
+ }
+ str += string(x)
+ }
+ x, err = s.tokenText()
+ }
+ if len(str) > 0 {
+ // Send remainder
+ l.token = str
+ l.value = zValue
+ c <- l
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go
new file mode 100644
index 000000000..56f3ea934
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_privkey.go
@@ -0,0 +1,85 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "math/big"
+ "strconv"
+)
+
+const format = "Private-key-format: v1.3\n"
+
+// PrivateKeyString converts a PrivateKey to a string. This string has the same
+// format as the private-key-file of BIND9 (Private-key-format: v1.3).
+// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
+// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey.
+func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
+ algorithm := strconv.Itoa(int(r.Algorithm))
+ algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
+
+ switch p := p.(type) {
+ case *rsa.PrivateKey:
+ modulus := toBase64(p.PublicKey.N.Bytes())
+ e := big.NewInt(int64(p.PublicKey.E))
+ publicExponent := toBase64(e.Bytes())
+ privateExponent := toBase64(p.D.Bytes())
+ prime1 := toBase64(p.Primes[0].Bytes())
+ prime2 := toBase64(p.Primes[1].Bytes())
+ // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
+ // and from: http://code.google.com/p/go/issues/detail?id=987
+ one := big.NewInt(1)
+ p1 := big.NewInt(0).Sub(p.Primes[0], one)
+ q1 := big.NewInt(0).Sub(p.Primes[1], one)
+ exp1 := big.NewInt(0).Mod(p.D, p1)
+ exp2 := big.NewInt(0).Mod(p.D, q1)
+ coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
+
+ exponent1 := toBase64(exp1.Bytes())
+ exponent2 := toBase64(exp2.Bytes())
+ coefficient := toBase64(coeff.Bytes())
+
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Modulus: " + modulus + "\n" +
+ "PublicExponent: " + publicExponent + "\n" +
+ "PrivateExponent: " + privateExponent + "\n" +
+ "Prime1: " + prime1 + "\n" +
+ "Prime2: " + prime2 + "\n" +
+ "Exponent1: " + exponent1 + "\n" +
+ "Exponent2: " + exponent2 + "\n" +
+ "Coefficient: " + coefficient + "\n"
+
+ case *ecdsa.PrivateKey:
+ var intlen int
+ switch r.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ private := toBase64(intToBytes(p.D, intlen))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "PrivateKey: " + private + "\n"
+
+ case *dsa.PrivateKey:
+ T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
+ prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
+ subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
+ base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
+ priv := toBase64(intToBytes(p.X, 20))
+ pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Prime(p): " + prime + "\n" +
+ "Subprime(q): " + subprime + "\n" +
+ "Base(g): " + base + "\n" +
+ "Private_value(x): " + priv + "\n" +
+ "Public_value(y): " + pub + "\n"
+
+ default:
+ return ""
+ }
+}
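A sketch of persisting a freshly generated key with PrivateKeyString (editor's illustration, not part of the vendored code; the file-name convention mirrors the K<zone>+<alg>+<keytag> names used in the tests):

package main

import (
	"fmt"
	"os"

	"github.com/miekg/dns"
)

func main() {
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 256
	key.Protocol = 3
	key.Algorithm = dns.ECDSAP256SHA256
	priv, err := key.Generate(256)
	if err != nil {
		panic(err)
	}

	// e.g. "Kexample.org.+013+01234.key" and ".private"
	base := fmt.Sprintf("K%s+%03d+%05d", key.Hdr.Name, key.Algorithm, key.KeyTag())
	if err := os.WriteFile(base+".key", []byte(key.String()+"\n"), 0644); err != nil {
		panic(err)
	}
	if err := os.WriteFile(base+".private", []byte(key.PrivateKeyString(priv)), 0600); err != nil {
		panic(err)
	}
}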
diff --git a/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/miekg/dns/dnssec_test.go
new file mode 100644
index 000000000..ca085ed3b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_test.go
@@ -0,0 +1,733 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func getKey() *DNSKEY {
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
+ return key
+}
+
+func getSoa() *SOA {
+ soa := new(SOA)
+ soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
+ soa.Ns = "open.nlnetlabs.nl."
+ soa.Mbox = "miekg.atoom.net."
+ soa.Serial = 1293945905
+ soa.Refresh = 14400
+ soa.Retry = 3600
+ soa.Expire = 604800
+ soa.Minttl = 86400
+ return soa
+}
+
+func TestGenerateEC(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = ECDSAP256SHA256
+ privkey, _ := key.Generate(256)
+ t.Log(key.String())
+ t.Log(key.PrivateKeyString(privkey))
+}
+
+func TestGenerateDSA(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = DSA
+ privkey, _ := key.Generate(1024)
+ t.Log(key.String())
+ t.Log(key.PrivateKeyString(privkey))
+}
+
+func TestGenerateRSA(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ privkey, _ := key.Generate(1024)
+ t.Log(key.String())
+ t.Log(key.PrivateKeyString(privkey))
+}
+
+func TestSecure(t *testing.T) {
+ soa := getSoa()
+
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.TypeCovered = TypeSOA
+ sig.Algorithm = RSASHA256
+ sig.Labels = 2
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+ sig.OrigTtl = 14400
+ sig.KeyTag = 12051
+ sig.SignerName = "miek.nl."
+ sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M="
+
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
+
+ // It should validate. Period is checked separately, so this will keep on working
+ if sig.Verify(key, []RR{soa}) != nil {
+ t.Error("failure to validate")
+ }
+}
+
+func TestSignature(t *testing.T) {
+ sig := new(RRSIG)
+ sig.Hdr.Name = "miek.nl."
+ sig.Hdr.Class = ClassINET
+ sig.Hdr.Ttl = 3600
+ sig.TypeCovered = TypeDNSKEY
+ sig.Algorithm = RSASHA1
+ sig.Labels = 2
+ sig.OrigTtl = 4000
+ sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970
+ sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970
+ sig.KeyTag = 34641
+ sig.SignerName = "miek.nl."
+ sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"
+
+ // Should not be valid
+ if sig.ValidityPeriod(time.Now()) {
+ t.Error("should not be valid")
+ }
+
+ sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980
+ sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100
+ if !sig.ValidityPeriod(time.Now()) {
+ t.Error("should be valid")
+ }
+}
+
+func TestSignVerify(t *testing.T) {
+ // The record we want to sign
+ soa := new(SOA)
+ soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
+ soa.Ns = "open.nlnetlabs.nl."
+ soa.Mbox = "miekg.atoom.net."
+ soa.Serial = 1293945905
+ soa.Refresh = 14400
+ soa.Retry = 3600
+ soa.Expire = 604800
+ soa.Minttl = 86400
+
+ soa1 := new(SOA)
+ soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0}
+ soa1.Ns = "open.nlnetlabs.nl."
+ soa1.Mbox = "miekg.atoom.net."
+ soa1.Serial = 1293945905
+ soa1.Refresh = 14400
+ soa1.Retry = 3600
+ soa1.Expire = 604800
+ soa1.Minttl = 86400
+
+ srv := new(SRV)
+ srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0}
+ srv.Port = 1000
+ srv.Weight = 800
+ srv.Target = "web1.miek.nl."
+
+ hinfo := &HINFO{
+ Hdr: RR_Header{
+ Name: "miek.nl.",
+ Rrtype: TypeHINFO,
+ Class: ClassINET,
+ Ttl: 3789,
+ },
+ Cpu: "X",
+ Os: "Y",
+ }
+
+ // With this key
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ privkey, _ := key.Generate(512)
+
+ // Fill in the values of the Sig, before signing
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.TypeCovered = soa.Hdr.Rrtype
+ sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3
+ sig.OrigTtl = soa.Hdr.Ttl
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+	sig.KeyTag = key.KeyTag() // Get the key tag from the key
+ sig.SignerName = key.Hdr.Name
+ sig.Algorithm = RSASHA256
+
+ for _, r := range []RR{soa, soa1, srv, hinfo} {
+ if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{r}); err != nil {
+ t.Error("failure to sign the record:", err)
+ continue
+ }
+ if err := sig.Verify(key, []RR{r}); err != nil {
+ t.Error("failure to validate")
+ continue
+ }
+ t.Logf("validated: %s", r.Header().Name)
+ }
+}
+
+func Test65534(t *testing.T) {
+ t6 := new(RFC3597)
+ t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0}
+ t6.Rdata = "505D870001"
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ privkey, _ := key.Generate(1024)
+
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.TypeCovered = t6.Hdr.Rrtype
+ sig.Labels = uint8(CountLabel(t6.Hdr.Name))
+ sig.OrigTtl = t6.Hdr.Ttl
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+ sig.KeyTag = key.KeyTag()
+ sig.SignerName = key.Hdr.Name
+ sig.Algorithm = RSASHA256
+ if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{t6}); err != nil {
+ t.Error(err)
+ t.Error("failure to sign the TYPE65534 record")
+ }
+ if err := sig.Verify(key, []RR{t6}); err != nil {
+ t.Error(err)
+ t.Error("failure to validate")
+ } else {
+ t.Logf("validated: %s", t6.Header().Name)
+ }
+}
+
+func TestDnskey(t *testing.T) {
+ pubkey, err := ReadRR(strings.NewReader(`
+miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b}
+`), "Kmiek.nl.+010+05240.key")
+ if err != nil {
+ t.Fatal(err)
+ }
+ privStr := `Private-key-format: v1.3
+Algorithm: 10 (RSASHA512)
+Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs=
+PublicExponent: AQAB
+PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE=
+Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ==
+Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw==
+Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ==
+Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw==
+Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ==
+`
+ privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr),
+ "Kmiek.nl.+010+05240.private")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" {
+ t.Error("pubkey is not what we've read")
+ }
+ if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr {
+ t.Error("privkey is not what we've read")
+ t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey))
+ }
+}
+
+func TestTag(t *testing.T) {
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 3600
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
+
+ tag := key.KeyTag()
+ if tag != 12051 {
+ t.Errorf("wrong key tag: %d for key %v", tag, key)
+ }
+}
+
+func TestKeyRSA(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 3600
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ priv, _ := key.Generate(2048)
+
+ soa := new(SOA)
+ soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
+ soa.Ns = "open.nlnetlabs.nl."
+ soa.Mbox = "miekg.atoom.net."
+ soa.Serial = 1293945905
+ soa.Refresh = 14400
+ soa.Retry = 3600
+ soa.Expire = 604800
+ soa.Minttl = 86400
+
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.TypeCovered = TypeSOA
+ sig.Algorithm = RSASHA256
+ sig.Labels = 2
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+ sig.OrigTtl = soa.Hdr.Ttl
+ sig.KeyTag = key.KeyTag()
+ sig.SignerName = key.Hdr.Name
+
+ if err := sig.Sign(priv.(*rsa.PrivateKey), []RR{soa}); err != nil {
+ t.Error("failed to sign")
+ return
+ }
+ if err := sig.Verify(key, []RR{soa}); err != nil {
+ t.Error("failed to verify")
+ }
+}
+
+func TestKeyToDS(t *testing.T) {
+ key := new(DNSKEY)
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 3600
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = RSASHA256
+ key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
+
+ ds := key.ToDS(SHA1)
+ if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" {
+ t.Errorf("wrong DS digest for SHA1\n%v", ds)
+ }
+}
+
+func TestSignRSA(t *testing.T) {
+ pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ"
+
+ priv := `Private-key-format: v1.3
+Algorithm: 5 (RSASHA1)
+Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k=
+PublicExponent: AQAB
+PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk=
+Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw==
+Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw==
+Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ==
+Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw==
+Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw==
+Created: 20110302104537
+Publish: 20110302104537
+Activate: 20110302104537`
+
+ xk, _ := NewRR(pub)
+ k := xk.(*DNSKEY)
+ p, err := k.NewPrivateKey(priv)
+ if err != nil {
+ t.Error(err)
+ }
+ switch priv := p.(type) {
+ case *rsa.PrivateKey:
+ if 65537 != priv.PublicKey.E {
+			t.Error("exponent should be 65537")
+ }
+ default:
+ t.Errorf("we should have read an RSA key: %v", priv)
+ }
+ if k.KeyTag() != 37350 {
+ t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k)
+ }
+
+ soa := new(SOA)
+ soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
+ soa.Ns = "open.nlnetlabs.nl."
+ soa.Mbox = "miekg.atoom.net."
+ soa.Serial = 1293945905
+ soa.Refresh = 14400
+ soa.Retry = 3600
+ soa.Expire = 604800
+ soa.Minttl = 86400
+
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+ sig.KeyTag = k.KeyTag()
+ sig.SignerName = k.Hdr.Name
+ sig.Algorithm = k.Algorithm
+
+ sig.Sign(p.(*rsa.PrivateKey), []RR{soa})
+ if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" {
+ t.Errorf("signature is not correct: %v", sig)
+ }
+}
+
+func TestSignVerifyECDSA(t *testing.T) {
+ pub := `example.net. 3600 IN DNSKEY 257 3 14 (
+ xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1
+ w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8
+ /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )`
+ priv := `Private-key-format: v1.2
+Algorithm: 14 (ECDSAP384SHA384)
+PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
+
+ eckey, err := NewRR(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // TODO: Create separate test for this
+ ds := eckey.(*DNSKEY).ToDS(SHA384)
+ if ds.KeyTag != 10771 {
+ t.Fatal("wrong keytag on DS")
+ }
+ if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" {
+ t.Fatal("wrong DS Digest")
+ }
+ a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1")
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.Expiration, _ = StringToTime("20100909102025")
+ sig.Inception, _ = StringToTime("20100812102025")
+ sig.KeyTag = eckey.(*DNSKEY).KeyTag()
+ sig.SignerName = eckey.(*DNSKEY).Hdr.Name
+ sig.Algorithm = eckey.(*DNSKEY).Algorithm
+
+ if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{a}) != nil {
+ t.Fatal("failure to sign the record")
+ }
+
+ if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil {
+ t.Fatalf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
+ eckey.(*DNSKEY).String(),
+ a.String(),
+ sig.String(),
+ eckey.(*DNSKEY).PrivateKeyString(privkey),
+ err,
+ )
+ }
+}
+
+func TestSignVerifyECDSA2(t *testing.T) {
+ srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv := srv1.(*SRV)
+
+ // With this key
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = ECDSAP256SHA256
+ privkey, err := key.Generate(256)
+ if err != nil {
+ t.Fatal("failure to generate key")
+ }
+
+ // Fill in the values of the Sig, before signing
+ sig := new(RRSIG)
+ sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
+ sig.TypeCovered = srv.Hdr.Rrtype
+ sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3
+ sig.OrigTtl = srv.Hdr.Ttl
+ sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
+ sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05"
+	sig.KeyTag = key.KeyTag() // Get the key tag from the key
+ sig.SignerName = key.Hdr.Name
+ sig.Algorithm = ECDSAP256SHA256
+
+ if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil {
+ t.Fatal("failure to sign the record")
+ }
+
+ err = sig.Verify(key, []RR{srv})
+ if err != nil {
+ t.Logf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
+ key.String(),
+ srv.String(),
+ sig.String(),
+ key.PrivateKeyString(privkey),
+ err,
+ )
+ }
+}
+
+// Here the test vectors from the relevant RFCs are checked.
+// rfc6605 6.1
+func TestRFC6605P256(t *testing.T) {
+ exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 (
+ GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb
+ krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )`
+ exPriv := `Private-key-format: v1.2
+Algorithm: 13 (ECDSAP256SHA256)
+PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
+ rrDNSKEY, err := NewRR(exDNSKEY)
+ if err != nil {
+ t.Fatal(err)
+ }
+ priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exDS := `example.net. 3600 IN DS 55648 13 2 (
+ b4c8c1fe2e7477127b27115656ad6256f424625bf5c1
+ e2770ce6d6e37df61d17 )`
+ rrDS, err := NewRR(exDS)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256)
+ if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
+ t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
+ }
+
+ exA := `www.example.net. 3600 IN A 192.0.2.1`
+ exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 (
+ 20100909100439 20100812100439 55648 example.net.
+ qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA
+ yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )`
+ rrA, err := NewRR(exA)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rrRRSIG, err := NewRR(exRRSIG)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
+ t.Errorf("failure to validate the spec RRSIG: %v", err)
+ }
+
+ ourRRSIG := &RRSIG{
+ Hdr: RR_Header{
+ Ttl: rrA.Header().Ttl,
+ },
+ KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(),
+ SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name,
+ Algorithm: rrDNSKEY.(*DNSKEY).Algorithm,
+ }
+ ourRRSIG.Expiration, _ = StringToTime("20100909100439")
+ ourRRSIG.Inception, _ = StringToTime("20100812100439")
+ err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
+ t.Errorf("failure to validate our RRSIG: %v", err)
+ }
+
+ // Signatures are randomized
+ rrRRSIG.(*RRSIG).Signature = ""
+ ourRRSIG.Signature = ""
+ if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
+ t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
+ }
+}
+
+// rfc6605 6.2
+func TestRFC6605P384(t *testing.T) {
+ exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 14 (
+ xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1
+ w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8
+ /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )`
+ exPriv := `Private-key-format: v1.2
+Algorithm: 14 (ECDSAP384SHA384)
+PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
+ rrDNSKEY, err := NewRR(exDNSKEY)
+ if err != nil {
+ t.Fatal(err)
+ }
+ priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exDS := `example.net. 3600 IN DS 10771 14 4 (
+ 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8
+ d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94
+ 6df983d6 )`
+ rrDS, err := NewRR(exDS)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384)
+ if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
+ t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
+ }
+
+ exA := `www.example.net. 3600 IN A 192.0.2.1`
+ exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 (
+ 20100909102025 20100812102025 10771 example.net.
+ /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP
+ 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz
+ WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )`
+ rrA, err := NewRR(exA)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rrRRSIG, err := NewRR(exRRSIG)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
+ t.Errorf("failure to validate the spec RRSIG: %v", err)
+ }
+
+ ourRRSIG := &RRSIG{
+ Hdr: RR_Header{
+ Ttl: rrA.Header().Ttl,
+ },
+ KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(),
+ SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name,
+ Algorithm: rrDNSKEY.(*DNSKEY).Algorithm,
+ }
+ ourRRSIG.Expiration, _ = StringToTime("20100909102025")
+ ourRRSIG.Inception, _ = StringToTime("20100812102025")
+ err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
+ t.Errorf("failure to validate our RRSIG: %v", err)
+ }
+
+ // Signatures are randomized
+ rrRRSIG.(*RRSIG).Signature = ""
+ ourRRSIG.Signature = ""
+ if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
+ t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
+ }
+}
+
+func TestInvalidRRSet(t *testing.T) {
+ goodRecords := make([]RR, 2)
+ goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ goodRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}}
+
+ // Generate key
+ keyname := "cloudflare.com."
+ key := &DNSKEY{
+ Hdr: RR_Header{Name: keyname, Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 0},
+ Algorithm: ECDSAP256SHA256,
+ Flags: ZONE,
+ Protocol: 3,
+ }
+ privatekey, err := key.Generate(256)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // Need to fill in: Inception, Expiration, KeyTag, SignerName and Algorithm
+ curTime := time.Now()
+ signature := &RRSIG{
+ Inception: uint32(curTime.Unix()),
+ Expiration: uint32(curTime.Add(time.Hour).Unix()),
+ KeyTag: key.KeyTag(),
+ SignerName: keyname,
+ Algorithm: ECDSAP256SHA256,
+ }
+
+ // Inconsistent name between records
+ badRecords := make([]RR, 2)
+ badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ badRecords[1] = &TXT{Hdr: RR_Header{Name: "nama.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}}
+
+ if IsRRset(badRecords) {
+ t.Fatal("Record set with inconsistent names considered valid")
+ }
+
+ badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ badRecords[1] = &A{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeA, Class: ClassINET, Ttl: 0}}
+
+ if IsRRset(badRecords) {
+ t.Fatal("Record set with inconsistent record types considered valid")
+ }
+
+ badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ badRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassCHAOS, Ttl: 0}, Txt: []string{"_o/"}}
+
+ if IsRRset(badRecords) {
+ t.Fatal("Record set with inconsistent record class considered valid")
+ }
+
+ // Sign the good record set and then make sure verification fails on the bad record set
+ if err := signature.Sign(privatekey.(crypto.Signer), goodRecords); err != nil {
+ t.Fatal("Signing good records failed")
+ }
+
+ if err := signature.Verify(key, badRecords); err != ErrRRset {
+ t.Fatal("Verification did not return ErrRRset with inconsistent records")
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go
new file mode 100644
index 000000000..9ed03f296
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnsutil/util.go
@@ -0,0 +1,79 @@
+// Package dnsutil contains higher-level methods useful with the dns
+// package. While package dns implements the DNS protocols itself,
+// these functions are related but not directly required for protocol
+// processing. They are often useful in preparing input/output of the
+// functions in package dns.
+package dnsutil
+
+import (
+ "strings"
+
+ "github.com/miekg/dns"
+)
+
+// AddOrigin adds origin to s if s is not already a FQDN.
+// Note that the result may not be a FQDN. If origin does not end
+// with a ".", the result won't either.
+// This implements the zonefile convention (specified in RFC 1035,
+// Section "5.1. Format") that "@" represents the
+// apex (bare) domain; e.g. AddOrigin("@", "foo.com.") returns "foo.com.".
+func AddOrigin(s, origin string) string {
+ // ("foo.", "origin.") -> "foo." (already a FQDN)
+ // ("foo", "origin.") -> "foo.origin."
+	// ("foo", "origin") -> "foo.origin"
+ // ("@", "origin.") -> "origin." (@ represents the apex (bare) domain)
+ // ("", "origin.") -> "origin." (not obvious)
+ // ("foo", "") -> "foo" (not obvious)
+
+ if dns.IsFqdn(s) {
+ return s // s is already a FQDN, no need to mess with it.
+ }
+ if len(origin) == 0 {
+ return s // Nothing to append.
+ }
+ if s == "@" || len(s) == 0 {
+ return origin // Expand apex.
+ }
+
+ if origin == "." {
+ return s + origin // AddOrigin(s, ".") is an expensive way to add a ".".
+ }
+
+ return s + "." + origin // The simple case.
+}
+
+// TrimDomainName trims origin from s if s is a subdomain.
+// This function will never return "", but returns "@" instead (@ represents the apex (bare) domain).
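+// For example, TrimDomainName("foo.example.com.", "example.com.") returns "foo".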
+func TrimDomainName(s, origin string) string {
+ // An apex (bare) domain is always returned as "@".
+ // If the return value ends in a ".", the domain was not the suffix.
+ // origin can end in "." or not. Either way the results should be the same.
+
+ if len(s) == 0 {
+ return "@" // Return the apex (@) rather than "".
+ }
+ // Someone is using TrimDomainName(s, ".") to remove a dot if it exists.
+ if origin == "." {
+ return strings.TrimSuffix(s, origin)
+ }
+
+	// Dude, you aren't even in the right subdomain!
+ if !dns.IsSubDomain(origin, s) {
+ return s
+ }
+
+ slabels := dns.Split(s)
+ olabels := dns.Split(origin)
+ m := dns.CompareDomainName(s, origin)
+ if len(olabels) == m {
+ if len(olabels) == len(slabels) {
+ return "@" // origin == s
+ }
+ if (s[0] == '.') && (len(slabels) == (len(olabels) + 1)) {
+ return "@" // TrimDomainName(".foo.", "foo.")
+ }
+ }
+
+ // Return the first (len-m) labels:
+ return s[:slabels[len(slabels)-m]-1]
+}
diff --git a/vendor/github.com/miekg/dns/dnsutil/util_test.go b/vendor/github.com/miekg/dns/dnsutil/util_test.go
new file mode 100644
index 000000000..0f1ecec8e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnsutil/util_test.go
@@ -0,0 +1,130 @@
+package dnsutil
+
+import "testing"
+
+func TestAddOrigin(t *testing.T) {
+ var tests = []struct{ e1, e2, expected string }{
+ {"@", "example.com", "example.com"},
+ {"foo", "example.com", "foo.example.com"},
+ {"foo.", "example.com", "foo."},
+ {"@", "example.com.", "example.com."},
+ {"foo", "example.com.", "foo.example.com."},
+ {"foo.", "example.com.", "foo."},
+ // Oddball tests:
+ // In general origin should not be "" or "." but at least
+ // these tests verify we don't crash and will keep results
+ // from changing unexpectedly.
+ {"*.", "", "*."},
+ {"@", "", "@"},
+ {"foobar", "", "foobar"},
+ {"foobar.", "", "foobar."},
+ {"*.", ".", "*."},
+ {"@", ".", "."},
+ {"foobar", ".", "foobar."},
+ {"foobar.", ".", "foobar."},
+ }
+ for _, test := range tests {
+ actual := AddOrigin(test.e1, test.e2)
+ if test.expected != actual {
+			t.Errorf("AddOrigin(%#v, %#v) expected %#v, got %#v\n", test.e1, test.e2, test.expected, actual)
+ }
+ }
+}
+
+func TestTrimDomainName(t *testing.T) {
+
+ // Basic tests.
+ // Try trimming "example.com" and "example.com." from typical use cases.
+ var tests_examplecom = []struct{ experiment, expected string }{
+ {"foo.example.com", "foo"},
+ {"foo.example.com.", "foo"},
+ {".foo.example.com", ".foo"},
+ {".foo.example.com.", ".foo"},
+ {"*.example.com", "*"},
+ {"example.com", "@"},
+ {"example.com.", "@"},
+ {"com.", "com."},
+ {"foo.", "foo."},
+ {"serverfault.com.", "serverfault.com."},
+ {"serverfault.com", "serverfault.com"},
+ {".foo.ronco.com", ".foo.ronco.com"},
+ {".foo.ronco.com.", ".foo.ronco.com."},
+ }
+ for _, dom := range []string{"example.com", "example.com."} {
+ for i, test := range tests_examplecom {
+ actual := TrimDomainName(test.experiment, dom)
+ if test.expected != actual {
+ t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual)
+ }
+ }
+ }
+
+ // Paranoid tests.
+	// These tests shouldn't be needed but I was wary of off-by-one errors.
+ // In theory, these can't happen because there are no single-letter TLDs,
+	// but it is good to exercise the code this way.
+ var tests = []struct{ experiment, expected string }{
+ {"", "@"},
+ {".", "."},
+ {"a.b.c.d.e.f.", "a.b.c.d.e"},
+ {"b.c.d.e.f.", "b.c.d.e"},
+ {"c.d.e.f.", "c.d.e"},
+ {"d.e.f.", "d.e"},
+ {"e.f.", "e"},
+ {"f.", "@"},
+ {".a.b.c.d.e.f.", ".a.b.c.d.e"},
+ {".b.c.d.e.f.", ".b.c.d.e"},
+ {".c.d.e.f.", ".c.d.e"},
+ {".d.e.f.", ".d.e"},
+ {".e.f.", ".e"},
+ {".f.", "@"},
+ {"a.b.c.d.e.f", "a.b.c.d.e"},
+ {"a.b.c.d.e.", "a.b.c.d.e."},
+ {"a.b.c.d.e", "a.b.c.d.e"},
+ {"a.b.c.d.", "a.b.c.d."},
+ {"a.b.c.d", "a.b.c.d"},
+ {"a.b.c.", "a.b.c."},
+ {"a.b.c", "a.b.c"},
+ {"a.b.", "a.b."},
+ {"a.b", "a.b"},
+ {"a.", "a."},
+ {"a", "a"},
+ {".a.b.c.d.e.f", ".a.b.c.d.e"},
+ {".a.b.c.d.e.", ".a.b.c.d.e."},
+ {".a.b.c.d.e", ".a.b.c.d.e"},
+ {".a.b.c.d.", ".a.b.c.d."},
+ {".a.b.c.d", ".a.b.c.d"},
+ {".a.b.c.", ".a.b.c."},
+ {".a.b.c", ".a.b.c"},
+ {".a.b.", ".a.b."},
+ {".a.b", ".a.b"},
+ {".a.", ".a."},
+ {".a", ".a"},
+ }
+ for _, dom := range []string{"f", "f."} {
+ for i, test := range tests {
+ actual := TrimDomainName(test.experiment, dom)
+ if test.expected != actual {
+ t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual)
+ }
+ }
+ }
+
+ // Test cases for bugs found in the wild.
+ // These test cases provide both origin, s, and the expected result.
+	// If you find a bug in the wild, this is probably the easiest place
+ // to add it as a test case.
+ var tests_wild = []struct{ e1, e2, expected string }{
+ {"mathoverflow.net.", ".", "mathoverflow.net"},
+ {"mathoverflow.net", ".", "mathoverflow.net"},
+ {"", ".", "@"},
+ {"@", ".", "@"},
+ }
+ for i, test := range tests_wild {
+ actual := TrimDomainName(test.e1, test.e2)
+ if test.expected != actual {
+ t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.e1, test.e2, test.expected, actual)
+ }
+ }
+
+}
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
new file mode 100644
index 000000000..f3555e433
--- /dev/null
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -0,0 +1,251 @@
+/*
+Package dns implements a full featured interface to the Domain Name System.
+Server- and client-side programming is supported.
+The package allows complete control over what is sent out to the DNS. The package
+API follows the less-is-more principle, by presenting a small, clean interface.
+
+The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
+TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
+Note that domain names MUST be fully qualified before sending them; unqualified
+names in a message will result in a packing failure.
+
+Resource records are native types. They are not stored in wire format.
+Basic usage pattern for creating a new resource record:
+
+ r := new(dns.MX)
+ r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
+ Class: dns.ClassINET, Ttl: 3600}
+ r.Preference = 10
+ r.Mx = "mx.miek.nl."
+
+Or directly from a string:
+
+ mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+
+Or when the default TTL (3600) and class (IN) suit you:
+
+ mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
+
+Or even:
+
+ mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
+
+In the DNS, messages are exchanged; these messages contain resource
+records (sets). Basic use pattern for creating a message:
+
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+
+Or when not certain if the domain name is fully qualified:
+
+ m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
+
+The message m is now a message with the question section set to ask
+the MX records for the miek.nl. zone.
+
+The following is slightly more verbose, but more flexible:
+
+ m1 := new(dns.Msg)
+ m1.Id = dns.Id()
+ m1.RecursionDesired = true
+ m1.Question = make([]dns.Question, 1)
+ m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
+
+After creating a message it can be sent.
+Basic use pattern for synchronously querying the DNS at a
+server configured on 127.0.0.1 and port 53:
+
+ c := new(dns.Client)
+ in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
+
+Suppressing multiple outstanding queries (with the same question, type and
+class) is as easy as setting:
+
+ c.SingleInflight = true
+
+If these "advanced" features are not needed, a simple UDP query can be sent
+with:
+
+ in, err := dns.Exchange(m1, "127.0.0.1:53")
+
+When this function returns you will get a dns message. A dns message consists
+of four sections.
+The question section: in.Question, the answer section: in.Answer,
+the authority section: in.Ns and the additional section: in.Extra.
+
+Each of these sections (except the Question section) contains a []RR. Basic
+use pattern for accessing the rdata of a TXT RR as the first RR in
+the Answer section:
+
+ if t, ok := in.Answer[0].(*dns.TXT); ok {
+ // do something with t.Txt
+ }
+
+Domain Name and TXT Character String Representations
+
+Both domain names and TXT character strings are converted to presentation
+form both when unpacked and when converted to strings.
+
+For TXT character strings, tabs, carriage returns and line feeds will be
+converted to \t, \r and \n respectively. Backslashes and quotation marks
+will be escaped. Bytes below 32 and above 127 will be converted to \DDD
+form.
+
+For domain names, in addition to the above rules, brackets, periods,
+spaces, semicolons and the at symbol are escaped.
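+
+As a rough sketch (the owner name and text below are placeholders only),
+a TXT string containing a literal tab is presented with the \t escape:
+
+	txt := &dns.TXT{Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeTXT,
+		Class: dns.ClassINET, Ttl: 3600}, Txt: []string{"col1\tcol2"}}
+	fmt.Println(txt) // the tab inside the string is printed as \t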
+
+DNSSEC
+
+DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
+uses public key cryptography to sign resource records. The
+public keys are stored in DNSKEY records and the signatures in RRSIG records.
+
+Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
+to a request.
+
+ m := new(dns.Msg)
+ m.SetEdns0(4096, true)
+
+Signature generation, signature verification and key generation are all supported.
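+
+As a minimal sketch, modelled on the package tests (the owner name, TTL and
+algorithm below are placeholders only), signing and verifying an RRset looks
+like:
+
+	key := &dns.DNSKEY{
+		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
+		Flags:     dns.ZONE,
+		Protocol:  3,
+		Algorithm: dns.ECDSAP256SHA256,
+	}
+	priv, err := key.Generate(256)
+	// handle err
+
+	sig := &dns.RRSIG{
+		Hdr:        dns.RR_Header{Ttl: 3600},
+		KeyTag:     key.KeyTag(),
+		SignerName: key.Hdr.Name,
+		Algorithm:  key.Algorithm,
+		Inception:  uint32(time.Now().Unix()),
+		Expiration: uint32(time.Now().Add(time.Hour).Unix()),
+	}
+	err = sig.Sign(priv.(crypto.Signer), rrset) // rrset is a []dns.RR sharing one name, type and class
+	err = sig.Verify(key, rrset)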
+
+DYNAMIC UPDATES
+
+Dynamic updates reuse the DNS message format, but rename three of
+the sections. Question is Zone, Answer is Prerequisite, Authority is
+Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
+
+You can set a rather complex set of rules for the existence or absence of
+certain resource records or names in a zone to specify if resource records
+should be added or removed. The table from RFC 2136, supplemented with the Go
+DNS function names, shows which functions exist to specify the prerequisites.
+
+ 3.2.4 - Table Of Metavalues Used In Prerequisite Section
+
+ CLASS TYPE RDATA Meaning Function
+ --------------------------------------------------------------
+ ANY ANY empty Name is in use dns.NameUsed
+ ANY rrset empty RRset exists (value indep) dns.RRsetUsed
+ NONE ANY empty Name is not in use dns.NameNotUsed
+ NONE rrset empty RRset does not exist dns.RRsetNotUsed
+ zone rrset rr RRset exists (value dep) dns.Used
+
+The prerequisite section can also be left empty.
+If you have decided on the prerequisites you can tell what RRs should
+be added or deleted. The next table shows the options you have and
+what functions to call.
+
+ 3.4.2.6 - Table Of Metavalues Used In Update Section
+
+ CLASS TYPE RDATA Meaning Function
+ ---------------------------------------------------------------
+ ANY ANY empty Delete all RRsets from name dns.RemoveName
+ ANY rrset empty Delete an RRset dns.RemoveRRset
+ NONE rrset rr Delete an RR from RRset dns.Remove
+ zone rrset rr Add to an RRset dns.Insert
+
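+A brief sketch of sending an update that inserts a record (the zone, name and
+address here are placeholders only):
+
+	rr, _ := dns.NewRR("name.example.org. 300 IN A 192.0.2.1")
+	m := new(dns.Msg)
+	m.SetUpdate("example.org.")
+	m.Insert([]dns.RR{rr})
+	// send m with a client, for instance c.Exchange(m, "127.0.0.1:53")
+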
+TRANSACTION SIGNATURE
+
+A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
+The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
+
+Basic use pattern when querying with a TSIG name "axfr." (note that these key names
+must be fully qualified - as they are domain names) and the base64 secret
+"so6ZGir4GPAqINNh9U5c3A==":
+
+ c := new(dns.Client)
+ c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ ...
+ // When sending the TSIG RR is calculated and filled in before sending
+
+When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers) with
+TSIG, this is the basic use pattern. In this example we request an AXFR for
+miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
+and using the server 176.58.119.54:
+
+ t := new(dns.Transfer)
+ m := new(dns.Msg)
+ t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m.SetAxfr("miek.nl.")
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ c, err := t.In(m, "176.58.119.54:53")
+ for r := range c { ... }
+
+You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
+If something is not correct an error is returned.
+
+Basic use pattern for validating and replying to a message that has TSIG set:
+
+ server := &dns.Server{Addr: ":53", Net: "udp"}
+ server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ go server.ListenAndServe()
+ dns.HandleFunc(".", handleRequest)
+
+ func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ if r.IsTsig() != nil {
+ if w.TsigStatus() == nil {
+				// *Msg r has a TSIG record and it was validated
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ } else {
+				// *Msg r has a TSIG record and it was not validated
+ }
+ }
+ w.WriteMsg(m)
+ }
+
+PRIVATE RRS
+
+RFC 6895 sets aside a range of type codes for private use. This range
+is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records, these
+can be used before requesting an official type code from IANA.
+
+See http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
+information.
+
+EDNS0
+
+EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
+by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
+abused.
+Basic use pattern for creating an (empty) OPT RR:
+
+ o := new(dns.OPT)
+ o.Hdr.Name = "." // MUST be the root zone, per definition.
+ o.Hdr.Rrtype = dns.TypeOPT
+
+The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
+interfaces. Currently only a few have been standardized: EDNS0_NSID
+(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
+that these options may be combined in an OPT RR.
+Basic use pattern for a server to check if (and which) options are set:
+
+ // o is a dns.OPT
+ for _, s := range o.Option {
+ switch e := s.(type) {
+ case *dns.EDNS0_NSID:
+ // do stuff with e.Nsid
+ case *dns.EDNS0_SUBNET:
+ // access e.Family, e.Address, etc.
+ }
+ }
+
+SIG(0)
+
+From RFC 2931:
+
+ SIG(0) provides protection for DNS transactions and requests ....
+ ... protection for glue records, DNS requests, protection for message headers
+ on requests and responses, and protection of the overall integrity of a response.
+
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
+secret approach in TSIG.
+Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
+RSASHA512.
+
+Signing subsequent messages in multi-message sessions is not implemented.
+*/
+package dns
diff --git a/vendor/github.com/miekg/dns/dyn_test.go b/vendor/github.com/miekg/dns/dyn_test.go
new file mode 100644
index 000000000..09986a5e4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dyn_test.go
@@ -0,0 +1,3 @@
+package dns
+
+// Find better solution
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
new file mode 100644
index 000000000..7a58aa9b1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -0,0 +1,532 @@
+package dns
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "net"
+ "strconv"
+)
+
+// EDNS0 Option codes.
+const (
+ EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+ EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
+ EDNS0NSID = 0x3 // nsid (RFC5001)
+ EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
+ EDNS0DHU = 0x6 // DS Hash Understood
+ EDNS0N3U = 0x7 // NSEC3 Hash Understood
+ EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
+ EDNS0EXPIRE = 0x9 // EDNS0 expire
+ EDNS0COOKIE = 0xa // EDNS0 Cookie
+ EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
+ EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
+ EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
+ _DO = 1 << 15 // dnssec ok
+)
+
+// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
+// See RFC 6891.
+type OPT struct {
+ Hdr RR_Header
+ Option []EDNS0 `dns:"opt"`
+}
+
+func (rr *OPT) String() string {
+ s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
+ if rr.Do() {
+ s += "flags: do; "
+ } else {
+ s += "flags: ; "
+ }
+ s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
+
+ for _, o := range rr.Option {
+ switch o.(type) {
+ case *EDNS0_NSID:
+ s += "\n; NSID: " + o.String()
+ h, e := o.pack()
+ var r string
+ if e == nil {
+ for _, c := range h {
+ r += "(" + string(c) + ")"
+ }
+ s += " " + r
+ }
+ case *EDNS0_SUBNET:
+ s += "\n; SUBNET: " + o.String()
+ if o.(*EDNS0_SUBNET).DraftOption {
+ s += " (draft)"
+ }
+ case *EDNS0_COOKIE:
+ s += "\n; COOKIE: " + o.String()
+ case *EDNS0_UL:
+ s += "\n; UPDATE LEASE: " + o.String()
+ case *EDNS0_LLQ:
+ s += "\n; LONG LIVED QUERIES: " + o.String()
+ case *EDNS0_DAU:
+ s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
+ case *EDNS0_DHU:
+ s += "\n; DS HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_N3U:
+ s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_LOCAL:
+ s += "\n; LOCAL OPT: " + o.String()
+ }
+ }
+ return s
+}
+
+func (rr *OPT) len() int {
+ l := rr.Hdr.len()
+ for i := 0; i < len(rr.Option); i++ {
+ l += 4 // Account for 2-byte option code and 2-byte option length.
+ lo, _ := rr.Option[i].pack()
+ l += len(lo)
+ }
+ return l
+}
+
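+// The EDNS0 "header" fields are stored in the OPT RR_Header itself: Class
+// carries the requestor's UDP payload size, while Ttl packs the extended
+// RCODE (bits 24-31), the EDNS version (bits 16-23) and the flags, of which
+// only the DO bit (bit 15) is currently defined. The accessors below read and
+// write these fields.
+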
+// return the old value -> delete SetVersion?
+
+// Version returns the EDNS version used. Only zero is defined.
+func (rr *OPT) Version() uint8 {
+ return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
+}
+
+// SetVersion sets the version of EDNS. This is usually zero.
+func (rr *OPT) SetVersion(v uint8) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
+}
+
+// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
+func (rr *OPT) ExtendedRcode() int {
+ return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
+}
+
+// SetExtendedRcode sets the EDNS extended RCODE field.
+func (rr *OPT) SetExtendedRcode(v uint8) {
+ if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
+ return
+ }
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
+}
+
+// UDPSize returns the UDP buffer size.
+func (rr *OPT) UDPSize() uint16 {
+ return rr.Hdr.Class
+}
+
+// SetUDPSize sets the UDP buffer size.
+func (rr *OPT) SetUDPSize(size uint16) {
+ rr.Hdr.Class = size
+}
+
+// Do returns the value of the DO (DNSSEC OK) bit.
+func (rr *OPT) Do() bool {
+ return rr.Hdr.Ttl&_DO == _DO
+}
+
+// SetDo sets the DO (DNSSEC OK) bit.
+func (rr *OPT) SetDo() {
+ rr.Hdr.Ttl |= _DO
+}
+
+// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
+type EDNS0 interface {
+ // Option returns the option code for the option.
+ Option() uint16
+ // pack returns the bytes of the option data.
+ pack() ([]byte, error)
+	// unpack sets the data as found in the buffer. It also sets
+ // the length of the slice as the length of the option data.
+ unpack([]byte) error
+ // String returns the string representation of the option.
+ String() string
+}
+
+// The nsid EDNS0 option is used to retrieve a nameserver
+// identifier. When sending a request Nsid must be set to the empty string.
+// The identifier is an opaque string encoded as hex.
+// Basic use pattern for creating an nsid option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_NSID)
+// e.Code = dns.EDNS0NSID
+// e.Nsid = "AA"
+// o.Option = append(o.Option, e)
+type EDNS0_NSID struct {
+ Code uint16 // Always EDNS0NSID
+ Nsid string // This string needs to be hex encoded
+}
+
+func (e *EDNS0_NSID) pack() ([]byte, error) {
+ h, err := hex.DecodeString(e.Nsid)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID }
+func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
+func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
+
+// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
+// an idea of where the client lives. It can then give back a different
+// answer depending on the location or network topology.
+// Basic use pattern for creating a subnet option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_SUBNET)
+// e.Code = dns.EDNS0SUBNET
+// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
+// e.NetMask = 32 // 32 for IPV4, 128 for IPv6
+// e.SourceScope = 0
+// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
+// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
+// o.Option = append(o.Option, e)
+//
+// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic
+// for which netmask applies to the address. This code will parse all the
+// available bits when unpacking (up to optlen). When packing it will apply
+// SourceNetmask. If you need more advanced logic, patches welcome and good luck.
+type EDNS0_SUBNET struct {
+ Code uint16 // Always EDNS0SUBNET
+ Family uint16 // 1 for IP, 2 for IP6
+ SourceNetmask uint8
+ SourceScope uint8
+ Address net.IP
+ DraftOption bool // Set to true if using the old (0x50fa) option code
+}
+
+func (e *EDNS0_SUBNET) Option() uint16 {
+ if e.DraftOption {
+ return EDNS0SUBNETDRAFT
+ }
+ return EDNS0SUBNET
+}
+
+func (e *EDNS0_SUBNET) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint16(b[0:], e.Family)
+ b[2] = e.SourceNetmask
+ b[3] = e.SourceScope
+ switch e.Family {
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address.To4()) != net.IPv4len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address) != net.IPv6len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ default:
+ return nil, errors.New("dns: bad address family")
+ }
+ return b, nil
+}
+
+func (e *EDNS0_SUBNET) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Family = binary.BigEndian.Uint16(b)
+ e.SourceNetmask = b[2]
+ e.SourceScope = b[3]
+ switch e.Family {
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make([]byte, net.IPv4len)
+ for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
+ addr[i] = b[4+i]
+ }
+ e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make([]byte, net.IPv6len)
+ for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
+ addr[i] = b[4+i]
+ }
+ e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
+ addr[11], addr[12], addr[13], addr[14], addr[15]}
+ default:
+ return errors.New("dns: bad address family")
+ }
+ return nil
+}
+
+func (e *EDNS0_SUBNET) String() (s string) {
+ if e.Address == nil {
+ s = "<nil>"
+ } else if e.Address.To4() != nil {
+ s = e.Address.String()
+ } else {
+ s = "[" + e.Address.String() + "]"
+ }
+ s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
+ return
+}
+
+// The Cookie EDNS0 option
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_COOKIE)
+// e.Code = dns.EDNS0COOKIE
+// e.Cookie = "24a5ac.."
+// o.Option = append(o.Option, e)
+//
+// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
+// always 8 bytes. It may then optionally be followed by the server cookie. The server
+// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
+//
+// cCookie := o.Cookie[:16]
+// sCookie := o.Cookie[16:]
+//
+// There is no guarantee that the Cookie string has a specific length.
+type EDNS0_COOKIE struct {
+ Code uint16 // Always EDNS0COOKIE
+ Cookie string // Hex-encoded cookie data
+}
+
+func (e *EDNS0_COOKIE) pack() ([]byte, error) {
+ h, err := hex.DecodeString(e.Cookie)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
+func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
+func (e *EDNS0_COOKIE) String() string { return e.Cookie }
+
+// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
+// an expiration on an update RR. This is helpful for clients that cannot clean
+// up after themselves. This is a draft RFC and more information can be found at
+// http://files.dns-sd.org/draft-sekar-dns-ul.txt
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_UL)
+// e.Code = dns.EDNS0UL
+// e.Lease = 120 // in seconds
+// o.Option = append(o.Option, e)
+type EDNS0_UL struct {
+ Code uint16 // Always EDNS0UL
+ Lease uint32
+}
+
+func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
+func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
+
+// Copied: http://golang.org/src/pkg/net/dnsmsg.go
+func (e *EDNS0_UL) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint32(b, e.Lease)
+ return b, nil
+}
+
+func (e *EDNS0_UL) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Lease = binary.BigEndian.Uint32(b)
+ return nil
+}
+
+// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+// Implemented for completeness, as the EDNS0 type code is assigned.
+type EDNS0_LLQ struct {
+ Code uint16 // Always EDNS0LLQ
+ Version uint16
+ Opcode uint16
+ Error uint16
+ Id uint64
+ LeaseLife uint32
+}
+
+func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
+
+func (e *EDNS0_LLQ) pack() ([]byte, error) {
+ b := make([]byte, 18)
+ binary.BigEndian.PutUint16(b[0:], e.Version)
+ binary.BigEndian.PutUint16(b[2:], e.Opcode)
+ binary.BigEndian.PutUint16(b[4:], e.Error)
+ binary.BigEndian.PutUint64(b[6:], e.Id)
+ binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
+ return b, nil
+}
+
+func (e *EDNS0_LLQ) unpack(b []byte) error {
+ if len(b) < 18 {
+ return ErrBuf
+ }
+ e.Version = binary.BigEndian.Uint16(b[0:])
+ e.Opcode = binary.BigEndian.Uint16(b[2:])
+ e.Error = binary.BigEndian.Uint16(b[4:])
+ e.Id = binary.BigEndian.Uint64(b[6:])
+ e.LeaseLife = binary.BigEndian.Uint32(b[14:])
+ return nil
+}
+
+func (e *EDNS0_LLQ) String() string {
+ s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
+ " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
+ " " + strconv.FormatUint(uint64(e.LeaseLife), 10)
+ return s
+}
+
+type EDNS0_DAU struct {
+ Code uint16 // Always EDNS0DAU
+ AlgCode []uint8
+}
+
+func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
+func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DAU) String() string {
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
+type EDNS0_DHU struct {
+ Code uint16 // Always EDNS0DHU
+ AlgCode []uint8
+}
+
+func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
+func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DHU) String() string {
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := HashToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
+type EDNS0_N3U struct {
+ Code uint16 // Always EDNS0N3U
+ AlgCode []uint8
+}
+
+func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
+func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_N3U) String() string {
+ // Re-use the hash map
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := HashToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
+type EDNS0_EXPIRE struct {
+ Code uint16 // Always EDNS0EXPIRE
+ Expire uint32
+}
+
+func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
+func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
+
+func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ b[0] = byte(e.Expire >> 24)
+ b[1] = byte(e.Expire >> 16)
+ b[2] = byte(e.Expire >> 8)
+ b[3] = byte(e.Expire)
+ return b, nil
+}
+
+func (e *EDNS0_EXPIRE) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Expire = binary.BigEndian.Uint32(b)
+ return nil
+}
+
+// The EDNS0_LOCAL option is used for local/experimental purposes. The option
+// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
+// (RFC6891), although any unassigned code can actually be used. The content of
+// the option is made available in Data, unaltered.
+// Basic use pattern for creating a local option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_LOCAL)
+// e.Code = dns.EDNS0LOCALSTART
+// e.Data = []byte{72, 82, 74}
+// o.Option = append(o.Option, e)
+type EDNS0_LOCAL struct {
+ Code uint16
+ Data []byte
+}
+
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
+func (e *EDNS0_LOCAL) String() string {
+ return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
+}
+
+func (e *EDNS0_LOCAL) pack() ([]byte, error) {
+ b := make([]byte, len(e.Data))
+ copied := copy(b, e.Data)
+ if copied != len(e.Data) {
+ return nil, ErrBuf
+ }
+ return b, nil
+}
+
+func (e *EDNS0_LOCAL) unpack(b []byte) error {
+ e.Data = make([]byte, len(b))
+ copied := copy(e.Data, b)
+ if copied != len(b) {
+ return ErrBuf
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/miekg/dns/edns_test.go
new file mode 100644
index 000000000..5fd75abb4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns_test.go
@@ -0,0 +1,32 @@
+package dns
+
+import "testing"
+
+func TestOPTTtl(t *testing.T) {
+ e := &OPT{}
+ e.Hdr.Name = "."
+ e.Hdr.Rrtype = TypeOPT
+
+ if e.Do() {
+ t.Errorf("DO bit should be zero")
+ }
+
+ e.SetDo()
+ if !e.Do() {
+ t.Errorf("DO bit should be non-zero")
+ }
+
+ if e.Version() != 0 {
+		t.Errorf("version should be zero")
+ }
+
+ e.SetVersion(42)
+ if e.Version() != 42 {
+ t.Errorf("set 42, expected %d, got %d", 42, e.Version())
+ }
+
+ e.SetExtendedRcode(42)
+ if e.ExtendedRcode() != 42 {
+		t.Errorf("set 42, expected %d, got %d", 42, e.ExtendedRcode())
+ }
+}
diff --git a/vendor/github.com/miekg/dns/example_test.go b/vendor/github.com/miekg/dns/example_test.go
new file mode 100644
index 000000000..64c14962c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/example_test.go
@@ -0,0 +1,146 @@
+package dns_test
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "net"
+
+ "github.com/miekg/dns"
+)
+
+// Retrieve the MX records for miek.nl.
+func ExampleMX() {
+ config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
+ c := new(dns.Client)
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+ m.RecursionDesired = true
+ r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
+ if err != nil {
+ return
+ }
+ if r.Rcode != dns.RcodeSuccess {
+ return
+ }
+ for _, a := range r.Answer {
+ if mx, ok := a.(*dns.MX); ok {
+ fmt.Printf("%s\n", mx.String())
+ }
+ }
+}
+
+// Retrieve the DNSKEY records of a zone and convert them
+// to DS records for SHA1, SHA256 and SHA384.
+func ExampleDS() {
+ config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
+ c := new(dns.Client)
+ m := new(dns.Msg)
+ zone := "miek.nl"
+ m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY)
+ m.SetEdns0(4096, true)
+ r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
+ if err != nil {
+ return
+ }
+ if r.Rcode != dns.RcodeSuccess {
+ return
+ }
+ for _, k := range r.Answer {
+ if key, ok := k.(*dns.DNSKEY); ok {
+ for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} {
+ fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags)
+ }
+ }
+ }
+}
+
+const TypeAPAIR = 0x0F99
+
+type APAIR struct {
+ addr [2]net.IP
+}
+
+func NewAPAIR() dns.PrivateRdata { return new(APAIR) }
+
+func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() }
+func (rd *APAIR) Parse(txt []string) error {
+ if len(txt) != 2 {
+ return errors.New("two addresses required for APAIR")
+ }
+ for i, s := range txt {
+ ip := net.ParseIP(s)
+ if ip == nil {
+ return errors.New("invalid IP in APAIR text representation")
+ }
+ rd.addr[i] = ip
+ }
+ return nil
+}
+
+func (rd *APAIR) Pack(buf []byte) (int, error) {
+ b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...)
+ n := copy(buf, b)
+ if n != len(b) {
+ return n, dns.ErrBuf
+ }
+ return n, nil
+}
+
+func (rd *APAIR) Unpack(buf []byte) (int, error) {
+ ln := net.IPv4len * 2
+ if len(buf) != ln {
+ return 0, errors.New("invalid length of APAIR rdata")
+ }
+ cp := make([]byte, ln)
+ copy(cp, buf) // clone bytes to use them in IPs
+
+	rd.addr[0] = net.IP(cp[:4])
+	rd.addr[1] = net.IP(cp[4:])
+
+ return len(buf), nil
+}
+
+func (rd *APAIR) Copy(dest dns.PrivateRdata) error {
+ cp := make([]byte, rd.Len())
+ _, err := rd.Pack(cp)
+ if err != nil {
+ return err
+ }
+
+ d := dest.(*APAIR)
+	d.addr[0] = net.IP(cp[:4])
+	d.addr[1] = net.IP(cp[4:])
+ return nil
+}
+
+func (rd *APAIR) Len() int {
+ return net.IPv4len * 2
+}
+
+func ExamplePrivateHandle() {
+ dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR)
+ defer dns.PrivateHandleRemove(TypeAPAIR)
+
+ rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)")
+ if err != nil {
+ log.Fatal("could not parse APAIR record: ", err)
+ }
+ fmt.Println(rr)
+ // Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5
+
+ m := new(dns.Msg)
+ m.Id = 12345
+ m.SetQuestion("miek.nl.", TypeAPAIR)
+ m.Answer = append(m.Answer, rr)
+
+ fmt.Println(m)
+ // ;; opcode: QUERY, status: NOERROR, id: 12345
+ // ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
+ //
+ // ;; QUESTION SECTION:
+ // ;miek.nl. IN APAIR
+ //
+ // ;; ANSWER SECTION:
+ // miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5
+}
diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go
new file mode 100644
index 000000000..3f5303c20
--- /dev/null
+++ b/vendor/github.com/miekg/dns/format.go
@@ -0,0 +1,87 @@
+package dns
+
+import (
+ "net"
+ "reflect"
+ "strconv"
+)
+
+// NumField returns the number of rdata fields r has.
+func NumField(r RR) int {
+ return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
+}
+
+// Field returns the rdata field i as a string. Fields are indexed starting from 1.
+// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
+// string where the types are concatenated using a space.
+// Accessing non-existing fields will cause a panic.
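+//
+// For example (a sketch; mx is assumed to be a parsed *MX record such as
+// "miek.nl. 3600 IN MX 10 mx.miek.nl."):
+//
+//	Field(mx, 1) // "10", the Preference
+//	Field(mx, 2) // "mx.miek.nl.", the Mx target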
+func Field(r RR, i int) string {
+ if i == 0 {
+ return ""
+ }
+ d := reflect.ValueOf(r).Elem().Field(i)
+ switch k := d.Kind(); k {
+ case reflect.String:
+ return d.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(d.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(d.Uint(), 10)
+ case reflect.Slice:
+ switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
+ case `dns:"a"`:
+ // TODO(miek): Hmm store this as 16 bytes
+ if d.Len() < net.IPv6len {
+ return net.IPv4(byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint())).String()
+ }
+ return net.IPv4(byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint())).String()
+ case `dns:"aaaa"`:
+ return net.IP{
+ byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint()),
+ byte(d.Index(4).Uint()),
+ byte(d.Index(5).Uint()),
+ byte(d.Index(6).Uint()),
+ byte(d.Index(7).Uint()),
+ byte(d.Index(8).Uint()),
+ byte(d.Index(9).Uint()),
+ byte(d.Index(10).Uint()),
+ byte(d.Index(11).Uint()),
+ byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint()),
+ }.String()
+ case `dns:"nsec"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := Type(d.Index(0).Uint()).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + Type(d.Index(i).Uint()).String()
+ }
+ return s
+ default:
+			// if it does not have a tag it's a string slice
+ fallthrough
+ case `dns:"txt"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := d.Index(0).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + d.Index(i).String()
+ }
+ return s
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/miekg/dns/fuzz_test.go
new file mode 100644
index 000000000..255869730
--- /dev/null
+++ b/vendor/github.com/miekg/dns/fuzz_test.go
@@ -0,0 +1,25 @@
+package dns
+
+import "testing"
+
+func TestFuzzString(t *testing.T) {
+ testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"",
+ ";a ", ";a����������",
+ " NSAP O ", " NSAP N ",
+ " TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6",
+ " TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ",
+ "$GENERATE 0-3 ${441189,5039418474430,o}",
+ "$INCLUDE 00 TYPE00000000000n ",
+ "$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0",
+ }
+ for i, tc := range testcases {
+ rr, err := NewRR(tc)
+ if err == nil {
+ // rr can be nil because we can (for instance) just parse a comment
+ if rr == nil {
+ continue
+ }
+			t.Fatalf("parsed malformed RR %d: %s", i, rr.String())
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
new file mode 100644
index 000000000..e4481a4b0
--- /dev/null
+++ b/vendor/github.com/miekg/dns/generate.go
@@ -0,0 +1,159 @@
+package dns
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Parse the $GENERATE statement as used in BIND9 zones.
+// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
+// We are called after '$GENERATE '. After which we expect:
+// * the range (12-24/2)
+// * lhs (ownername)
+// * [[ttl][class]]
+// * type
+// * rhs (rdata)
+// But we are lazy here; only the range is parsed, and *all* occurrences
+// of $ after that are interpreted.
+// Any errors are returned as a string value; the empty string signals
+// "no error".
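+//
+// For example, a directive such as
+//	$GENERATE 10-12 server$ A 192.0.2.$
+// expands to A records server10, server11 and server12 pointing at
+// 192.0.2.10 through 192.0.2.12 (names and addresses illustrative only).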
+func generate(l lex, c chan lex, t chan *Token, o string) string {
+ step := 1
+ if i := strings.IndexAny(l.token, "/"); i != -1 {
+ if i+1 == len(l.token) {
+ return "bad step in $GENERATE range"
+ }
+ if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
+ if s < 0 {
+ return "bad step in $GENERATE range"
+ }
+ step = s
+ } else {
+ return "bad step in $GENERATE range"
+ }
+ l.token = l.token[:i]
+ }
+ sx := strings.SplitN(l.token, "-", 2)
+ if len(sx) != 2 {
+ return "bad start-stop in $GENERATE range"
+ }
+ start, err := strconv.Atoi(sx[0])
+ if err != nil {
+ return "bad start in $GENERATE range"
+ }
+ end, err := strconv.Atoi(sx[1])
+ if err != nil {
+ return "bad stop in $GENERATE range"
+ }
+ if end < 0 || start < 0 || end < start {
+ return "bad range in $GENERATE range"
+ }
+
+ <-c // _BLANK
+ // Create a complete new string, which we then parse again.
+ s := ""
+BuildRR:
+ l = <-c
+ if l.value != zNewline && l.value != zEOF {
+ s += l.token
+ goto BuildRR
+ }
+ for i := start; i <= end; i += step {
+ var (
+ escape bool
+ dom bytes.Buffer
+ mod string
+ err error
+ offset int
+ )
+
+ for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
+ switch s[j] {
+ case '\\':
+ if escape {
+ dom.WriteByte('\\')
+ escape = false
+ continue
+ }
+ escape = true
+ case '$':
+ mod = "%d"
+ offset = 0
+ if escape {
+ dom.WriteByte('$')
+ escape = false
+ continue
+ }
+ escape = false
+ if j+1 >= len(s) { // End of the string
+ dom.WriteString(fmt.Sprintf(mod, i+offset))
+ continue
+ } else {
+ if s[j+1] == '$' {
+ dom.WriteByte('$')
+ j++
+ continue
+ }
+ }
+ // Search for { and }
+ if s[j+1] == '{' { // Modifier block
+ sep := strings.Index(s[j+2:], "}")
+ if sep == -1 {
+ return "bad modifier in $GENERATE"
+ }
+ mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
+ if err != nil {
+ return err.Error()
+ }
+ j += 2 + sep // Jump to it
+ }
+ dom.WriteString(fmt.Sprintf(mod, i+offset))
+ default:
+ if escape { // Pretty useless here
+ escape = false
+ continue
+ }
+ dom.WriteByte(s[j])
+ }
+ }
+ // Re-parse the RR and send it on the current channel t
+ rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
+ if err != nil {
+ return err.Error()
+ }
+ t <- &Token{RR: rx}
+		// It's more efficient to first build the rrlist and then parse it in
+ // one go! But is this a problem?
+ }
+ return ""
+}
+
+// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
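+// For example, "0,2,d" becomes the format string "%02d" with offset 0, and
+// "-32,4,d" becomes "%04d" with offset -32.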
+func modToPrintf(s string) (string, int, error) {
+ xs := strings.SplitN(s, ",", 3)
+ if len(xs) != 3 {
+ return "", 0, errors.New("bad modifier in $GENERATE")
+ }
+ // xs[0] is offset, xs[1] is width, xs[2] is base
+ if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
+ return "", 0, errors.New("bad base in $GENERATE")
+ }
+ offset, err := strconv.Atoi(xs[0])
+ if err != nil || offset > 255 {
+ return "", 0, errors.New("bad offset in $GENERATE")
+ }
+ width, err := strconv.Atoi(xs[1])
+ if err != nil || width > 255 {
+ return "", offset, errors.New("bad width in $GENERATE")
+ }
+ switch {
+ case width < 0:
+ return "", offset, errors.New("bad width in $GENERATE")
+ case width == 0:
+ return "%" + xs[1] + xs[2], offset, nil
+ }
+ return "%0" + xs[1] + xs[2], offset, nil
+}
diff --git a/vendor/github.com/miekg/dns/idn/code_points.go b/vendor/github.com/miekg/dns/idn/code_points.go
new file mode 100644
index 000000000..129c3742f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/idn/code_points.go
@@ -0,0 +1,2346 @@
+package idn
+
+const (
+ propertyUnknown property = iota // unknown character property
+ propertyPVALID // allowed to be used in IDNs
+ propertyCONTEXTJ // invisible or problematic characters (join controls)
+ propertyCONTEXTO // invisible or problematic characters (others)
+ propertyDISALLOWED // should not be included in IDNs
+ propertyUNASSIGNED // code points that are not designated in the Unicode Standard
+)
+
+// property stores the property of a code point, as described in RFC 5892,
+// section 1
+type property int
+
+// codePoints lists all code points in Unicode Character Database (UCD) Format
+// according to RFC 5892, appendix B.1. Thanks to libidn2 (GNU) -
+// http://www.gnu.org/software/libidn/libidn2/
+var codePoints = []struct {
+ start rune
+ end rune
+ state property
+}{
+ {0x0000, 0x002C, propertyDISALLOWED}, // <control>..COMMA
+ {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS
+ {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS
+ {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE
+ {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT
+ {0x0041, 0x005A, propertyPVALID}, // LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z
+ {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z
+ {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN
+ {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT
+ {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN
+ {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT
+ {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN
+ {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL
+ {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON
+ {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON
+ {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE
+ {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE
+ {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK
+ {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK
+ {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE
+ {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE
+ {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+ {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX
+ {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE
+ {0x010B, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE
+ {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON
+ {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON
+ {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON
+ {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON
+ {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE
+ {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE
+ {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON
+ {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON
+ {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE
+ {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE
+ {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE
+ {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE
+ {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK
+ {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK
+ {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON
+ {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON
+ {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+ {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX
+ {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE
+ {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE
+ {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE
+ {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE
+ {0x0122, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CEDILLA
+ {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA
+ {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+ {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX
+ {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE
+ {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE
+ {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE
+ {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE
+ {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON
+ {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON
+ {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE
+ {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE
+ {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK
+ {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK
+ {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE
+ {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I
+ {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET
+ {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX
+ {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA
+ {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA
+ {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE
+ {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE
+ {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA
+ {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA
+ {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON
+ {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CARON
+ {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI
+ {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE
+ {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE
+ {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE
+ {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA
+ {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA
+ {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON
+ {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON
+ {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE.
+ {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG
+ {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON
+ {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON
+ {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE
+ {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE
+ {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+ {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE
+ {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE
+ {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE
+ {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE
+ {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA
+ {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA
+ {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON
+ {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON
+ {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE
+ {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE
+ {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+ {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX
+ {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA
+ {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA
+ {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON
+ {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON
+ {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA
+ {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA
+ {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON
+ {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON
+ {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE
+ {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE
+ {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE
+ {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE
+ {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON
+ {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON
+ {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE
+ {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE
+ {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE
+ {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE
+ {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+ {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE
+ {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH OGONEK
+ {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK
+ {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+ {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX
+ {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+ {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX
+ {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN
+ {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE
+ {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT ABOVE
+ {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON
+ {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON
+ {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S
+ {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE
+ {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI
+ {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR
+ {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX
+ {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX
+ {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L
+ {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK
+ {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA
+ {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL
+ {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT
+ {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK
+ {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI
+ {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV
+ {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET
+ {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL
+ {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL
+ {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG
+ {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA
+ {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN
+ {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI
+ {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI
+ {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK
+ {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK
+ {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T
+ {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO
+ {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH
+ {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL
+ {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK
+ {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK
+ {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK..
+ {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN
+ {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL
+ {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK
+ {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE
+ {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE
+ {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT
+ {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE
+ {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE
+ {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R
+ {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA
+ {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON
+ {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON
+ {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON
+ {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON
+ {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON
+ {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON
+ {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON
+ {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA
+ {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR
+ {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC
+ {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT
+ {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA
+ {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO
+ {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR
+ {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV
+ {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA
+ {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR
+ {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA
+ {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR
+ {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON
+ {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON
+ {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE
+ {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE
+ {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON
+ {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON
+ {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON
+ {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON
+ {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK
+ {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK
+ {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO
+ {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON
+ {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON
+ {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA
+ {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE
+ {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE
+ {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN CAPITAL LE
+ {0x01F9, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH GRAVE
+ {0x01FA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING ABOVE AND A
+ {0x01FB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING ABOVE AND ACU
+ {0x01FC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH ACUTE
+ {0x01FD, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH ACUTE
+ {0x01FE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+ {0x01FF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE AND ACUTE
+ {0x0200, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
+ {0x0201, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOUBLE GRAVE
+ {0x0202, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH INVERTED BREVE
+ {0x0203, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH INVERTED BREVE
+ {0x0204, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
+ {0x0205, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOUBLE GRAVE
+ {0x0206, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH INVERTED BREVE
+ {0x0207, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH INVERTED BREVE
+ {0x0208, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
+ {0x0209, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOUBLE GRAVE
+ {0x020A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH INVERTED BREVE
+ {0x020B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH INVERTED BREVE
+ {0x020C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
+ {0x020D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE GRAVE
+ {0x020E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH INVERTED BREVE
+ {0x020F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH INVERTED BREVE
+ {0x0210, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
+ {0x0211, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOUBLE GRAVE
+ {0x0212, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH INVERTED BREVE
+ {0x0213, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH INVERTED BREVE
+ {0x0214, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
+ {0x0215, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE GRAVE
+ {0x0216, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH INVERTED BREVE
+ {0x0217, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH INVERTED BREVE
+ {0x0218, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH COMMA BELOW
+ {0x0219, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH COMMA BELOW
+ {0x021A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH COMMA BELOW
+ {0x021B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH COMMA BELOW
+ {0x021C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER YOGH
+ {0x021D, 0x0, propertyPVALID}, // LATIN SMALL LETTER YOGH
+ {0x021E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CARON
+ {0x021F, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CARON
+ {0x0220, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LONG RIGHT LEG
+ {0x0221, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CURL
+ {0x0222, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OU
+ {0x0223, 0x0, propertyPVALID}, // LATIN SMALL LETTER OU
+ {0x0224, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH HOOK
+ {0x0225, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH HOOK
+ {0x0226, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE
+ {0x0227, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE
+ {0x0228, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA
+ {0x0229, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA
+ {0x022A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DIAERESIS AND MA
+ {0x022B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DIAERESIS AND MACR
+ {0x022C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND MACRON
+ {0x022D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND MACRON
+ {0x022E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE
+ {0x022F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE
+ {0x0230, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE AND MA
+ {0x0231, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE AND MACR
+ {0x0232, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH MACRON
+ {0x0233, 0x0239, propertyPVALID}, // LATIN SMALL LETTER Y WITH MACRON..LATIN SMAL
+ {0x023A, 0x023B, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH STROKE..LATIN CA
+ {0x023C, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH STROKE
+ {0x023D, 0x023E, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH BAR..LATIN CAPIT
+ {0x023F, 0x0240, propertyPVALID}, // LATIN SMALL LETTER S WITH SWASH TAIL..LATIN
+ {0x0241, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER GLOTTAL STOP
+ {0x0242, 0x0, propertyPVALID}, // LATIN SMALL LETTER GLOTTAL STOP
+ {0x0243, 0x0246, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH STROKE..LATIN CA
+ {0x0247, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH STROKE
+ {0x0248, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER J WITH STROKE
+ {0x0249, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH STROKE
+ {0x024A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL
+ {0x024B, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH HOOK TAIL
+ {0x024C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH STROKE
+ {0x024D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH STROKE
+ {0x024E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH STROKE
+ {0x024F, 0x02AF, propertyPVALID}, // LATIN SMALL LETTER Y WITH STROKE..LATIN SMAL
+ {0x02B0, 0x02B8, propertyDISALLOWED}, // MODIFIER LETTER SMALL H..MODIFIER LETTER SMA
+ {0x02B9, 0x02C1, propertyPVALID}, // MODIFIER LETTER PRIME..MODIFIER LETTER REVER
+ {0x02C2, 0x02C5, propertyDISALLOWED}, // MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LET
+ {0x02C6, 0x02D1, propertyPVALID}, // MODIFIER LETTER CIRCUMFLEX ACCENT..MODIFIER
+ {0x02D2, 0x02EB, propertyDISALLOWED}, // MODIFIER LETTER CENTRED RIGHT HALF RING..MOD
+ {0x02EC, 0x0, propertyPVALID}, // MODIFIER LETTER VOICING
+ {0x02ED, 0x0, propertyDISALLOWED}, // MODIFIER LETTER UNASPIRATED
+ {0x02EE, 0x0, propertyPVALID}, // MODIFIER LETTER DOUBLE APOSTROPHE
+ {0x02EF, 0x02FF, propertyDISALLOWED}, // MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER
+ {0x0300, 0x033F, propertyPVALID}, // COMBINING GRAVE ACCENT..COMBINING DOUBLE OVE
+ {0x0340, 0x0341, propertyDISALLOWED}, // COMBINING GRAVE TONE MARK..COMBINING ACUTE T
+ {0x0342, 0x0, propertyPVALID}, // COMBINING GREEK PERISPOMENI
+ {0x0343, 0x0345, propertyDISALLOWED}, // COMBINING GREEK KORONIS..COMBINING GREEK YPO
+ {0x0346, 0x034E, propertyPVALID}, // COMBINING BRIDGE ABOVE..COMBINING UPWARDS AR
+ {0x034F, 0x0, propertyDISALLOWED}, // COMBINING GRAPHEME JOINER
+ {0x0350, 0x036F, propertyPVALID}, // COMBINING RIGHT ARROWHEAD ABOVE..COMBINING L
+ {0x0370, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER HETA
+ {0x0371, 0x0, propertyPVALID}, // GREEK SMALL LETTER HETA
+ {0x0372, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER ARCHAIC SAMPI
+ {0x0373, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC SAMPI
+ {0x0374, 0x0, propertyDISALLOWED}, // GREEK NUMERAL SIGN
+ {0x0375, 0x0, propertyCONTEXTO}, // GREEK LOWER NUMERAL SIGN
+ {0x0376, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA
+ {0x0377, 0x0, propertyPVALID}, // GREEK SMALL LETTER PAMPHYLIAN DIGAMMA
+ {0x0378, 0x0379, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x037A, 0x0, propertyDISALLOWED}, // GREEK YPOGEGRAMMENI
+ {0x037B, 0x037D, propertyPVALID}, // GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GR
+ {0x037E, 0x0, propertyDISALLOWED}, // GREEK QUESTION MARK
+ {0x037F, 0x0383, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0384, 0x038A, propertyDISALLOWED}, // GREEK TONOS..GREEK CAPITAL LETTER IOTA WITH
+ {0x038B, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x038C, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH TONOS
+ {0x038D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x038E, 0x038F, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH TONOS..GRE
+ {0x0390, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND T
+ {0x0391, 0x03A1, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LE
+ {0x03A2, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x03A3, 0x03AB, propertyDISALLOWED}, // GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LE
+ {0x03AC, 0x03CE, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH TONOS..GREEK S
+ {0x03CF, 0x03D6, propertyDISALLOWED}, // GREEK CAPITAL KAI SYMBOL..GREEK PI SYMBOL
+ {0x03D7, 0x0, propertyPVALID}, // GREEK KAI SYMBOL
+ {0x03D8, 0x0, propertyDISALLOWED}, // GREEK LETTER ARCHAIC KOPPA
+ {0x03D9, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC KOPPA
+ {0x03DA, 0x0, propertyDISALLOWED}, // GREEK LETTER STIGMA
+ {0x03DB, 0x0, propertyPVALID}, // GREEK SMALL LETTER STIGMA
+ {0x03DC, 0x0, propertyDISALLOWED}, // GREEK LETTER DIGAMMA
+ {0x03DD, 0x0, propertyPVALID}, // GREEK SMALL LETTER DIGAMMA
+ {0x03DE, 0x0, propertyDISALLOWED}, // GREEK LETTER KOPPA
+ {0x03DF, 0x0, propertyPVALID}, // GREEK SMALL LETTER KOPPA
+ {0x03E0, 0x0, propertyDISALLOWED}, // GREEK LETTER SAMPI
+ {0x03E1, 0x0, propertyPVALID}, // GREEK SMALL LETTER SAMPI
+ {0x03E2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHEI
+ {0x03E3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHEI
+ {0x03E4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FEI
+ {0x03E5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FEI
+ {0x03E6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHEI
+ {0x03E7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHEI
+ {0x03E8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HORI
+ {0x03E9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HORI
+ {0x03EA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GANGIA
+ {0x03EB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GANGIA
+ {0x03EC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHIMA
+ {0x03ED, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHIMA
+ {0x03EE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DEI
+ {0x03EF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DEI
+ {0x03F0, 0x03F2, propertyDISALLOWED}, // GREEK KAPPA SYMBOL..GREEK LUNATE SIGMA SYMBO
+ {0x03F3, 0x0, propertyPVALID}, // GREEK LETTER YOT
+ {0x03F4, 0x03F7, propertyDISALLOWED}, // GREEK CAPITAL THETA SYMBOL..GREEK CAPITAL LE
+ {0x03F8, 0x0, propertyPVALID}, // GREEK SMALL LETTER SHO
+ {0x03F9, 0x03FA, propertyDISALLOWED}, // GREEK CAPITAL LUNATE SIGMA SYMBOL..GREEK CAP
+ {0x03FB, 0x03FC, propertyPVALID}, // GREEK SMALL LETTER SAN..GREEK RHO WITH STROK
+ {0x03FD, 0x042F, propertyDISALLOWED}, // GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL..
+ {0x0430, 0x045F, propertyPVALID}, // CYRILLIC SMALL LETTER A..CYRILLIC SMALL LETT
+ {0x0460, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA
+ {0x0461, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA
+ {0x0462, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAT
+ {0x0463, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAT
+ {0x0464, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED E
+ {0x0465, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED E
+ {0x0466, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LITTLE YUS
+ {0x0467, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LITTLE YUS
+ {0x0468, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS
+ {0x0469, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS
+ {0x046A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BIG YUS
+ {0x046B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BIG YUS
+ {0x046C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS
+ {0x046D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED BIG YUS
+ {0x046E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KSI
+ {0x046F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KSI
+ {0x0470, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PSI
+ {0x0471, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PSI
+ {0x0472, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER FITA
+ {0x0473, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER FITA
+ {0x0474, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA
+ {0x0475, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA
+ {0x0476, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE
+ {0x0477, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GR
+ {0x0478, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER UK
+ {0x0479, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER UK
+ {0x047A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ROUND OMEGA
+ {0x047B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ROUND OMEGA
+ {0x047C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA WITH TITLO
+ {0x047D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA WITH TITLO
+ {0x047E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OT
+ {0x047F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OT
+ {0x0480, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOPPA
+ {0x0481, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOPPA
+ {0x0482, 0x0, propertyDISALLOWED}, // CYRILLIC THOUSANDS SIGN
+ {0x0483, 0x0487, propertyPVALID}, // COMBINING CYRILLIC TITLO..COMBINING CYRILLIC
+ {0x0488, 0x048A, propertyDISALLOWED}, // COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..C
+ {0x048B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHORT I WITH TAIL
+ {0x048C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SEMISOFT SIGN
+ {0x048D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SEMISOFT SIGN
+ {0x048E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ER WITH TICK
+ {0x048F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ER WITH TICK
+ {0x0490, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+ {0x0491, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH UPTURN
+ {0x0492, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE
+ {0x0493, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE
+ {0x0494, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK
+ {0x0495, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK
+ {0x0496, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
+ {0x0497, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DESCENDER
+ {0x0498, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
+ {0x0499, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DESCENDER
+ {0x049A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH DESCENDER
+ {0x049B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH DESCENDER
+ {0x049C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH VERTICAL STR
+ {0x049D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH VERTICAL STROK
+ {0x049E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH STROKE
+ {0x049F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH STROKE
+ {0x04A0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BASHKIR KA
+ {0x04A1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BASHKIR KA
+ {0x04A2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH DESCENDER
+ {0x04A3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH DESCENDER
+ {0x04A4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE EN GHE
+ {0x04A5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE EN GHE
+ {0x04A6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK
+ {0x04A7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK
+ {0x04A8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN HA
+ {0x04A9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN HA
+ {0x04AA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ES WITH DESCENDER
+ {0x04AB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ES WITH DESCENDER
+ {0x04AC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH DESCENDER
+ {0x04AD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH DESCENDER
+ {0x04AE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U
+ {0x04AF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U
+ {0x04B0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U WITH STRO
+ {0x04B1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
+ {0x04B2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH DESCENDER
+ {0x04B3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH DESCENDER
+ {0x04B4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE TE TSE
+ {0x04B5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE TE TSE
+ {0x04B6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
+ {0x04B7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DESCENDER
+ {0x04B8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH VERTICAL ST
+ {0x04B9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH VERTICAL STRO
+ {0x04BA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHHA
+ {0x04BB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHHA
+ {0x04BC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE
+ {0x04BD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE
+ {0x04BE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH D
+ {0x04BF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DES
+ {0x04C0, 0x04C1, propertyDISALLOWED}, // CYRILLIC LETTER PALOCHKA..CYRILLIC CAPITAL L
+ {0x04C2, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH BREVE
+ {0x04C3, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH HOOK
+ {0x04C4, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH HOOK
+ {0x04C5, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH TAIL
+ {0x04C6, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH TAIL
+ {0x04C7, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH HOOK
+ {0x04C8, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH HOOK
+ {0x04C9, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH TAIL
+ {0x04CA, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH TAIL
+ {0x04CB, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KHAKASSIAN CHE
+ {0x04CC, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KHAKASSIAN CHE
+ {0x04CD, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EM WITH TAIL
+ {0x04CE, 0x04CF, propertyPVALID}, // CYRILLIC SMALL LETTER EM WITH TAIL..CYRILLIC
+ {0x04D0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH BREVE
+ {0x04D1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH BREVE
+ {0x04D2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH DIAERESIS
+ {0x04D3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH DIAERESIS
+ {0x04D4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE A IE
+ {0x04D5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE A IE
+ {0x04D6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IE WITH BREVE
+ {0x04D7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IE WITH BREVE
+ {0x04D8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA
+ {0x04D9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA
+ {0x04DA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
+ {0x04DB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS
+ {0x04DC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
+ {0x04DD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DIAERESIS
+ {0x04DE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
+ {0x04DF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DIAERESIS
+ {0x04E0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN DZE
+ {0x04E1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN DZE
+ {0x04E2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH MACRON
+ {0x04E3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH MACRON
+ {0x04E4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH DIAERESIS
+ {0x04E5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH DIAERESIS
+ {0x04E6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER O WITH DIAERESIS
+ {0x04E7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER O WITH DIAERESIS
+ {0x04E8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O
+ {0x04E9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O
+ {0x04EA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O WITH DIAERE
+ {0x04EB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O WITH DIAERESI
+ {0x04EC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER E WITH DIAERESIS
+ {0x04ED, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER E WITH DIAERESIS
+ {0x04EE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH MACRON
+ {0x04EF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH MACRON
+ {0x04F0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DIAERESIS
+ {0x04F1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DIAERESIS
+ {0x04F2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
+ {0x04F3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE
+ {0x04F4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
+ {0x04F5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DIAERESIS
+ {0x04F6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH DESCENDER
+ {0x04F7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH DESCENDER
+ {0x04F8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
+ {0x04F9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH DIAERESIS
+ {0x04FA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE AND
+ {0x04FB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE AND HO
+ {0x04FC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH HOOK
+ {0x04FD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH HOOK
+ {0x04FE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH STROKE
+ {0x04FF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH STROKE
+ {0x0500, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DE
+ {0x0501, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DE
+ {0x0502, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DJE
+ {0x0503, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DJE
+ {0x0504, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI ZJE
+ {0x0505, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI ZJE
+ {0x0506, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DZJE
+ {0x0507, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DZJE
+ {0x0508, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI LJE
+ {0x0509, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI LJE
+ {0x050A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI NJE
+ {0x050B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI NJE
+ {0x050C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI SJE
+ {0x050D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI SJE
+ {0x050E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI TJE
+ {0x050F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI TJE
+ {0x0510, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED ZE
+ {0x0511, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED ZE
+ {0x0512, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH HOOK
+ {0x0513, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH HOOK
+ {0x0514, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LHA
+ {0x0515, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LHA
+ {0x0516, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER RHA
+ {0x0517, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER RHA
+ {0x0518, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAE
+ {0x0519, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAE
+ {0x051A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER QA
+ {0x051B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER QA
+ {0x051C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER WE
+ {0x051D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER WE
+ {0x051E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ALEUT KA
+ {0x051F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ALEUT KA
+ {0x0520, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK
+ {0x0521, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK
+ {0x0522, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK
+ {0x0523, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH MIDDLE HOOK
+ {0x0524, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH DESCENDER
+ {0x0525, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH DESCENDER
+ {0x0526, 0x0530, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0531, 0x0556, propertyDISALLOWED}, // ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITA
+ {0x0557, 0x0558, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0559, 0x0, propertyPVALID}, // ARMENIAN MODIFIER LETTER LEFT HALF RING
+ {0x055A, 0x055F, propertyDISALLOWED}, // ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION M
+ {0x0560, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0561, 0x0586, propertyPVALID}, // ARMENIAN SMALL LETTER AYB..ARMENIAN SMALL LE
+ {0x0587, 0x0, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE ECH YIWN
+ {0x0588, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0589, 0x058A, propertyDISALLOWED}, // ARMENIAN FULL STOP..ARMENIAN HYPHEN
+ {0x058B, 0x0590, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0591, 0x05BD, propertyPVALID}, // HEBREW ACCENT ETNAHTA..HEBREW POINT METEG
+ {0x05BE, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION MAQAF
+ {0x05BF, 0x0, propertyPVALID}, // HEBREW POINT RAFE
+ {0x05C0, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION PASEQ
+ {0x05C1, 0x05C2, propertyPVALID}, // HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT
+ {0x05C3, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION SOF PASUQ
+ {0x05C4, 0x05C5, propertyPVALID}, // HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT
+ {0x05C6, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION NUN HAFUKHA
+ {0x05C7, 0x0, propertyPVALID}, // HEBREW POINT QAMATS QATAN
+ {0x05C8, 0x05CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x05D0, 0x05EA, propertyPVALID}, // HEBREW LETTER ALEF..HEBREW LETTER TAV
+ {0x05EB, 0x05EF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x05F0, 0x05F2, propertyPVALID}, // HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW L
+ {0x05F3, 0x05F4, propertyCONTEXTO}, // HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATIO
+ {0x05F5, 0x05FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0600, 0x0603, propertyDISALLOWED}, // ARABIC NUMBER SIGN..ARABIC SIGN SAFHA
+ {0x0604, 0x0605, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0606, 0x060F, propertyDISALLOWED}, // ARABIC-INDIC CUBE ROOT..ARABIC SIGN MISRA
+ {0x0610, 0x061A, propertyPVALID}, // ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..AR
+ {0x061B, 0x0, propertyDISALLOWED}, // ARABIC SEMICOLON
+ {0x061C, 0x061D, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x061E, 0x061F, propertyDISALLOWED}, // ARABIC TRIPLE DOT PUNCTUATION MARK..ARABIC Q
+ {0x0620, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0621, 0x063F, propertyPVALID}, // ARABIC LETTER HAMZA..ARABIC LETTER FARSI YEH
+ {0x0640, 0x0, propertyDISALLOWED}, // ARABIC TATWEEL
+ {0x0641, 0x065E, propertyPVALID}, // ARABIC LETTER FEH..ARABIC FATHA WITH TWO DOT
+ {0x065F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0660, 0x0669, propertyCONTEXTO}, // ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT
+ {0x066A, 0x066D, propertyDISALLOWED}, // ARABIC PERCENT SIGN..ARABIC FIVE POINTED STA
+ {0x066E, 0x0674, propertyPVALID}, // ARABIC LETTER DOTLESS BEH..ARABIC LETTER HIG
+ {0x0675, 0x0678, propertyDISALLOWED}, // ARABIC LETTER HIGH HAMZA ALEF..ARABIC LETTER
+ {0x0679, 0x06D3, propertyPVALID}, // ARABIC LETTER TTEH..ARABIC LETTER YEH BARREE
+ {0x06D4, 0x0, propertyDISALLOWED}, // ARABIC FULL STOP
+ {0x06D5, 0x06DC, propertyPVALID}, // ARABIC LETTER AE..ARABIC SMALL HIGH SEEN
+ {0x06DD, 0x06DE, propertyDISALLOWED}, // ARABIC END OF AYAH..ARABIC START OF RUB EL H
+ {0x06DF, 0x06E8, propertyPVALID}, // ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL
+ {0x06E9, 0x0, propertyDISALLOWED}, // ARABIC PLACE OF SAJDAH
+ {0x06EA, 0x06EF, propertyPVALID}, // ARABIC EMPTY CENTRE LOW STOP..ARABIC LETTER
+ {0x06F0, 0x06F9, propertyCONTEXTO}, // EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED A
+ {0x06FA, 0x06FF, propertyPVALID}, // ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC L
+ {0x0700, 0x070D, propertyDISALLOWED}, // SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN AST
+ {0x070E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x070F, 0x0, propertyDISALLOWED}, // SYRIAC ABBREVIATION MARK
+ {0x0710, 0x074A, propertyPVALID}, // SYRIAC LETTER ALAPH..SYRIAC BARREKH
+ {0x074B, 0x074C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x074D, 0x07B1, propertyPVALID}, // SYRIAC LETTER SOGDIAN ZHAIN..THAANA LETTER N
+ {0x07B2, 0x07BF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x07C0, 0x07F5, propertyPVALID}, // NKO DIGIT ZERO..NKO LOW TONE APOSTROPHE
+ {0x07F6, 0x07FA, propertyDISALLOWED}, // NKO SYMBOL OO DENNEN..NKO LAJANYALAN
+ {0x07FB, 0x07FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0800, 0x082D, propertyPVALID}, // SAMARITAN LETTER ALAF..SAMARITAN MARK NEQUDA
+ {0x082E, 0x082F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0830, 0x083E, propertyDISALLOWED}, // SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUN
+ {0x083F, 0x08FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0900, 0x0939, propertyPVALID}, // DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANA
+ {0x093A, 0x093B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x093C, 0x094E, propertyPVALID}, // DEVANAGARI SIGN NUKTA..DEVANAGARI VOWEL SIGN
+ {0x094F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0950, 0x0955, propertyPVALID}, // DEVANAGARI OM..DEVANAGARI VOWEL SIGN CANDRA
+ {0x0956, 0x0957, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0958, 0x095F, propertyDISALLOWED}, // DEVANAGARI LETTER QA..DEVANAGARI LETTER YYA
+ {0x0960, 0x0963, propertyPVALID}, // DEVANAGARI LETTER VOCALIC RR..DEVANAGARI VOW
+ {0x0964, 0x0965, propertyDISALLOWED}, // DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA
+ {0x0966, 0x096F, propertyPVALID}, // DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE
+ {0x0970, 0x0, propertyDISALLOWED}, // DEVANAGARI ABBREVIATION SIGN
+ {0x0971, 0x0972, propertyPVALID}, // DEVANAGARI SIGN HIGH SPACING DOT..DEVANAGARI
+ {0x0973, 0x0978, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0979, 0x097F, propertyPVALID}, // DEVANAGARI LETTER ZHA..DEVANAGARI LETTER BBA
+ {0x0980, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0981, 0x0983, propertyPVALID}, // BENGALI SIGN CANDRABINDU..BENGALI SIGN VISAR
+ {0x0984, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0985, 0x098C, propertyPVALID}, // BENGALI LETTER A..BENGALI LETTER VOCALIC L
+ {0x098D, 0x098E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x098F, 0x0990, propertyPVALID}, // BENGALI LETTER E..BENGALI LETTER AI
+ {0x0991, 0x0992, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0993, 0x09A8, propertyPVALID}, // BENGALI LETTER O..BENGALI LETTER NA
+ {0x09A9, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x09AA, 0x09B0, propertyPVALID}, // BENGALI LETTER PA..BENGALI LETTER RA
+ {0x09B1, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x09B2, 0x0, propertyPVALID}, // BENGALI LETTER LA
+ {0x09B3, 0x09B5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09B6, 0x09B9, propertyPVALID}, // BENGALI LETTER SHA..BENGALI LETTER HA
+ {0x09BA, 0x09BB, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09BC, 0x09C4, propertyPVALID}, // BENGALI SIGN NUKTA..BENGALI VOWEL SIGN VOCAL
+ {0x09C5, 0x09C6, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09C7, 0x09C8, propertyPVALID}, // BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI
+ {0x09C9, 0x09CA, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09CB, 0x09CE, propertyPVALID}, // BENGALI VOWEL SIGN O..BENGALI LETTER KHANDA
+ {0x09CF, 0x09D6, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09D7, 0x0, propertyPVALID}, // BENGALI AU LENGTH MARK
+ {0x09D8, 0x09DB, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09DC, 0x09DD, propertyDISALLOWED}, // BENGALI LETTER RRA..BENGALI LETTER RHA
+ {0x09DE, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x09DF, 0x0, propertyDISALLOWED}, // BENGALI LETTER YYA
+ {0x09E0, 0x09E3, propertyPVALID}, // BENGALI LETTER VOCALIC RR..BENGALI VOWEL SIG
+ {0x09E4, 0x09E5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x09E6, 0x09F1, propertyPVALID}, // BENGALI DIGIT ZERO..BENGALI LETTER RA WITH L
+ {0x09F2, 0x09FB, propertyDISALLOWED}, // BENGALI RUPEE MARK..BENGALI GANDA MARK
+ {0x09FC, 0x0A00, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A01, 0x0A03, propertyPVALID}, // GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN VISA
+ {0x0A04, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A05, 0x0A0A, propertyPVALID}, // GURMUKHI LETTER A..GURMUKHI LETTER UU
+ {0x0A0B, 0x0A0E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A0F, 0x0A10, propertyPVALID}, // GURMUKHI LETTER EE..GURMUKHI LETTER AI
+ {0x0A11, 0x0A12, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A13, 0x0A28, propertyPVALID}, // GURMUKHI LETTER OO..GURMUKHI LETTER NA
+ {0x0A29, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A2A, 0x0A30, propertyPVALID}, // GURMUKHI LETTER PA..GURMUKHI LETTER RA
+ {0x0A31, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A32, 0x0, propertyPVALID}, // GURMUKHI LETTER LA
+ {0x0A33, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER LLA
+ {0x0A34, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A35, 0x0, propertyPVALID}, // GURMUKHI LETTER VA
+ {0x0A36, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER SHA
+ {0x0A37, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A38, 0x0A39, propertyPVALID}, // GURMUKHI LETTER SA..GURMUKHI LETTER HA
+ {0x0A3A, 0x0A3B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A3C, 0x0, propertyPVALID}, // GURMUKHI SIGN NUKTA
+ {0x0A3D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A3E, 0x0A42, propertyPVALID}, // GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN
+ {0x0A43, 0x0A46, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A47, 0x0A48, propertyPVALID}, // GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN
+ {0x0A49, 0x0A4A, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A4B, 0x0A4D, propertyPVALID}, // GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA
+ {0x0A4E, 0x0A50, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A51, 0x0, propertyPVALID}, // GURMUKHI SIGN UDAAT
+ {0x0A52, 0x0A58, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A59, 0x0A5B, propertyDISALLOWED}, // GURMUKHI LETTER KHHA..GURMUKHI LETTER ZA
+ {0x0A5C, 0x0, propertyPVALID}, // GURMUKHI LETTER RRA
+ {0x0A5D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A5E, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER FA
+ {0x0A5F, 0x0A65, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A66, 0x0A75, propertyPVALID}, // GURMUKHI DIGIT ZERO..GURMUKHI SIGN YAKASH
+ {0x0A76, 0x0A80, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0A81, 0x0A83, propertyPVALID}, // GUJARATI SIGN CANDRABINDU..GUJARATI SIGN VIS
+ {0x0A84, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A85, 0x0A8D, propertyPVALID}, // GUJARATI LETTER A..GUJARATI VOWEL CANDRA E
+ {0x0A8E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A8F, 0x0A91, propertyPVALID}, // GUJARATI LETTER E..GUJARATI VOWEL CANDRA O
+ {0x0A92, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0A93, 0x0AA8, propertyPVALID}, // GUJARATI LETTER O..GUJARATI LETTER NA
+ {0x0AA9, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0AAA, 0x0AB0, propertyPVALID}, // GUJARATI LETTER PA..GUJARATI LETTER RA
+ {0x0AB1, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0AB2, 0x0AB3, propertyPVALID}, // GUJARATI LETTER LA..GUJARATI LETTER LLA
+ {0x0AB4, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0AB5, 0x0AB9, propertyPVALID}, // GUJARATI LETTER VA..GUJARATI LETTER HA
+ {0x0ABA, 0x0ABB, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0ABC, 0x0AC5, propertyPVALID}, // GUJARATI SIGN NUKTA..GUJARATI VOWEL SIGN CAN
+ {0x0AC6, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0AC7, 0x0AC9, propertyPVALID}, // GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN C
+ {0x0ACA, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0ACB, 0x0ACD, propertyPVALID}, // GUJARATI VOWEL SIGN O..GUJARATI SIGN VIRAMA
+ {0x0ACE, 0x0ACF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0AD0, 0x0, propertyPVALID}, // GUJARATI OM
+ {0x0AD1, 0x0ADF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0AE0, 0x0AE3, propertyPVALID}, // GUJARATI LETTER VOCALIC RR..GUJARATI VOWEL S
+ {0x0AE4, 0x0AE5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0AE6, 0x0AEF, propertyPVALID}, // GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE
+ {0x0AF0, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0AF1, 0x0, propertyDISALLOWED}, // GUJARATI RUPEE SIGN
+ {0x0AF2, 0x0B00, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B01, 0x0B03, propertyPVALID}, // ORIYA SIGN CANDRABINDU..ORIYA SIGN VISARGA
+ {0x0B04, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B05, 0x0B0C, propertyPVALID}, // ORIYA LETTER A..ORIYA LETTER VOCALIC L
+ {0x0B0D, 0x0B0E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B0F, 0x0B10, propertyPVALID}, // ORIYA LETTER E..ORIYA LETTER AI
+ {0x0B11, 0x0B12, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B13, 0x0B28, propertyPVALID}, // ORIYA LETTER O..ORIYA LETTER NA
+ {0x0B29, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B2A, 0x0B30, propertyPVALID}, // ORIYA LETTER PA..ORIYA LETTER RA
+ {0x0B31, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B32, 0x0B33, propertyPVALID}, // ORIYA LETTER LA..ORIYA LETTER LLA
+ {0x0B34, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B35, 0x0B39, propertyPVALID}, // ORIYA LETTER VA..ORIYA LETTER HA
+ {0x0B3A, 0x0B3B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B3C, 0x0B44, propertyPVALID}, // ORIYA SIGN NUKTA..ORIYA VOWEL SIGN VOCALIC R
+ {0x0B45, 0x0B46, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B47, 0x0B48, propertyPVALID}, // ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI
+ {0x0B49, 0x0B4A, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B4B, 0x0B4D, propertyPVALID}, // ORIYA VOWEL SIGN O..ORIYA SIGN VIRAMA
+ {0x0B4E, 0x0B55, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B56, 0x0B57, propertyPVALID}, // ORIYA AI LENGTH MARK..ORIYA AU LENGTH MARK
+ {0x0B58, 0x0B5B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B5C, 0x0B5D, propertyDISALLOWED}, // ORIYA LETTER RRA..ORIYA LETTER RHA
+ {0x0B5E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B5F, 0x0B63, propertyPVALID}, // ORIYA LETTER YYA..ORIYA VOWEL SIGN VOCALIC L
+ {0x0B64, 0x0B65, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B66, 0x0B6F, propertyPVALID}, // ORIYA DIGIT ZERO..ORIYA DIGIT NINE
+ {0x0B70, 0x0, propertyDISALLOWED}, // ORIYA ISSHAR
+ {0x0B71, 0x0, propertyPVALID}, // ORIYA LETTER WA
+ {0x0B72, 0x0B81, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B82, 0x0B83, propertyPVALID}, // TAMIL SIGN ANUSVARA..TAMIL SIGN VISARGA
+ {0x0B84, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B85, 0x0B8A, propertyPVALID}, // TAMIL LETTER A..TAMIL LETTER UU
+ {0x0B8B, 0x0B8D, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B8E, 0x0B90, propertyPVALID}, // TAMIL LETTER E..TAMIL LETTER AI
+ {0x0B91, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B92, 0x0B95, propertyPVALID}, // TAMIL LETTER O..TAMIL LETTER KA
+ {0x0B96, 0x0B98, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0B99, 0x0B9A, propertyPVALID}, // TAMIL LETTER NGA..TAMIL LETTER CA
+ {0x0B9B, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B9C, 0x0, propertyPVALID}, // TAMIL LETTER JA
+ {0x0B9D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0B9E, 0x0B9F, propertyPVALID}, // TAMIL LETTER NYA..TAMIL LETTER TTA
+ {0x0BA0, 0x0BA2, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BA3, 0x0BA4, propertyPVALID}, // TAMIL LETTER NNA..TAMIL LETTER TA
+ {0x0BA5, 0x0BA7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BA8, 0x0BAA, propertyPVALID}, // TAMIL LETTER NA..TAMIL LETTER PA
+ {0x0BAB, 0x0BAD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BAE, 0x0BB9, propertyPVALID}, // TAMIL LETTER MA..TAMIL LETTER HA
+ {0x0BBA, 0x0BBD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BBE, 0x0BC2, propertyPVALID}, // TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN UU
+ {0x0BC3, 0x0BC5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BC6, 0x0BC8, propertyPVALID}, // TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI
+ {0x0BC9, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0BCA, 0x0BCD, propertyPVALID}, // TAMIL VOWEL SIGN O..TAMIL SIGN VIRAMA
+ {0x0BCE, 0x0BCF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BD0, 0x0, propertyPVALID}, // TAMIL OM
+ {0x0BD1, 0x0BD6, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BD7, 0x0, propertyPVALID}, // TAMIL AU LENGTH MARK
+ {0x0BD8, 0x0BE5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0BE6, 0x0BEF, propertyPVALID}, // TAMIL DIGIT ZERO..TAMIL DIGIT NINE
+ {0x0BF0, 0x0BFA, propertyDISALLOWED}, // TAMIL NUMBER TEN..TAMIL NUMBER SIGN
+ {0x0BFB, 0x0C00, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C01, 0x0C03, propertyPVALID}, // TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA
+ {0x0C04, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C05, 0x0C0C, propertyPVALID}, // TELUGU LETTER A..TELUGU LETTER VOCALIC L
+ {0x0C0D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C0E, 0x0C10, propertyPVALID}, // TELUGU LETTER E..TELUGU LETTER AI
+ {0x0C11, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C12, 0x0C28, propertyPVALID}, // TELUGU LETTER O..TELUGU LETTER NA
+ {0x0C29, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C2A, 0x0C33, propertyPVALID}, // TELUGU LETTER PA..TELUGU LETTER LLA
+ {0x0C34, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C35, 0x0C39, propertyPVALID}, // TELUGU LETTER VA..TELUGU LETTER HA
+ {0x0C3A, 0x0C3C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C3D, 0x0C44, propertyPVALID}, // TELUGU SIGN AVAGRAHA..TELUGU VOWEL SIGN VOCA
+ {0x0C45, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C46, 0x0C48, propertyPVALID}, // TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI
+ {0x0C49, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C4A, 0x0C4D, propertyPVALID}, // TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA
+ {0x0C4E, 0x0C54, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C55, 0x0C56, propertyPVALID}, // TELUGU LENGTH MARK..TELUGU AI LENGTH MARK
+ {0x0C57, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C58, 0x0C59, propertyPVALID}, // TELUGU LETTER TSA..TELUGU LETTER DZA
+ {0x0C5A, 0x0C5F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C60, 0x0C63, propertyPVALID}, // TELUGU LETTER VOCALIC RR..TELUGU VOWEL SIGN
+ {0x0C64, 0x0C65, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C66, 0x0C6F, propertyPVALID}, // TELUGU DIGIT ZERO..TELUGU DIGIT NINE
+ {0x0C70, 0x0C77, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C78, 0x0C7F, propertyDISALLOWED}, // TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF
+ {0x0C80, 0x0C81, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0C82, 0x0C83, propertyPVALID}, // KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA
+ {0x0C84, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C85, 0x0C8C, propertyPVALID}, // KANNADA LETTER A..KANNADA LETTER VOCALIC L
+ {0x0C8D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C8E, 0x0C90, propertyPVALID}, // KANNADA LETTER E..KANNADA LETTER AI
+ {0x0C91, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0C92, 0x0CA8, propertyPVALID}, // KANNADA LETTER O..KANNADA LETTER NA
+ {0x0CA9, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CAA, 0x0CB3, propertyPVALID}, // KANNADA LETTER PA..KANNADA LETTER LLA
+ {0x0CB4, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CB5, 0x0CB9, propertyPVALID}, // KANNADA LETTER VA..KANNADA LETTER HA
+ {0x0CBA, 0x0CBB, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0CBC, 0x0CC4, propertyPVALID}, // KANNADA SIGN NUKTA..KANNADA VOWEL SIGN VOCAL
+ {0x0CC5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CC6, 0x0CC8, propertyPVALID}, // KANNADA VOWEL SIGN E..KANNADA VOWEL SIGN AI
+ {0x0CC9, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CCA, 0x0CCD, propertyPVALID}, // KANNADA VOWEL SIGN O..KANNADA SIGN VIRAMA
+ {0x0CCE, 0x0CD4, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0CD5, 0x0CD6, propertyPVALID}, // KANNADA LENGTH MARK..KANNADA AI LENGTH MARK
+ {0x0CD7, 0x0CDD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0CDE, 0x0, propertyPVALID}, // KANNADA LETTER FA
+ {0x0CDF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CE0, 0x0CE3, propertyPVALID}, // KANNADA LETTER VOCALIC RR..KANNADA VOWEL SIG
+ {0x0CE4, 0x0CE5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0CE6, 0x0CEF, propertyPVALID}, // KANNADA DIGIT ZERO..KANNADA DIGIT NINE
+ {0x0CF0, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0CF1, 0x0CF2, propertyDISALLOWED}, // KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADH
+ {0x0CF3, 0x0D01, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D02, 0x0D03, propertyPVALID}, // MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISA
+ {0x0D04, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D05, 0x0D0C, propertyPVALID}, // MALAYALAM LETTER A..MALAYALAM LETTER VOCALIC
+ {0x0D0D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D0E, 0x0D10, propertyPVALID}, // MALAYALAM LETTER E..MALAYALAM LETTER AI
+ {0x0D11, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D12, 0x0D28, propertyPVALID}, // MALAYALAM LETTER O..MALAYALAM LETTER NA
+ {0x0D29, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D2A, 0x0D39, propertyPVALID}, // MALAYALAM LETTER PA..MALAYALAM LETTER HA
+ {0x0D3A, 0x0D3C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D3D, 0x0D44, propertyPVALID}, // MALAYALAM SIGN AVAGRAHA..MALAYALAM VOWEL SIG
+ {0x0D45, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D46, 0x0D48, propertyPVALID}, // MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN
+ {0x0D49, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D4A, 0x0D4D, propertyPVALID}, // MALAYALAM VOWEL SIGN O..MALAYALAM SIGN VIRAM
+ {0x0D4E, 0x0D56, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D57, 0x0, propertyPVALID}, // MALAYALAM AU LENGTH MARK
+ {0x0D58, 0x0D5F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D60, 0x0D63, propertyPVALID}, // MALAYALAM LETTER VOCALIC RR..MALAYALAM VOWEL
+ {0x0D64, 0x0D65, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D66, 0x0D6F, propertyPVALID}, // MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE
+ {0x0D70, 0x0D75, propertyDISALLOWED}, // MALAYALAM NUMBER TEN..MALAYALAM FRACTION THR
+ {0x0D76, 0x0D78, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D79, 0x0, propertyDISALLOWED}, // MALAYALAM DATE MARK
+ {0x0D7A, 0x0D7F, propertyPVALID}, // MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER
+ {0x0D80, 0x0D81, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D82, 0x0D83, propertyPVALID}, // SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARG
+ {0x0D84, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0D85, 0x0D96, propertyPVALID}, // SINHALA LETTER AYANNA..SINHALA LETTER AUYANN
+ {0x0D97, 0x0D99, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0D9A, 0x0DB1, propertyPVALID}, // SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA L
+ {0x0DB2, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0DB3, 0x0DBB, propertyPVALID}, // SINHALA LETTER SANYAKA DAYANNA..SINHALA LETT
+ {0x0DBC, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0DBD, 0x0, propertyPVALID}, // SINHALA LETTER DANTAJA LAYANNA
+ {0x0DBE, 0x0DBF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0DC0, 0x0DC6, propertyPVALID}, // SINHALA LETTER VAYANNA..SINHALA LETTER FAYAN
+ {0x0DC7, 0x0DC9, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0DCA, 0x0, propertyPVALID}, // SINHALA SIGN AL-LAKUNA
+ {0x0DCB, 0x0DCE, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0DCF, 0x0DD4, propertyPVALID}, // SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL
+ {0x0DD5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0DD6, 0x0, propertyPVALID}, // SINHALA VOWEL SIGN DIGA PAA-PILLA
+ {0x0DD7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0DD8, 0x0DDF, propertyPVALID}, // SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOW
+ {0x0DE0, 0x0DF1, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0DF2, 0x0DF3, propertyPVALID}, // SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHAL
+ {0x0DF4, 0x0, propertyDISALLOWED}, // SINHALA PUNCTUATION KUNDDALIYA
+ {0x0DF5, 0x0E00, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E01, 0x0E32, propertyPVALID}, // THAI CHARACTER KO KAI..THAI CHARACTER SARA A
+ {0x0E33, 0x0, propertyDISALLOWED}, // THAI CHARACTER SARA AM
+ {0x0E34, 0x0E3A, propertyPVALID}, // THAI CHARACTER SARA I..THAI CHARACTER PHINTH
+ {0x0E3B, 0x0E3E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E3F, 0x0, propertyDISALLOWED}, // THAI CURRENCY SYMBOL BAHT
+ {0x0E40, 0x0E4E, propertyPVALID}, // THAI CHARACTER SARA E..THAI CHARACTER YAMAKK
+ {0x0E4F, 0x0, propertyDISALLOWED}, // THAI CHARACTER FONGMAN
+ {0x0E50, 0x0E59, propertyPVALID}, // THAI DIGIT ZERO..THAI DIGIT NINE
+ {0x0E5A, 0x0E5B, propertyDISALLOWED}, // THAI CHARACTER ANGKHANKHU..THAI CHARACTER KH
+ {0x0E5C, 0x0E80, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E81, 0x0E82, propertyPVALID}, // LAO LETTER KO..LAO LETTER KHO SUNG
+ {0x0E83, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0E84, 0x0, propertyPVALID}, // LAO LETTER KHO TAM
+ {0x0E85, 0x0E86, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E87, 0x0E88, propertyPVALID}, // LAO LETTER NGO..LAO LETTER CO
+ {0x0E89, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0E8A, 0x0, propertyPVALID}, // LAO LETTER SO TAM
+ {0x0E8B, 0x0E8C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E8D, 0x0, propertyPVALID}, // LAO LETTER NYO
+ {0x0E8E, 0x0E93, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0E94, 0x0E97, propertyPVALID}, // LAO LETTER DO..LAO LETTER THO TAM
+ {0x0E98, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0E99, 0x0E9F, propertyPVALID}, // LAO LETTER NO..LAO LETTER FO SUNG
+ {0x0EA0, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EA1, 0x0EA3, propertyPVALID}, // LAO LETTER MO..LAO LETTER LO LING
+ {0x0EA4, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EA5, 0x0, propertyPVALID}, // LAO LETTER LO LOOT
+ {0x0EA6, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EA7, 0x0, propertyPVALID}, // LAO LETTER WO
+ {0x0EA8, 0x0EA9, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0EAA, 0x0EAB, propertyPVALID}, // LAO LETTER SO SUNG..LAO LETTER HO SUNG
+ {0x0EAC, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EAD, 0x0EB2, propertyPVALID}, // LAO LETTER O..LAO VOWEL SIGN AA
+ {0x0EB3, 0x0, propertyDISALLOWED}, // LAO VOWEL SIGN AM
+ {0x0EB4, 0x0EB9, propertyPVALID}, // LAO VOWEL SIGN I..LAO VOWEL SIGN UU
+ {0x0EBA, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EBB, 0x0EBD, propertyPVALID}, // LAO VOWEL SIGN MAI KON..LAO SEMIVOWEL SIGN N
+ {0x0EBE, 0x0EBF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0EC0, 0x0EC4, propertyPVALID}, // LAO VOWEL SIGN E..LAO VOWEL SIGN AI
+ {0x0EC5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EC6, 0x0, propertyPVALID}, // LAO KO LA
+ {0x0EC7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0EC8, 0x0ECD, propertyPVALID}, // LAO TONE MAI EK..LAO NIGGAHITA
+ {0x0ECE, 0x0ECF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0ED0, 0x0ED9, propertyPVALID}, // LAO DIGIT ZERO..LAO DIGIT NINE
+ {0x0EDA, 0x0EDB, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0EDC, 0x0EDD, propertyDISALLOWED}, // LAO HO NO..LAO HO MO
+ {0x0EDE, 0x0EFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0F00, 0x0, propertyPVALID}, // TIBETAN SYLLABLE OM
+ {0x0F01, 0x0F0A, propertyDISALLOWED}, // TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBET
+ {0x0F0B, 0x0, propertyPVALID}, // TIBETAN MARK INTERSYLLABIC TSHEG
+ {0x0F0C, 0x0F17, propertyDISALLOWED}, // TIBETAN MARK DELIMITER TSHEG BSTAR..TIBETAN
+ {0x0F18, 0x0F19, propertyPVALID}, // TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN
+ {0x0F1A, 0x0F1F, propertyDISALLOWED}, // TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RD
+ {0x0F20, 0x0F29, propertyPVALID}, // TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE
+ {0x0F2A, 0x0F34, propertyDISALLOWED}, // TIBETAN DIGIT HALF ONE..TIBETAN MARK BSDUS R
+ {0x0F35, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG NYI ZLA
+ {0x0F36, 0x0, propertyDISALLOWED}, // TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN
+ {0x0F37, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG SGOR RTAGS
+ {0x0F38, 0x0, propertyDISALLOWED}, // TIBETAN MARK CHE MGO
+ {0x0F39, 0x0, propertyPVALID}, // TIBETAN MARK TSA -PHRU
+ {0x0F3A, 0x0F3D, propertyDISALLOWED}, // TIBETAN MARK GUG RTAGS GYON..TIBETAN MARK AN
+ {0x0F3E, 0x0F42, propertyPVALID}, // TIBETAN SIGN YAR TSHES..TIBETAN LETTER GA
+ {0x0F43, 0x0, propertyDISALLOWED}, // TIBETAN LETTER GHA
+ {0x0F44, 0x0F47, propertyPVALID}, // TIBETAN LETTER NGA..TIBETAN LETTER JA
+ {0x0F48, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0F49, 0x0F4C, propertyPVALID}, // TIBETAN LETTER NYA..TIBETAN LETTER DDA
+ {0x0F4D, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DDHA
+ {0x0F4E, 0x0F51, propertyPVALID}, // TIBETAN LETTER NNA..TIBETAN LETTER DA
+ {0x0F52, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DHA
+ {0x0F53, 0x0F56, propertyPVALID}, // TIBETAN LETTER NA..TIBETAN LETTER BA
+ {0x0F57, 0x0, propertyDISALLOWED}, // TIBETAN LETTER BHA
+ {0x0F58, 0x0F5B, propertyPVALID}, // TIBETAN LETTER MA..TIBETAN LETTER DZA
+ {0x0F5C, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DZHA
+ {0x0F5D, 0x0F68, propertyPVALID}, // TIBETAN LETTER WA..TIBETAN LETTER A
+ {0x0F69, 0x0, propertyDISALLOWED}, // TIBETAN LETTER KSSA
+ {0x0F6A, 0x0F6C, propertyPVALID}, // TIBETAN LETTER FIXED-FORM RA..TIBETAN LETTER
+ {0x0F6D, 0x0F70, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0F71, 0x0F72, propertyPVALID}, // TIBETAN VOWEL SIGN AA..TIBETAN VOWEL SIGN I
+ {0x0F73, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN II
+ {0x0F74, 0x0, propertyPVALID}, // TIBETAN VOWEL SIGN U
+ {0x0F75, 0x0F79, propertyDISALLOWED}, // TIBETAN VOWEL SIGN UU..TIBETAN VOWEL SIGN VO
+ {0x0F7A, 0x0F80, propertyPVALID}, // TIBETAN VOWEL SIGN E..TIBETAN VOWEL SIGN REV
+ {0x0F81, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN REVERSED II
+ {0x0F82, 0x0F84, propertyPVALID}, // TIBETAN SIGN NYI ZLA NAA DA..TIBETAN MARK HA
+ {0x0F85, 0x0, propertyDISALLOWED}, // TIBETAN MARK PALUTA
+ {0x0F86, 0x0F8B, propertyPVALID}, // TIBETAN SIGN LCI RTAGS..TIBETAN SIGN GRU MED
+ {0x0F8C, 0x0F8F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x0F90, 0x0F92, propertyPVALID}, // TIBETAN SUBJOINED LETTER KA..TIBETAN SUBJOIN
+ {0x0F93, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER GHA
+ {0x0F94, 0x0F97, propertyPVALID}, // TIBETAN SUBJOINED LETTER NGA..TIBETAN SUBJOI
+ {0x0F98, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0F99, 0x0F9C, propertyPVALID}, // TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOI
+ {0x0F9D, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DDHA
+ {0x0F9E, 0x0FA1, propertyPVALID}, // TIBETAN SUBJOINED LETTER NNA..TIBETAN SUBJOI
+ {0x0FA2, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DHA
+ {0x0FA3, 0x0FA6, propertyPVALID}, // TIBETAN SUBJOINED LETTER NA..TIBETAN SUBJOIN
+ {0x0FA7, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER BHA
+ {0x0FA8, 0x0FAB, propertyPVALID}, // TIBETAN SUBJOINED LETTER MA..TIBETAN SUBJOIN
+ {0x0FAC, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DZHA
+ {0x0FAD, 0x0FB8, propertyPVALID}, // TIBETAN SUBJOINED LETTER WA..TIBETAN SUBJOIN
+ {0x0FB9, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER KSSA
+ {0x0FBA, 0x0FBC, propertyPVALID}, // TIBETAN SUBJOINED LETTER FIXED-FORM WA..TIBE
+ {0x0FBD, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0FBE, 0x0FC5, propertyDISALLOWED}, // TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE
+ {0x0FC6, 0x0, propertyPVALID}, // TIBETAN SYMBOL PADMA GDAN
+ {0x0FC7, 0x0FCC, propertyDISALLOWED}, // TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SY
+ {0x0FCD, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x0FCE, 0x0FD8, propertyDISALLOWED}, // TIBETAN SIGN RDEL NAG RDEL DKAR..LEFT-FACING
+ {0x0FD9, 0x0FFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1000, 0x1049, propertyPVALID}, // MYANMAR LETTER KA..MYANMAR DIGIT NINE
+ {0x104A, 0x104F, propertyDISALLOWED}, // MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL
+ {0x1050, 0x109D, propertyPVALID}, // MYANMAR LETTER SHA..MYANMAR VOWEL SIGN AITON
+ {0x109E, 0x10C5, propertyDISALLOWED}, // MYANMAR SYMBOL SHAN ONE..GEORGIAN CAPITAL LE
+ {0x10C6, 0x10CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10D0, 0x10FA, propertyPVALID}, // GEORGIAN LETTER AN..GEORGIAN LETTER AIN
+ {0x10FB, 0x10FC, propertyDISALLOWED}, // GEORGIAN PARAGRAPH SEPARATOR..MODIFIER LETTE
+ {0x10FD, 0x10FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1100, 0x11FF, propertyDISALLOWED}, // HANGUL CHOSEONG KIYEOK..HANGUL JONGSEONG SSA
+ {0x1200, 0x1248, propertyPVALID}, // ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA
+ {0x1249, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x124A, 0x124D, propertyPVALID}, // ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE
+ {0x124E, 0x124F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1250, 0x1256, propertyPVALID}, // ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO
+ {0x1257, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1258, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE QHWA
+ {0x1259, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x125A, 0x125D, propertyPVALID}, // ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QH
+ {0x125E, 0x125F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1260, 0x1288, propertyPVALID}, // ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA
+ {0x1289, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x128A, 0x128D, propertyPVALID}, // ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE
+ {0x128E, 0x128F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1290, 0x12B0, propertyPVALID}, // ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA
+ {0x12B1, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x12B2, 0x12B5, propertyPVALID}, // ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE
+ {0x12B6, 0x12B7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x12B8, 0x12BE, propertyPVALID}, // ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO
+ {0x12BF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x12C0, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE KXWA
+ {0x12C1, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x12C2, 0x12C5, propertyPVALID}, // ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KX
+ {0x12C6, 0x12C7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x12C8, 0x12D6, propertyPVALID}, // ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHAR
+ {0x12D7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x12D8, 0x1310, propertyPVALID}, // ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA
+ {0x1311, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1312, 0x1315, propertyPVALID}, // ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE
+ {0x1316, 0x1317, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1318, 0x135A, propertyPVALID}, // ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA
+ {0x135B, 0x135E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x135F, 0x0, propertyPVALID}, // ETHIOPIC COMBINING GEMINATION MARK
+ {0x1360, 0x137C, propertyDISALLOWED}, // ETHIOPIC SECTION MARK..ETHIOPIC NUMBER TEN T
+ {0x137D, 0x137F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1380, 0x138F, propertyPVALID}, // ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SY
+ {0x1390, 0x1399, propertyDISALLOWED}, // ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MA
+ {0x139A, 0x139F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x13A0, 0x13F4, propertyPVALID}, // CHEROKEE LETTER A..CHEROKEE LETTER YV
+ {0x13F5, 0x13FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1400, 0x0, propertyDISALLOWED}, // CANADIAN SYLLABICS HYPHEN
+ {0x1401, 0x166C, propertyPVALID}, // CANADIAN SYLLABICS E..CANADIAN SYLLABICS CAR
+ {0x166D, 0x166E, propertyDISALLOWED}, // CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLAB
+ {0x166F, 0x167F, propertyPVALID}, // CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS B
+ {0x1680, 0x0, propertyDISALLOWED}, // OGHAM SPACE MARK
+ {0x1681, 0x169A, propertyPVALID}, // OGHAM LETTER BEITH..OGHAM LETTER PEITH
+ {0x169B, 0x169C, propertyDISALLOWED}, // OGHAM FEATHER MARK..OGHAM REVERSED FEATHER M
+ {0x169D, 0x169F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x16A0, 0x16EA, propertyPVALID}, // RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X
+ {0x16EB, 0x16F0, propertyDISALLOWED}, // RUNIC SINGLE PUNCTUATION..RUNIC BELGTHOR SYM
+ {0x16F1, 0x16FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1700, 0x170C, propertyPVALID}, // TAGALOG LETTER A..TAGALOG LETTER YA
+ {0x170D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x170E, 0x1714, propertyPVALID}, // TAGALOG LETTER LA..TAGALOG SIGN VIRAMA
+ {0x1715, 0x171F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1720, 0x1734, propertyPVALID}, // HANUNOO LETTER A..HANUNOO SIGN PAMUDPOD
+ {0x1735, 0x1736, propertyDISALLOWED}, // PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DO
+ {0x1737, 0x173F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1740, 0x1753, propertyPVALID}, // BUHID LETTER A..BUHID VOWEL SIGN U
+ {0x1754, 0x175F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1760, 0x176C, propertyPVALID}, // TAGBANWA LETTER A..TAGBANWA LETTER YA
+ {0x176D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x176E, 0x1770, propertyPVALID}, // TAGBANWA LETTER LA..TAGBANWA LETTER SA
+ {0x1771, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1772, 0x1773, propertyPVALID}, // TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U
+ {0x1774, 0x177F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1780, 0x17B3, propertyPVALID}, // KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU
+ {0x17B4, 0x17B5, propertyDISALLOWED}, // KHMER VOWEL INHERENT AQ..KHMER VOWEL INHEREN
+ {0x17B6, 0x17D3, propertyPVALID}, // KHMER VOWEL SIGN AA..KHMER SIGN BATHAMASAT
+ {0x17D4, 0x17D6, propertyDISALLOWED}, // KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH
+ {0x17D7, 0x0, propertyPVALID}, // KHMER SIGN LEK TOO
+ {0x17D8, 0x17DB, propertyDISALLOWED}, // KHMER SIGN BEYYAL..KHMER CURRENCY SYMBOL RIE
+ {0x17DC, 0x17DD, propertyPVALID}, // KHMER SIGN AVAKRAHASANYA..KHMER SIGN ATTHACA
+ {0x17DE, 0x17DF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x17E0, 0x17E9, propertyPVALID}, // KHMER DIGIT ZERO..KHMER DIGIT NINE
+ {0x17EA, 0x17EF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x17F0, 0x17F9, propertyDISALLOWED}, // KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK
+ {0x17FA, 0x17FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1800, 0x180E, propertyDISALLOWED}, // MONGOLIAN BIRGA..MONGOLIAN VOWEL SEPARATOR
+ {0x180F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1810, 0x1819, propertyPVALID}, // MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE
+ {0x181A, 0x181F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1820, 0x1877, propertyPVALID}, // MONGOLIAN LETTER A..MONGOLIAN LETTER MANCHU
+ {0x1878, 0x187F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1880, 0x18AA, propertyPVALID}, // MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONG
+ {0x18AB, 0x18AF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x18B0, 0x18F5, propertyPVALID}, // CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CA
+ {0x18F6, 0x18FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1900, 0x191C, propertyPVALID}, // LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER HA
+ {0x191D, 0x191F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1920, 0x192B, propertyPVALID}, // LIMBU VOWEL SIGN A..LIMBU SUBJOINED LETTER W
+ {0x192C, 0x192F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1930, 0x193B, propertyPVALID}, // LIMBU SMALL LETTER KA..LIMBU SIGN SA-I
+ {0x193C, 0x193F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1940, 0x0, propertyDISALLOWED}, // LIMBU SIGN LOO
+ {0x1941, 0x1943, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1944, 0x1945, propertyDISALLOWED}, // LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK
+ {0x1946, 0x196D, propertyPVALID}, // LIMBU DIGIT ZERO..TAI LE LETTER AI
+ {0x196E, 0x196F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1970, 0x1974, propertyPVALID}, // TAI LE LETTER TONE-2..TAI LE LETTER TONE-6
+ {0x1975, 0x197F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1980, 0x19AB, propertyPVALID}, // NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETT
+ {0x19AC, 0x19AF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x19B0, 0x19C9, propertyPVALID}, // NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW
+ {0x19CA, 0x19CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x19D0, 0x19DA, propertyPVALID}, // NEW TAI LUE DIGIT ZERO..NEW TAI LUE THAM DIG
+ {0x19DB, 0x19DD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x19DE, 0x19FF, propertyDISALLOWED}, // NEW TAI LUE SIGN LAE..KHMER SYMBOL DAP-PRAM
+ {0x1A00, 0x1A1B, propertyPVALID}, // BUGINESE LETTER KA..BUGINESE VOWEL SIGN AE
+ {0x1A1C, 0x1A1D, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1A1E, 0x1A1F, propertyDISALLOWED}, // BUGINESE PALLAWA..BUGINESE END OF SECTION
+ {0x1A20, 0x1A5E, propertyPVALID}, // TAI THAM LETTER HIGH KA..TAI THAM CONSONANT
+ {0x1A5F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1A60, 0x1A7C, propertyPVALID}, // TAI THAM SIGN SAKOT..TAI THAM SIGN KHUEN-LUE
+ {0x1A7D, 0x1A7E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1A7F, 0x1A89, propertyPVALID}, // TAI THAM COMBINING CRYPTOGRAMMIC DOT..TAI TH
+ {0x1A8A, 0x1A8F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1A90, 0x1A99, propertyPVALID}, // TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGI
+ {0x1A9A, 0x1A9F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1AA0, 0x1AA6, propertyDISALLOWED}, // TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED
+ {0x1AA7, 0x0, propertyPVALID}, // TAI THAM SIGN MAI YAMOK
+ {0x1AA8, 0x1AAD, propertyDISALLOWED}, // TAI THAM SIGN KAAN..TAI THAM SIGN CAANG
+ {0x1AAE, 0x1AFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1B00, 0x1B4B, propertyPVALID}, // BALINESE SIGN ULU RICEM..BALINESE LETTER ASY
+ {0x1B4C, 0x1B4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1B50, 0x1B59, propertyPVALID}, // BALINESE DIGIT ZERO..BALINESE DIGIT NINE
+ {0x1B5A, 0x1B6A, propertyDISALLOWED}, // BALINESE PANTI..BALINESE MUSICAL SYMBOL DANG
+ {0x1B6B, 0x1B73, propertyPVALID}, // BALINESE MUSICAL SYMBOL COMBINING TEGEH..BAL
+ {0x1B74, 0x1B7C, propertyDISALLOWED}, // BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG.
+ {0x1B7D, 0x1B7F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1B80, 0x1BAA, propertyPVALID}, // SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PAMA
+ {0x1BAB, 0x1BAD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1BAE, 0x1BB9, propertyPVALID}, // SUNDANESE LETTER KHA..SUNDANESE DIGIT NINE
+ {0x1BBA, 0x1BFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1C00, 0x1C37, propertyPVALID}, // LEPCHA LETTER KA..LEPCHA SIGN NUKTA
+ {0x1C38, 0x1C3A, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1C3B, 0x1C3F, propertyDISALLOWED}, // LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATIO
+ {0x1C40, 0x1C49, propertyPVALID}, // LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE
+ {0x1C4A, 0x1C4C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1C4D, 0x1C7D, propertyPVALID}, // LEPCHA LETTER TTA..OL CHIKI AHAD
+ {0x1C7E, 0x1C7F, propertyDISALLOWED}, // OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTU
+ {0x1C80, 0x1CCF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1CD0, 0x1CD2, propertyPVALID}, // VEDIC TONE KARSHANA..VEDIC TONE PRENKHA
+ {0x1CD3, 0x0, propertyDISALLOWED}, // VEDIC SIGN NIHSHVASA
+ {0x1CD4, 0x1CF2, propertyPVALID}, // VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC
+ {0x1CF3, 0x1CFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D00, 0x1D2B, propertyPVALID}, // LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTE
+ {0x1D2C, 0x1D2E, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL A..MODIFIER LETTER C
+ {0x1D2F, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL BARRED B
+ {0x1D30, 0x1D3A, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL D..MODIFIER LETTER C
+ {0x1D3B, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL REVERSED N
+ {0x1D3C, 0x1D4D, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL O..MODIFIER LETTER S
+ {0x1D4E, 0x0, propertyPVALID}, // MODIFIER LETTER SMALL TURNED I
+ {0x1D4F, 0x1D6A, propertyDISALLOWED}, // MODIFIER LETTER SMALL K..GREEK SUBSCRIPT SMA
+ {0x1D6B, 0x1D77, propertyPVALID}, // LATIN SMALL LETTER UE..LATIN SMALL LETTER TU
+ {0x1D78, 0x0, propertyDISALLOWED}, // MODIFIER LETTER CYRILLIC EN
+ {0x1D79, 0x1D9A, propertyPVALID}, // LATIN SMALL LETTER INSULAR G..LATIN SMALL LE
+ {0x1D9B, 0x1DBF, propertyDISALLOWED}, // MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER
+ {0x1DC0, 0x1DE6, propertyPVALID}, // COMBINING DOTTED GRAVE ACCENT..COMBINING LAT
+ {0x1DE7, 0x1DFC, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1DFD, 0x1DFF, propertyPVALID}, // COMBINING ALMOST EQUAL TO BELOW..COMBINING R
+ {0x1E00, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING BELOW
+ {0x1E01, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING BELOW
+ {0x1E02, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT ABOVE
+ {0x1E03, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT ABOVE
+ {0x1E04, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT BELOW
+ {0x1E05, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT BELOW
+ {0x1E06, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH LINE BELOW
+ {0x1E07, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH LINE BELOW
+ {0x1E08, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CEDILLA AND ACUT
+ {0x1E09, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
+ {0x1E0A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT ABOVE
+ {0x1E0B, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT ABOVE
+ {0x1E0C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT BELOW
+ {0x1E0D, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT BELOW
+ {0x1E0E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH LINE BELOW
+ {0x1E0F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH LINE BELOW
+ {0x1E10, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CEDILLA
+ {0x1E11, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CEDILLA
+ {0x1E12, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+ {0x1E13, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
+ {0x1E14, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+ {0x1E15, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND GRAVE
+ {0x1E16, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+ {0x1E17, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND ACUTE
+ {0x1E18, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+ {0x1E19, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
+ {0x1E1A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE BELOW
+ {0x1E1B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE BELOW
+ {0x1E1C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA AND BREV
+ {0x1E1D, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA AND BREVE
+ {0x1E1E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER F WITH DOT ABOVE
+ {0x1E1F, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH DOT ABOVE
+ {0x1E20, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH MACRON
+ {0x1E21, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH MACRON
+ {0x1E22, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT ABOVE
+ {0x1E23, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT ABOVE
+ {0x1E24, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT BELOW
+ {0x1E25, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT BELOW
+ {0x1E26, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DIAERESIS
+ {0x1E27, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DIAERESIS
+ {0x1E28, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CEDILLA
+ {0x1E29, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CEDILLA
+ {0x1E2A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH BREVE BELOW
+ {0x1E2B, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH BREVE BELOW
+ {0x1E2C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE BELOW
+ {0x1E2D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE BELOW
+ {0x1E2E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DIAERESIS AND AC
+ {0x1E2F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DIAERESIS AND ACUT
+ {0x1E30, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH ACUTE
+ {0x1E31, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH ACUTE
+ {0x1E32, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DOT BELOW
+ {0x1E33, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DOT BELOW
+ {0x1E34, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH LINE BELOW
+ {0x1E35, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH LINE BELOW
+ {0x1E36, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW
+ {0x1E37, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW
+ {0x1E38, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW AND MA
+ {0x1E39, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW AND MACR
+ {0x1E3A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH LINE BELOW
+ {0x1E3B, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH LINE BELOW
+ {0x1E3C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+ {0x1E3D, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
+ {0x1E3E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH ACUTE
+ {0x1E3F, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH ACUTE
+ {0x1E40, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT ABOVE
+ {0x1E41, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT ABOVE
+ {0x1E42, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT BELOW
+ {0x1E43, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT BELOW
+ {0x1E44, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT ABOVE
+ {0x1E45, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT ABOVE
+ {0x1E46, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT BELOW
+ {0x1E47, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT BELOW
+ {0x1E48, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LINE BELOW
+ {0x1E49, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LINE BELOW
+ {0x1E4A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+ {0x1E4B, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
+ {0x1E4C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+ {0x1E4D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND ACUTE
+ {0x1E4E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND DIAERE
+ {0x1E4F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND DIAERESI
+ {0x1E50, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+ {0x1E51, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND GRAVE
+ {0x1E52, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+ {0x1E53, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND ACUTE
+ {0x1E54, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH ACUTE
+ {0x1E55, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH ACUTE
+ {0x1E56, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH DOT ABOVE
+ {0x1E57, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH DOT ABOVE
+ {0x1E58, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT ABOVE
+ {0x1E59, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT ABOVE
+ {0x1E5A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW
+ {0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW
+ {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA
+ {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR
+ {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW
+ {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW
+ {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE
+ {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE
+ {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW
+ {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW
+ {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB
+ {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV
+ {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB
+ {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV
+ {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO
+ {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW AND DOT
+ {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE
+ {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE
+ {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW
+ {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW
+ {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW
+ {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW
+ {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+ {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
+ {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+ {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW
+ {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW
+ {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW
+ {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+ {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
+ {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+ {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE
+ {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER
+ {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES
+ {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE
+ {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE
+ {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW
+ {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW
+ {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE
+ {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE
+ {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE
+ {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE
+ {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS
+ {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS
+ {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE
+ {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE
+ {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT BELOW
+ {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW
+ {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE
+ {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE
+ {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS
+ {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS
+ {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE
+ {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE
+ {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+ {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX
+ {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW
+ {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW
+ {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW
+ {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN
+ {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L
+ {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO
+ {0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S
+ {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA
+ {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW
+ {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW
+ {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE
+ {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE
+ {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A
+ {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU
+ {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G
+ {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA
+ {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H
+ {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO
+ {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T
+ {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL
+ {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D
+ {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT
+ {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+ {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE
+ {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+ {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE
+ {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A
+ {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO
+ {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+ {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE
+ {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE
+ {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO
+ {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW
+ {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW
+ {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE
+ {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE
+ {0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE
+ {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE
+ {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A
+ {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU
+ {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G
+ {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA
+ {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H
+ {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO
+ {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T
+ {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL
+ {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D
+ {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT
+ {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE
+ {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE
+ {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT BELOW
+ {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW
+ {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW
+ {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW
+ {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE
+ {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE
+ {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A
+ {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU
+ {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G
+ {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA
+ {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H
+ {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO
+ {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T
+ {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL
+ {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D
+ {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT
+ {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+ {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE
+ {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+ {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE
+ {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB
+ {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV
+ {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE
+ {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE
+ {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL
+ {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW
+ {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW
+ {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW
+ {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE
+ {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE
+ {0x1EE8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+ {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE
+ {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+ {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE
+ {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB
+ {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV
+ {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE
+ {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE
+ {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL
+ {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW
+ {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE
+ {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE
+ {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW
+ {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW
+ {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+ {0x1EF7, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE
+ {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE
+ {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE
+ {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL
+ {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL
+ {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V
+ {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V
+ {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP
+ {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL
+ {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK
+ {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK
+ {0x1F16, 0x1F17, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE
+ {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA
+ {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C
+ {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM
+ {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK
+ {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK
+ {0x1F46, 0x1F47, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE
+ {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK
+ {0x1F58, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA
+ {0x1F5A, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F5C, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F5E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S
+ {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK
+ {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA
+ {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA
+ {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA
+ {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA
+ {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA
+ {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA
+ {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA
+ {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA
+ {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA
+ {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA
+ {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA
+ {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA
+ {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA
+ {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA
+ {0x1F7E, 0x1F7F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG
+ {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK
+ {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG
+ {0x1FB5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI
+ {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN
+ {0x1FC5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI
+ {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND
+ {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S
+ {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O
+ {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR
+ {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK
+ {0x1FDC, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS
+ {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE
+ {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN
+ {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA
+ {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR
+ {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG
+ {0x1FF5, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI
+ {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN
+ {0x1FFF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE
+ {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER
+ {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS
+ {0x2065, 0x2069, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI
+ {0x2072, 0x2073, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI
+ {0x208F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR
+ {0x2095, 0x209F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN
+ {0x20B9, 0x20CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE
+ {0x20F1, 0x20FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB
+ {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F
+ {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R
+ {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C
+ {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION
+ {0x218A, 0x218F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS ARROW..DECIMAL EXPONENT SYMBOL
+ {0x23E9, 0x23FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM
+ {0x2427, 0x243F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH
+ {0x244B, 0x245F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR
+ {0x26CE, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2
+ {0x26E2, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE
+ {0x26E4, 0x26E7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ
+ {0x2700, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS
+ {0x2705, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE
+ {0x270A, 0x270B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR
+ {0x2728, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR
+ {0x274C, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE
+ {0x274E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE
+ {0x2753, 0x2755, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO
+ {0x275F, 0x2760, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W
+ {0x2795, 0x2797, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT-
+ {0x27B0, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD
+ {0x27BF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H
+ {0x27CB, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION
+ {0x27CD, 0x27CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A
+ {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE
+ {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA
+ {0x2C2F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL
+ {0x2C5F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR
+ {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR
+ {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA
+ {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL
+ {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DESCENDER
+ {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER
+ {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER
+ {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER
+ {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER
+ {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER
+ {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE
+ {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK
+ {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK
+ {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL
+ {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H
+ {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL
+ {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT
+ {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA
+ {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA
+ {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA
+ {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA
+ {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA
+ {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA
+ {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA
+ {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE
+ {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE
+ {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU
+ {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU
+ {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA
+ {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA
+ {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE
+ {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE
+ {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE
+ {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE
+ {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA
+ {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA
+ {0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA
+ {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA
+ {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA
+ {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA
+ {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI
+ {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI
+ {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI
+ {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI
+ {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI
+ {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI
+ {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O
+ {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O
+ {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI
+ {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI
+ {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO
+ {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO
+ {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA
+ {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA
+ {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU
+ {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU
+ {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA
+ {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA
+ {0x2CAA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI
+ {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI
+ {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI
+ {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI
+ {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI
+ {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI
+ {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU
+ {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU
+ {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF
+ {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF
+ {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN
+ {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN
+ {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE
+ {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE
+ {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA
+ {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA
+ {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI
+ {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI
+ {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI
+ {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI
+ {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU
+ {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU
+ {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI
+ {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI
+ {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI
+ {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI
+ {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI
+ {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI
+ {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH
+ {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH
+ {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER AKHMIMIC KHEI
+ {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI
+ {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI
+ {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI
+ {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI
+ {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI
+ {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA
+ {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA
+ {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA
+ {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA
+ {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI
+ {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI
+ {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT
+ {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT
+ {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA
+ {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA
+ {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA
+ {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA
+ {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA
+ {0x2CDB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHIMA
+ {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA
+ {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA
+ {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI
+ {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI
+ {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI
+ {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI
+ {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU
+ {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S
+ {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C
+ {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI
+ {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA
+ {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO
+ {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO
+ {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET
+ {0x2D26, 0x2D2F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ
+ {0x2D66, 0x2D6E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK
+ {0x2D70, 0x2D7F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW
+ {0x2D97, 0x2D9F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO
+ {0x2DA7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO
+ {0x2DAF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO
+ {0x2DB7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC
+ {0x2DBF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO
+ {0x2DC7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO
+ {0x2DCF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO
+ {0x2DD7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO
+ {0x2DDF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI
+ {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU
+ {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE
+ {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT
+ {0x2E32, 0x2E7F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP
+ {0x2E9A, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED
+ {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE
+ {0x2FD6, 0x2FEF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI
+ {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND
+ {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB
+ {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE
+ {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT
+ {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR
+ {0x303C, 0x0, propertyPVALID}, // MASU MARK
+ {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL
+ {0x3040, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA
+ {0x3097, 0x3098, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR
+ {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN
+ {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE
+ {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU
+ {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO
+ {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT
+ {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA
+ {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO
+ {0x3100, 0x3104, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH
+ {0x312E, 0x3130, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE
+ {0x318F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA
+ {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H
+ {0x31B8, 0x31BF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q
+ {0x31E4, 0x31EF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM
+ {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K
+ {0x321F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN
+ {0x32FF, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL
+ {0x3400, 0x4DB5, propertyPVALID}, // <CJK Ideograph Extension A>..<CJK Ideograph
+ {0x4DB6, 0x4DBF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F
+ {0x4E00, 0x9FCB, propertyPVALID}, // <CJK Ideograph>..<CJK Ideograph>
+ {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR
+ {0xA48D, 0xA48F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE
+ {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU
+ {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION COMMA..LISU PUNCTUATION FUL
+ {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER
+ {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK
+ {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO
+ {0xA62C, 0xA63F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA
+ {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA
+ {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO
+ {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO
+ {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE
+ {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE
+ {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA
+ {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA
+ {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV
+ {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV
+ {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK
+ {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK
+ {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA
+ {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA
+ {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER
+ {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER
+ {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER
+ {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER
+ {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT
+ {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT
+ {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU
+ {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU
+ {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A
+ {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A
+ {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CLOSED LITTLE YUS
+ {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS
+ {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS
+ {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS
+ {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT
+ {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE
+ {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN
+ {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN
+ {0xA660, 0xA661, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE
+ {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE
+ {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL
+ {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL
+ {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM
+ {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM
+ {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O
+ {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O
+ {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O
+ {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O
+ {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O
+ {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC SMALL LETTER DOUBLE MONOCULAR O..CO
+ {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON
+ {0xA674, 0xA67B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI
+ {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA
+ {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK
+ {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE
+ {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE
+ {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE
+ {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE
+ {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE
+ {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE
+ {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE
+ {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE
+ {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE
+ {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE
+ {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK
+ {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK
+ {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE
+ {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE
+ {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE
+ {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE
+ {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE
+ {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE
+ {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE
+ {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE
+ {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE
+ {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE
+ {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE
+ {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE
+ {0xA698, 0xA69F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI
+ {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM
+ {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ
+ {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK
+ {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF
+ {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L
+ {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN
+ {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF
+ {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN
+ {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN
+ {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG
+ {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG
+ {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ
+ {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ
+ {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO
+ {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO
+ {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO
+ {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO
+ {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA
+ {0xA72F, 0xA731, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT
+ {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA
+ {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA
+ {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO
+ {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO
+ {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU
+ {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU
+ {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV
+ {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV
+ {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR
+ {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR
+ {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY
+ {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY
+ {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT
+ {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT
+ {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE
+ {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE
+ {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE
+ {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE
+ {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO
+ {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA
+ {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L
+ {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L
+ {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE
+ {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE
+ {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER
+ {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA
+ {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP
+ {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP
+ {0xA74E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OO
+ {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO
+ {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D
+ {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES
+ {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH
+ {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH
+ {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL
+ {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL
+ {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D
+ {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES
+ {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE
+ {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE
+ {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA
+ {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA
+ {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA
+ {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA
+ {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE
+ {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE
+ {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY
+ {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY
+ {0xA762, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z
+ {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z
+ {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE
+ {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE
+ {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU
+ {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH
+ {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND
+ {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND
+ {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET
+ {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET
+ {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS
+ {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS
+ {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON
+ {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON
+ {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US
+ {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U
+ {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D
+ {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D
+ {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F
+ {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F
+ {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA
+ {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G
+ {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L
+ {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L
+ {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R
+ {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R
+ {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S
+ {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S
+ {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T
+ {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER LETTE
+ {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER
+ {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO
+ {0xA78D, 0xA7FA, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N
+ {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE
+ {0xA82C, 0xA82F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI
+ {0xA83A, 0xA83F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI
+ {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU
+ {0xA878, 0xA87F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI
+ {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA
+ {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE
+ {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI
+ {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET
+ {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE
+ {0xA8FC, 0xA8FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO
+ {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA
+ {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA
+ {0xA954, 0xA95E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE
+ {0xA97D, 0xA97F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON
+ {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD
+ {0xA9CE, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE
+ {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I
+ {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA
+ {0xAA37, 0xAA3F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN
+ {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE
+ {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR
+ {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K
+ {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY
+ {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA
+ {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO
+ {0xAAC3, 0xAADA, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM
+ {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI
+ {0xAAE0, 0xABBF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL
+ {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI
+ {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE
+ {0xABEE, 0xABEF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT
+ {0xABFA, 0xABFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAC00, 0xD7A3, propertyPVALID}, // <Hangul Syllable>..<Hangul Syllable>
+ {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA
+ {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO
+ {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xD800, 0xFA0D, propertyDISALLOWED}, // <Non Private Use High Surrogate>..CJK COMPAT
+ {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT
+ {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10
+ {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11
+ {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12
+ {0xFA13, 0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT
+ {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT
+ {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F
+ {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20
+ {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21
+ {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22
+ {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT
+ {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT
+ {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT
+ {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT
+ {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT
+ {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT
+ {0xFADA, 0xFAFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR
+ {0xFB07, 0xFB12, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM
+ {0xFB18, 0xFB1C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ
+ {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA
+ {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE
+ {0xFB37, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER
+ {0xFB3D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH
+ {0xFB3F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER
+ {0xFB42, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L
+ {0xFB45, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT
+ {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT
+ {0xFD40, 0xFD4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT
+ {0xFD90, 0xFD91, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI
+ {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFDD0, 0xFDFD, propertyDISALLOWED}, // <noncharacter>..ARABIC LIGATURE BISMILLAH AR
+ {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR
+ {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ
+ {0xFE27, 0xFE2F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE
+ {0xFE53, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL SEMICOLON..SMALL EQUALS SIGN
+ {0xFE67, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT
+ {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT
+ {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT
+ {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM
+ {0xFE75, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE
+ {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE
+ {0xFF00, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL
+ {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL
+ {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU
+ {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL
+ {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL
+ {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN
+ {0xFFE7, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH
+ {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR..<noncharacter
+ {0x10000, 0x1000B, propertyPVALID}, // LINEAR B SYLLABLE B008 A..LINEAR B SYLLABLE
+ {0x1000C, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE
+ {0x10027, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE
+ {0x1003B, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE
+ {0x1003E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE
+ {0x1004E, 0x1004F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089
+ {0x1005E, 0x1007F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA
+ {0x100FB, 0x100FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR
+ {0x10103, 0x10106, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU
+ {0x10134, 0x10136, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN
+ {0x1018B, 0x1018F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN
+ {0x1019C, 0x101CF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC
+ {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE
+ {0x101FE, 0x1027F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X
+ {0x1029D, 0x1029F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3
+ {0x102D1, 0x102FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU
+ {0x1031F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F
+ {0x10324, 0x1032F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA
+ {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY
+ {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL
+ {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED
+ {0x1034B, 0x1037F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU
+ {0x1039E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER
+ {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA
+ {0x103C4, 0x103C7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG
+ {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER
+ {0x103D6, 0x103FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT
+ {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER
+ {0x1049E, 0x1049F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE
+ {0x104AA, 0x107FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA
+ {0x10806, 0x10807, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO
+ {0x10809, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO
+ {0x10836, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE
+ {0x10839, 0x1083B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA
+ {0x1083D, 0x1083E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER
+ {0x10856, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM
+ {0x10860, 0x108FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU
+ {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR
+ {0x1091C, 0x1091E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR
+ {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C
+ {0x1093A, 0x1093E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK
+ {0x10940, 0x109FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V
+ {0x10A04, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI
+ {0x10A07, 0x10A0B, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET
+ {0x10A14, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA
+ {0x10A18, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT
+ {0x10A34, 0x10A37, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D
+ {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA
+ {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE
+ {0x10A48, 0x10A4F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT
+ {0x10A59, 0x10A5F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI
+ {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB
+ {0x10A80, 0x10AFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE
+ {0x10B36, 0x10B38, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV
+ {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP
+ {0x10B56, 0x10B57, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI
+ {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT
+ {0x10B73, 0x10B77, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO
+ {0x10B80, 0x10BFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE
+ {0x10C49, 0x10E5F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS
+ {0x10E7F, 0x1107F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA
+ {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND
+ {0x110C2, 0x11FFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM
+ {0x1236F, 0x123FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU
+ {0x12463, 0x1246F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD
+ {0x12474, 0x12FFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP
+ {0x1342F, 0x1CFFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU
+ {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO
+ {0x1D127, 0x1D128, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA
+ {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL
+ {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING
+ {0x1D357, 0x1D35F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE
+ {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT
+ {0x1D455, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC
+ {0x1D49D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL
+ {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G
+ {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL
+ {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL
+ {0x1D4AD, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL
+ {0x1D4BA, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F
+ {0x1D4BC, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC
+ {0x1D4C4, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR
+ {0x1D506, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL
+ {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL
+ {0x1D515, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL
+ {0x1D51D, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D
+ {0x1D53A, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM
+ {0x1D53F, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM
+ {0x1D545, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O
+ {0x1D547, 0x1D549, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM
+ {0x1D551, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT
+ {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA
+ {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M
+ {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK
+ {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER
+ {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA
+ {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE
+ {0x1F12F, 0x1F130, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B
+ {0x1F132, 0x1F13C, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N
+ {0x1F13E, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P
+ {0x1F140, 0x1F141, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S
+ {0x1F143, 0x1F145, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W
+ {0x1F147, 0x1F149, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV
+ {0x1F14F, 0x1F156, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H
+ {0x1F158, 0x1F15E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P
+ {0x1F160, 0x1F178, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J
+ {0x1F17A, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG
+ {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P
+ {0x1F180, 0x1F189, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE
+ {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ
+ {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA
+ {0x1F201, 0x1F20F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED
+ {0x1F232, 0x1F23F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA
+ {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x20000, 0x2A6D6, propertyPVALID}, // <CJK Ideograph Extension B>..<CJK Ideograph
+ {0x2A6D7, 0x2A6FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2A700, 0x2B734, propertyPVALID}, // <CJK Ideograph Extension C>..<CJK Ideograph
+ {0x2B735, 0x2F7FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA
+ {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x30000, 0x3FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x40000, 0x4FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x50000, 0x5FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x60000, 0x6FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x70000, 0x7FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x80000, 0x8FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0x90000, 0x9FFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+ {0xE0000, 0x0, propertyUNASSIGNED}, // <reserved>
+ {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG
+ {0xE0002, 0xE001F, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG
+ {0xE0080, 0xE00FF, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25
+ {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // <reserved>..<reserved>
+ {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter>
+}
diff --git a/vendor/github.com/miekg/dns/idn/example_test.go b/vendor/github.com/miekg/dns/idn/example_test.go
new file mode 100644
index 000000000..8833cd91d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/idn/example_test.go
@@ -0,0 +1,18 @@
+package idn_test
+
+import (
+ "fmt"
+ "github.com/miekg/dns/idn"
+)
+
+func ExampleToPunycode() {
+ name := "インターネット.テスト"
+ fmt.Printf("%s -> %s", name, idn.ToPunycode(name))
+ // Output: インターネット.テスト -> xn--eckucmux0ukc.xn--zckzah
+}
+
+func ExampleFromPunycode() {
+ name := "xn--mgbaja8a1hpac.xn--mgbachtv"
+ fmt.Printf("%s -> %s", name, idn.FromPunycode(name))
+ // Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار
+}
diff --git a/vendor/github.com/miekg/dns/idn/punycode.go b/vendor/github.com/miekg/dns/idn/punycode.go
new file mode 100644
index 000000000..7e5c263fc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/idn/punycode.go
@@ -0,0 +1,373 @@
+// Package idn implements encoding from and to punycode as specified by RFC 3492.
+package idn
+
+import (
+ "bytes"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/miekg/dns"
+)
+
+// Implementation idea from the RFC itself and from IDNA::Punycode created by
+// Tatsuhiko Miyagawa <miyagawa@bulknews.net> and released under Perl Artistic
+// License in 2002.
+
+const (
+ _MIN rune = 1
+ _MAX rune = 26
+ _SKEW rune = 38
+ _BASE rune = 36
+ _BIAS rune = 72
+ _N rune = 128
+ _DAMP rune = 700
+
+ _DELIMITER = '-'
+ _PREFIX = "xn--"
+)
+
+// ToPunycode converts unicode domain names to DNS-appropriate punycode names.
+// This function will return an empty string result for domain names with
+// invalid unicode strings. This function expects domain names in lowercase.
+func ToPunycode(s string) string {
+ // Early check to see if encoding is needed.
+ // This will prevent making heap allocations when not needed.
+ if !needToPunycode(s) {
+ return s
+ }
+
+ tokens := dns.SplitDomainName(s)
+ switch {
+ case s == "":
+ return ""
+ case tokens == nil: // s == .
+ return "."
+ case s[len(s)-1] == '.':
+ tokens = append(tokens, "")
+ }
+
+ for i := range tokens {
+ t := encode([]byte(tokens[i]))
+ if t == nil {
+ return ""
+ }
+ tokens[i] = string(t)
+ }
+ return strings.Join(tokens, ".")
+}
+
+// FromPunycode returns unicode domain name from provided punycode string.
+// This function expects punycode strings in lowercase.
+func FromPunycode(s string) string {
+ // Early check to see if decoding is needed.
+ // This will prevent making heap allocations when not needed.
+ if !needFromPunycode(s) {
+ return s
+ }
+
+ tokens := dns.SplitDomainName(s)
+ switch {
+ case s == "":
+ return ""
+ case tokens == nil: // s == .
+ return "."
+ case s[len(s)-1] == '.':
+ tokens = append(tokens, "")
+ }
+ for i := range tokens {
+ tokens[i] = string(decode([]byte(tokens[i])))
+ }
+ return strings.Join(tokens, ".")
+}
+
+// digitval converts a single byte into a meaningful value that's used to calculate the decoded unicode character.
+const errdigit = 0xffff
+
+func digitval(code rune) rune {
+ switch {
+ case code >= 'A' && code <= 'Z':
+ return code - 'A'
+ case code >= 'a' && code <= 'z':
+ return code - 'a'
+ case code >= '0' && code <= '9':
+ return code - '0' + 26
+ }
+ return errdigit
+}
+
+// lettercode finds the base36 byte (a-z0-9) for the given digit value.
+func lettercode(digit rune) rune {
+ switch {
+ case digit >= 0 && digit <= 25:
+ return digit + 'a'
+ case digit >= 26 && digit <= 36:
+ return digit - 26 + '0'
+ }
+ panic("dns: not reached")
+}
+
+// adapt calculates next bias to be used for next iteration delta.
+func adapt(delta rune, numpoints int, firsttime bool) rune {
+ if firsttime {
+ delta /= _DAMP
+ } else {
+ delta /= 2
+ }
+
+ var k rune
+ for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE {
+ delta /= _BASE - _MIN
+ }
+
+ return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW)
+}
+
+// next finds the minimal rune (the one with the lowest codepoint value) that is equal to or above the boundary.
+func next(b []rune, boundary rune) rune {
+ if len(b) == 0 {
+ panic("dns: invalid set of runes to determine next one")
+ }
+ m := b[0]
+ for _, x := range b[1:] {
+ if x >= boundary && (m < boundary || x < m) {
+ m = x
+ }
+ }
+ return m
+}
+
+// preprune converts a unicode rune to lower case. At this time it does not
+// support everything described in the RFCs.
+func preprune(r rune) rune {
+ if unicode.IsUpper(r) {
+ r = unicode.ToLower(r)
+ }
+ return r
+}
+
+// tfunc is a function that helps calculate each character weight.
+func tfunc(k, bias rune) rune {
+ switch {
+ case k <= bias:
+ return _MIN
+ case k >= bias+_MAX:
+ return _MAX
+ }
+ return k - bias
+}
+
+// needToPunycode returns true for strings that require punycode encoding
+// (contain unicode characters).
+func needToPunycode(s string) bool {
+ // This function is very similar to bytes.Runes. We don't use bytes.Runes
+ // because it makes a heap allocation that's not needed here.
+ for i := 0; len(s) > 0; i++ {
+ r, l := utf8.DecodeRuneInString(s)
+ if r > 0x7f {
+ return true
+ }
+ s = s[l:]
+ }
+ return false
+}
+
+// needFromPunycode returns true for strings that require punycode decoding.
+func needFromPunycode(s string) bool {
+ if s == "." {
+ return false
+ }
+
+ off := 0
+ end := false
+ pl := len(_PREFIX)
+ sl := len(s)
+
+ // If s starts with _PREFIX.
+ if sl > pl && s[off:off+pl] == _PREFIX {
+ return true
+ }
+
+ for {
+ // Find the part after the next ".".
+ off, end = dns.NextLabel(s, off)
+ if end {
+ return false
+ }
+ // If this part starts with _PREFIX.
+ if sl-off > pl && s[off:off+pl] == _PREFIX {
+ return true
+ }
+ }
+}
+
+// encode transforms Unicode input bytes (that represent a DNS label) into a
+// punycode bytestream. It returns nil if there is an invalid character in
+// the label.
+func encode(input []byte) []byte {
+ n, bias := _N, _BIAS
+
+ b := bytes.Runes(input)
+ for i := range b {
+ if !isValidRune(b[i]) {
+ return nil
+ }
+
+ b[i] = preprune(b[i])
+ }
+
+ basic := make([]byte, 0, len(b))
+ for _, ltr := range b {
+ if ltr <= 0x7f {
+ basic = append(basic, byte(ltr))
+ }
+ }
+ basiclen := len(basic)
+ fulllen := len(b)
+ if basiclen == fulllen {
+ return basic
+ }
+
+ var out bytes.Buffer
+
+ out.WriteString(_PREFIX)
+ if basiclen > 0 {
+ out.Write(basic)
+ out.WriteByte(_DELIMITER)
+ }
+
+ var (
+ ltr, nextltr rune
+ delta, q rune // delta calculation (see rfc)
+ t, k, cp rune // weight and codepoint calculation
+ )
+
+ s := &bytes.Buffer{}
+ for h := basiclen; h < fulllen; n, delta = n+1, delta+1 {
+ nextltr = next(b, n)
+ s.Truncate(0)
+ s.WriteRune(nextltr)
+ delta, n = delta+(nextltr-n)*rune(h+1), nextltr
+
+ for _, ltr = range b {
+ if ltr < n {
+ delta++
+ }
+ if ltr == n {
+ q = delta
+ for k = _BASE; ; k += _BASE {
+ t = tfunc(k, bias)
+ if q < t {
+ break
+ }
+ cp = t + ((q - t) % (_BASE - t))
+ out.WriteRune(lettercode(cp))
+ q = (q - t) / (_BASE - t)
+ }
+
+ out.WriteRune(lettercode(q))
+
+ bias = adapt(delta, h+1, h == basiclen)
+ h, delta = h+1, 0
+ }
+ }
+ }
+ return out.Bytes()
+}
+
+// decode transforms punycode input bytes (that represent a DNS label) into a Unicode bytestream.
+func decode(b []byte) []byte {
+ src := b // b is advanced below; keep the original so it can be returned on error
+
+ n, bias := _N, _BIAS
+ if !bytes.HasPrefix(b, []byte(_PREFIX)) {
+ return b
+ }
+ out := make([]rune, 0, len(b))
+ b = b[len(_PREFIX):]
+ for pos := len(b) - 1; pos >= 0; pos-- {
+ // only the last delimiter is of interest to us
+ if b[pos] == _DELIMITER {
+ out = append(out, bytes.Runes(b[:pos])...)
+ b = b[pos+1:] // trim source string
+ break
+ }
+ }
+ if len(b) == 0 {
+ return src
+ }
+ var (
+ i, oldi, w rune
+ ch byte
+ t, digit rune
+ ln int
+ )
+
+ for i = 0; len(b) > 0; i++ {
+ oldi, w = i, 1
+ for k := _BASE; len(b) > 0; k += _BASE {
+ ch, b = b[0], b[1:]
+ digit = digitval(rune(ch))
+ if digit == errdigit {
+ return src
+ }
+ i += digit * w
+ if i < 0 {
+ // safety check for rune overflow
+ return src
+ }
+
+ t = tfunc(k, bias)
+ if digit < t {
+ break
+ }
+
+ w *= _BASE - t
+ }
+ ln = len(out) + 1
+ bias = adapt(i-oldi, ln, oldi == 0)
+ n += i / rune(ln)
+ i = i % rune(ln)
+ // insert
+ out = append(out, 0)
+ copy(out[i+1:], out[i:])
+ out[i] = n
+ }
+
+ var ret bytes.Buffer
+ for _, r := range out {
+ ret.WriteRune(r)
+ }
+ return ret.Bytes()
+}
+
+// isValidRune checks if the character is valid. We look up the character's
+// property in the code points list. For now we do not check the special
+// rules for characters with a contextual property.
+func isValidRune(r rune) bool {
+ return findProperty(r) == propertyPVALID
+}
+
+// findProperty looks up the code point property of the given character.
+// It uses a binary search over the slice of ordered code point ranges
+// (O(log n) lookups).
+func findProperty(r rune) property {
+ imin, imax := 0, len(codePoints)
+
+ for imax >= imin {
+ imid := (imin + imax) / 2
+
+ codePoint := codePoints[imid]
+ if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) {
+ return codePoint.state
+ }
+
+ if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) {
+ imin = imid + 1
+ } else {
+ imax = imid - 1
+ }
+ }
+
+ return propertyUnknown
+}
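A minimal usage sketch of the vendored package added above (assuming the vendored import path github.com/miekg/dns/idn and a hypothetical main package): only labels that actually need encoding receive the xn-- ACE prefix, while ASCII labels and the trailing root dot pass through unchanged.

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns/idn"
    )

    func main() {
    	// "www" and "example" stay as-is; the Hangul label becomes an xn-- label.
    	ace := idn.ToPunycode("www.테스트.example.")
    	fmt.Println(ace) // www.xn--9t4b11yi5a.example.

    	// FromPunycode restores the original unicode form.
    	fmt.Println(idn.FromPunycode(ace)) // www.테스트.example.
    }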
diff --git a/vendor/github.com/miekg/dns/idn/punycode_test.go b/vendor/github.com/miekg/dns/idn/punycode_test.go
new file mode 100644
index 000000000..9c9a15f0b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/idn/punycode_test.go
@@ -0,0 +1,116 @@
+package idn
+
+import (
+ "strings"
+ "testing"
+)
+
+var testcases = [][2]string{
+ {"", ""},
+ {"a", "a"},
+ {"a-b", "a-b"},
+ {"a-b-c", "a-b-c"},
+ {"abc", "abc"},
+ {"Ñ", "xn--41a"},
+ {"zÑ", "xn--z-0ub"},
+ {"ÑZ", "xn--z-zub"},
+ {"а-Ñ", "xn----7sb8g"},
+ {"إختبار", "xn--kgbechtv"},
+ {"آزمایشی", "xn--hgbk6aj7f53bba"},
+ {"测试", "xn--0zwm56d"},
+ {"測試", "xn--g6w251d"},
+ {"иÑпытание", "xn--80akhbyknj4f"},
+ {"परीकà¥à¤·à¤¾", "xn--11b5bs3a9aj6g"},
+ {"δοκιμή", "xn--jxalpdlp"},
+ {"테스트", "xn--9t4b11yi5a"},
+ {"טעסט", "xn--deba0ad"},
+ {"テスト", "xn--zckzah"},
+ {"பரிடà¯à®šà¯ˆ", "xn--hlcj6aya9esc7a"},
+ {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"},
+ {"σ", "xn--4xa"},
+}
+
+func TestEncodeDecodePunycode(t *testing.T) {
+ for _, tst := range testcases {
+ enc := encode([]byte(tst[0]))
+ if string(enc) != tst[1] {
+ t.Errorf("%s encodeded as %s but should be %s", tst[0], enc, tst[1])
+ }
+ dec := decode([]byte(tst[1]))
+ if string(dec) != strings.ToLower(tst[0]) {
+ t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0]))
+ }
+ }
+}
+
+func TestToFromPunycode(t *testing.T) {
+ for _, tst := range testcases {
+ // assert unicode.com == punycode.com
+ full := ToPunycode(tst[0] + ".com")
+ if full != tst[1]+".com" {
+ t.Errorf("invalid result from string conversion to punycode, %s and should be %s.com", full, tst[1])
+ }
+ // assert punycode.punycode == unicode.unicode
+ decoded := FromPunycode(tst[1] + "." + tst[1])
+ if decoded != strings.ToLower(tst[0]+"."+tst[0]) {
+ t.Errorf("invalid result from string conversion to punycode, %s and should be %s.%s", decoded, tst[0], tst[0])
+ }
+ }
+}
+
+func TestEncodeDecodeFinalPeriod(t *testing.T) {
+ for _, tst := range testcases {
+ // assert unicode.com. == punycode.com.
+ full := ToPunycode(tst[0] + ".")
+ if full != tst[1]+"." {
+ t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v and should be %#v", full, tst[1]+".")
+ }
+ // assert punycode.com. == unicode.com.
+ decoded := FromPunycode(tst[1] + ".")
+ if decoded != strings.ToLower(tst[0]+".") {
+ t.Errorf("invalid result from string conversion to punycode when period added, %#v and should be %#v", decoded, tst[0]+".")
+ }
+ full = ToPunycode(tst[0])
+ if full != tst[1] {
+ t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v and should be %#v", full, tst[1]+".")
+ }
+ // assert punycode == unicode
+ decoded = FromPunycode(tst[1])
+ if decoded != strings.ToLower(tst[0]) {
+ t.Errorf("invalid result from string conversion to punycode when no period added, %#v and should be %#v", decoded, strings.ToLower(tst[0]))
+ }
+ }
+}
+
+var invalidACEs = []string{
+ "xn--*",
+ "xn--",
+ "xn---",
+ "xn--a000000000",
+}
+
+func TestInvalidPunycode(t *testing.T) {
+ for _, d := range invalidACEs {
+ s := FromPunycode(d)
+ if s != d {
+ t.Errorf("Changed invalid name %s to %#v", d, s)
+ }
+ }
+}
+
+// You can verify which labels are valid by comparing against the Verisign
+// website: http://mct.verisign-grs.com/
+var invalidUnicodes = []string{
+ "Σ",
+ "ЯZ",
+ "ИÑпытание",
+}
+
+func TestInvalidUnicodes(t *testing.T) {
+ for _, d := range invalidUnicodes {
+ s := ToPunycode(d)
+ if s != "" {
+ t.Errorf("Changed invalid name %s to %#v", d, s)
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/miekg/dns/issue_test.go
new file mode 100644
index 000000000..3025fc98c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/issue_test.go
@@ -0,0 +1,23 @@
+package dns
+
+// Tests that cover a specific issue.
+
+import "testing"
+
+func TestTCPRtt(t *testing.T) {
+ m := new(Msg)
+ m.RecursionDesired = true
+ m.SetQuestion("example.org.", TypeA)
+
+ c := &Client{}
+ for _, proto := range []string{"udp", "tcp"} {
+ c.Net = proto
+ _, rtt, err := c.Exchange(m, "8.8.4.4:53")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rtt == 0 {
+ t.Fatalf("expecting non zero rtt %s, got zero", c.Net)
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
new file mode 100644
index 000000000..fca5c7dd2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/labels.go
@@ -0,0 +1,168 @@
+package dns
+
+// Holds a bunch of helper functions for dealing with labels.
+
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}
+// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
+// The root label (.) returns nil. Note that using
+// strings.Split(s, ".") will work in most cases, but does not handle
+// escaped dots (\.) for instance.
+// s must be a syntactically valid domain name, see IsDomainName.
+func SplitDomainName(s string) (labels []string) {
+ if len(s) == 0 {
+ return nil
+ }
+ fqdnEnd := 0 // offset of the final '.' or the length of the name
+ idx := Split(s)
+ begin := 0
+ if s[len(s)-1] == '.' {
+ fqdnEnd = len(s) - 1
+ } else {
+ fqdnEnd = len(s)
+ }
+
+ switch len(idx) {
+ case 0:
+ return nil
+ case 1:
+ // no-op
+ default:
+ end := 0
+ for i := 1; i < len(idx); i++ {
+ end = idx[i]
+ labels = append(labels, s[begin:end-1])
+ begin = end
+ }
+ }
+
+ labels = append(labels, s[begin:fqdnEnd])
+ return labels
+}
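+
+// For example, per the cases in labels_test.go, SplitDomainName(`www\.miek.nl`)
+// should return []string{`www\.miek`, "nl"}: the escaped dot does not split
+// the label, unlike a plain strings.Split.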
+
+// CompareDomainName compares the names s1 and s2 and
+// returns how many labels they have in common starting from the *right*.
+// The comparison stops at the first inequality. The names are not downcased
+// before the comparison.
+//
+// www.miek.nl. and miek.nl. have two labels in common: miek and nl
+// www.miek.nl. and www.bla.nl. have one label in common: nl
+//
+// s1 and s2 must be syntactically valid domain names.
+func CompareDomainName(s1, s2 string) (n int) {
+ s1 = Fqdn(s1)
+ s2 = Fqdn(s2)
+ l1 := Split(s1)
+ l2 := Split(s2)
+
+ // the first check: root label
+ if l1 == nil || l2 == nil {
+ return
+ }
+
+ j1 := len(l1) - 1 // end
+ i1 := len(l1) - 2 // start
+ j2 := len(l2) - 1
+ i2 := len(l2) - 2
+ // the second check can be done here: last/only label
+ // before we fall through into the for-loop below
+ if s1[l1[j1]:] == s2[l2[j2]:] {
+ n++
+ } else {
+ return
+ }
+ for {
+ if i1 < 0 || i2 < 0 {
+ break
+ }
+ if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
+ n++
+ } else {
+ break
+ }
+ j1--
+ i1--
+ j2--
+ i2--
+ }
+ return
+}
+
+// CountLabel counts the number of labels in the string s.
+// s must be a syntactically valid domain name.
+func CountLabel(s string) (labels int) {
+ if s == "." {
+ return
+ }
+ off := 0
+ end := false
+ for {
+ off, end = NextLabel(s, off)
+ labels++
+ if end {
+ return
+ }
+ }
+}
+
+// Split splits a name s into its label indexes.
+// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
+// The root name (.) returns nil. Also see SplitDomainName.
+// s must be a syntactically valid domain name.
+func Split(s string) []int {
+ if s == "." {
+ return nil
+ }
+ idx := make([]int, 1, 3)
+ off := 0
+ end := false
+
+ for {
+ off, end = NextLabel(s, off)
+ if end {
+ return idx
+ }
+ idx = append(idx, off)
+ }
+}
+
+// NextLabel returns the index of the start of the next label in the
+// string s starting at offset.
+// The bool end is true when the end of the string has been reached.
+// Also see PrevLabel.
+func NextLabel(s string, offset int) (i int, end bool) {
+ quote := false
+ for i = offset; i < len(s)-1; i++ {
+ switch s[i] {
+ case '\\':
+ quote = !quote
+ default:
+ quote = false
+ case '.':
+ if quote {
+ quote = !quote
+ continue
+ }
+ return i + 1, false
+ }
+ }
+ return i + 1, true
+}
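+
+// For example, NextLabel("www.miek.nl.", 0) should return (4, false): offset 4
+// is where the "miek" label starts and the end of the name was not reached.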
+
+// PrevLabel returns the index of the label when starting from the right and
+// jumping n labels to the left.
+// The bool start is true when the start of the string has been overshot.
+// Also see NextLabel.
+func PrevLabel(s string, n int) (i int, start bool) {
+ if n == 0 {
+ return len(s), false
+ }
+ lab := Split(s)
+ if lab == nil {
+ return 0, true
+ }
+ if n > len(lab) {
+ return 0, true
+ }
+ return lab[len(lab)-n], false
+}
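+
+// For example, per labels_test.go, PrevLabel("www.miek.nl.", 2) should return
+// (4, false): offset 4 is where "miek.nl." starts when jumping two labels in
+// from the right.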
diff --git a/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/miekg/dns/labels_test.go
new file mode 100644
index 000000000..536757d52
--- /dev/null
+++ b/vendor/github.com/miekg/dns/labels_test.go
@@ -0,0 +1,200 @@
+package dns
+
+import "testing"
+
+func TestCompareDomainName(t *testing.T) {
+ s1 := "www.miek.nl."
+ s2 := "miek.nl."
+ s3 := "www.bla.nl."
+ s4 := "nl.www.bla."
+ s5 := "nl"
+ s6 := "miek.nl"
+
+ if CompareDomainName(s1, s2) != 2 {
+ t.Errorf("%s with %s should be %d", s1, s2, 2)
+ }
+ if CompareDomainName(s1, s3) != 1 {
+ t.Errorf("%s with %s should be %d", s1, s3, 1)
+ }
+ if CompareDomainName(s3, s4) != 0 {
+ t.Errorf("%s with %s should be %d", s3, s4, 0)
+ }
+ // Non qualified tests
+ if CompareDomainName(s1, s5) != 1 {
+ t.Errorf("%s with %s should be %d", s1, s5, 1)
+ }
+ if CompareDomainName(s1, s6) != 2 {
+ t.Errorf("%s with %s should be %d", s1, s5, 2)
+ }
+
+ if CompareDomainName(s1, ".") != 0 {
+ t.Errorf("%s with %s should be %d", s1, s5, 0)
+ }
+ if CompareDomainName(".", ".") != 0 {
+ t.Errorf("%s with %s should be %d", ".", ".", 0)
+ }
+}
+
+func TestSplit(t *testing.T) {
+ splitter := map[string]int{
+ "www.miek.nl.": 3,
+ "www.miek.nl": 3,
+ "www..miek.nl": 4,
+ `www\.miek.nl.`: 2,
+ `www\\.miek.nl.`: 3,
+ ".": 0,
+ "nl.": 1,
+ "nl": 1,
+ "com.": 1,
+ ".com.": 2,
+ }
+ for s, i := range splitter {
+ if x := len(Split(s)); x != i {
+ t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s))
+ } else {
+ t.Logf("%s %v", s, Split(s))
+ }
+ }
+}
+
+func TestSplit2(t *testing.T) {
+ splitter := map[string][]int{
+ "www.miek.nl.": {0, 4, 9},
+ "www.miek.nl": {0, 4, 9},
+ "nl": {0},
+ }
+ for s, i := range splitter {
+ x := Split(s)
+ switch len(i) {
+ case 1:
+ if x[0] != i[0] {
+ t.Errorf("labels should be %v, got %v: %s", i, x, s)
+ }
+ default:
+ if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
+ t.Errorf("labels should be %v, got %v: %s", i, x, s)
+ }
+ }
+ }
+}
+
+func TestPrevLabel(t *testing.T) {
+ type prev struct {
+ string
+ int
+ }
+ prever := map[prev]int{
+ prev{"www.miek.nl.", 0}: 12,
+ prev{"www.miek.nl.", 1}: 9,
+ prev{"www.miek.nl.", 2}: 4,
+
+ prev{"www.miek.nl", 0}: 11,
+ prev{"www.miek.nl", 1}: 9,
+ prev{"www.miek.nl", 2}: 4,
+
+ prev{"www.miek.nl.", 5}: 0,
+ prev{"www.miek.nl", 5}: 0,
+
+ prev{"www.miek.nl.", 3}: 0,
+ prev{"www.miek.nl", 3}: 0,
+ }
+ for s, i := range prever {
+ x, ok := PrevLabel(s.string, s.int)
+ if i != x {
+ t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string)
+ }
+ }
+}
+
+func TestCountLabel(t *testing.T) {
+ splitter := map[string]int{
+ "www.miek.nl.": 3,
+ "www.miek.nl": 3,
+ "nl": 1,
+ ".": 0,
+ }
+ for s, i := range splitter {
+ x := CountLabel(s)
+ if x != i {
+ t.Errorf("CountLabel should have %d, got %d", i, x)
+ }
+ }
+}
+
+func TestSplitDomainName(t *testing.T) {
+ labels := map[string][]string{
+ "miek.nl": {"miek", "nl"},
+ ".": nil,
+ "www.miek.nl.": {"www", "miek", "nl"},
+ "www.miek.nl": {"www", "miek", "nl"},
+ "www..miek.nl": {"www", "", "miek", "nl"},
+ `www\.miek.nl`: {`www\.miek`, "nl"},
+ `www\\.miek.nl`: {`www\\`, "miek", "nl"},
+ ".www.miek.nl.": {"", "www", "miek", "nl"},
+ }
+domainLoop:
+ for domain, splits := range labels {
+ parts := SplitDomainName(domain)
+ if len(parts) != len(splits) {
+ t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
+ continue domainLoop
+ }
+ for i := range parts {
+ if parts[i] != splits[i] {
+ t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
+ continue domainLoop
+ }
+ }
+ }
+}
+
+func TestIsDomainName(t *testing.T) {
+ type ret struct {
+ ok bool
+ lab int
+ }
+ names := map[string]*ret{
+ "..": {false, 1},
+ "@.": {true, 1},
+ "www.example.com": {true, 3},
+ "www.e%ample.com": {true, 3},
+ "www.example.com.": {true, 3},
+ "mi\\k.nl.": {true, 2},
+ "mi\\k.nl": {true, 2},
+ }
+ for d, ok := range names {
+ l, k := IsDomainName(d)
+ if ok.ok != k || ok.lab != l {
+ t.Errorf(" got %v %d for %s ", k, l, d)
+ t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d)
+ }
+ }
+}
+
+func BenchmarkSplitLabels(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Split("www.example.com")
+ }
+}
+
+func BenchmarkLenLabels(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ CountLabel("www.example.com")
+ }
+}
+
+func BenchmarkCompareLabels(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ CompareDomainName("www.example.com", "aa.example.com")
+ }
+}
+
+func BenchmarkIsSubDomain(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ IsSubDomain("www.example.com", "aa.example.com")
+ IsSubDomain("example.com", "aa.example.com")
+ IsSubDomain("miek.nl", "aa.example.com")
+ }
+}
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
new file mode 100644
index 000000000..ec2f7ab7b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -0,0 +1,1231 @@
+// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
+// and to - Pack() - wire format.
+// All the packers and unpackers take a (msg []byte, off int)
+// and return (off1 int, err error). If they return a non-nil error,
+// they also return off1==len(msg), so that the next unpacker will
+// also fail. This lets us avoid error checks until the end of a
+// packing sequence.
+
+package dns
+
+//go:generate go run msg_generate.go
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ "math/big"
+ "math/rand"
+ "strconv"
+)
+
+func init() {
+ // Initialize default math/rand source using crypto/rand to provide better
+ // security without the performance trade-off.
+ buf := make([]byte, 8)
+ _, err := crand.Read(buf)
+ if err != nil {
+ // Failed to read from cryptographic source, fallback to default initial
+ // seed (1) by returning early
+ return
+ }
+ seed := binary.BigEndian.Uint64(buf)
+ rand.Seed(int64(seed))
+}
+
+const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
+
+var (
+ ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
+ ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
+ ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
+ ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
+ ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode indicates a bad extended rcode.
+ ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
+ ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
+ ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
+ ErrKey error = &Error{err: "bad key"}
+ ErrKeySize error = &Error{err: "bad key size"}
+ ErrNoSig error = &Error{err: "no signature found"}
+ ErrPrivKey error = &Error{err: "bad private key"}
+ ErrRcode error = &Error{err: "bad rcode"}
+ ErrRdata error = &Error{err: "bad rdata"}
+ ErrRRset error = &Error{err: "bad rrset"}
+ ErrSecret error = &Error{err: "no secrets defined"}
+ ErrShortRead error = &Error{err: "short read"}
+ ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
+ ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers.
+ ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication.
+ ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
+)
+
+// Id, by default, returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough. Since this
+// is a variable, the function can be reassigned to a custom function.
+// For instance, to make it return a static value:
+//
+// dns.Id = func() uint16 { return 3 }
+var Id func() uint16 = id
+
+// id returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough.
+func id() uint16 {
+ id32 := rand.Uint32()
+ return uint16(id32)
+}
+
+// MsgHdr is a manually-unpacked version of (id, bits).
+type MsgHdr struct {
+ Id uint16
+ Response bool
+ Opcode int
+ Authoritative bool
+ Truncated bool
+ RecursionDesired bool
+ RecursionAvailable bool
+ Zero bool
+ AuthenticatedData bool
+ CheckingDisabled bool
+ Rcode int
+}
+
+// Msg contains the layout of a DNS message.
+type Msg struct {
+ MsgHdr
+ Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
+ Question []Question // Holds the RR(s) of the question section.
+ Answer []RR // Holds the RR(s) of the answer section.
+ Ns []RR // Holds the RR(s) of the authority section.
+ Extra []RR // Holds the RR(s) of the additional section.
+}
+
+// ClassToString maps Classes to strings for each CLASS wire type.
+var ClassToString = map[uint16]string{
+ ClassINET: "IN",
+ ClassCSNET: "CS",
+ ClassCHAOS: "CH",
+ ClassHESIOD: "HS",
+ ClassNONE: "NONE",
+ ClassANY: "ANY",
+}
+
+// OpcodeToString maps Opcodes to strings.
+var OpcodeToString = map[int]string{
+ OpcodeQuery: "QUERY",
+ OpcodeIQuery: "IQUERY",
+ OpcodeStatus: "STATUS",
+ OpcodeNotify: "NOTIFY",
+ OpcodeUpdate: "UPDATE",
+}
+
+// RcodeToString maps Rcodes to strings.
+var RcodeToString = map[int]string{
+ RcodeSuccess: "NOERROR",
+ RcodeFormatError: "FORMERR",
+ RcodeServerFailure: "SERVFAIL",
+ RcodeNameError: "NXDOMAIN",
+ RcodeNotImplemented: "NOTIMPL",
+ RcodeRefused: "REFUSED",
+ RcodeYXDomain: "YXDOMAIN", // See RFC 2136
+ RcodeYXRrset: "YXRRSET",
+ RcodeNXRrset: "NXRRSET",
+ RcodeNotAuth: "NOTAUTH",
+ RcodeNotZone: "NOTZONE",
+ RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
+ // RcodeBadVers: "BADVERS",
+ RcodeBadKey: "BADKEY",
+ RcodeBadTime: "BADTIME",
+ RcodeBadMode: "BADMODE",
+ RcodeBadName: "BADNAME",
+ RcodeBadAlg: "BADALG",
+ RcodeBadTrunc: "BADTRUNC",
+ RcodeBadCookie: "BADCOOKIE",
+}
+
+// Domain names are a sequence of counted strings
+// split at the dots. They end with a zero-length string.
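+// For example, "www.example.com." is encoded on the wire as the counted
+// labels 3 'w' 'w' 'w' 7 'e' 'x' 'a' 'm' 'p' 'l' 'e' 3 'c' 'o' 'm',
+// followed by the terminating zero-length label.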
+
+// PackDomainName packs a domain name s into msg[off:].
+// If compression is wanted compress must be true and the compression
+// map needs to hold a mapping between domain names and offsets
+// pointing into msg.
+func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ off1, _, err = packDomainName(s, msg, off, compression, compress)
+ return
+}
+
+func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
+ // special case if msg == nil
+ lenmsg := 256
+ if msg != nil {
+ lenmsg = len(msg)
+ }
+ ls := len(s)
+ if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
+ return off, 0, nil
+ }
+ // If not fully qualified, error out, but only when msg != nil. #ugly
+ switch {
+ case msg == nil:
+ if s[ls-1] != '.' {
+ s += "."
+ ls++
+ }
+ case msg != nil:
+ if s[ls-1] != '.' {
+ return lenmsg, 0, ErrFqdn
+ }
+ }
+ // Each dot ends a segment of the name.
+ // We trade each dot byte for a length byte.
+ // Except for escaped dots (\.), which are normal dots.
+ // There is also a trailing zero.
+
+ // Compression
+ nameoffset := -1
+ pointer := -1
+ // Emit sequence of counted strings, chopping at dots.
+ begin := 0
+ bs := []byte(s)
+ roBs, bsFresh, escapedDot := s, true, false
+ for i := 0; i < ls; i++ {
+ if bs[i] == '\\' {
+ for j := i; j < ls-1; j++ {
+ bs[j] = bs[j+1]
+ }
+ ls--
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ // check for \DDD
+ if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ bs[i] = dddToByte(bs[i:])
+ for j := i + 1; j < ls-2; j++ {
+ bs[j] = bs[j+2]
+ }
+ ls -= 2
+ } else if bs[i] == 't' {
+ bs[i] = '\t'
+ } else if bs[i] == 'r' {
+ bs[i] = '\r'
+ } else if bs[i] == 'n' {
+ bs[i] = '\n'
+ }
+ escapedDot = bs[i] == '.'
+ bsFresh = false
+ continue
+ }
+
+ if bs[i] == '.' {
+ if i > 0 && bs[i-1] == '.' && !escapedDot {
+ // two dots back to back is not legal
+ return lenmsg, labels, ErrRdata
+ }
+ if i-begin >= 1<<6 { // top two bits of length must be clear
+ return lenmsg, labels, ErrRdata
+ }
+ // off can already (we're in a loop) be bigger than len(msg)
+ // this happens when a name isn't fully qualified
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ if msg != nil {
+ msg[off] = byte(i - begin)
+ }
+ offset := off
+ off++
+ for j := begin; j < i; j++ {
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ if msg != nil {
+ msg[off] = bs[j]
+ }
+ off++
+ }
+ if compress && !bsFresh {
+ roBs = string(bs)
+ bsFresh = true
+ }
+ // Don't try to compress '.'
+ if compress && roBs[begin:] != "." {
+ if p, ok := compression[roBs[begin:]]; !ok {
+ // Only offsets smaller than this can be used.
+ if offset < maxCompressionOffset {
+ compression[roBs[begin:]] = offset
+ }
+ } else {
+ // The first hit is the longest matching dname
+ // keep the pointer offset we get back and store
+ // the offset of the current name, because that's
+ // where we need to insert the pointer later
+
+ // If compress is true, we're allowed to compress this dname
+ if pointer == -1 && compress {
+ pointer = p // Where to point to
+ nameoffset = offset // Where to point from
+ break
+ }
+ }
+ }
+ labels++
+ begin = i + 1
+ }
+ escapedDot = false
+ }
+ // Root label is special
+ if len(bs) == 1 && bs[0] == '.' {
+ return off, labels, nil
+ }
+ // If we did compression and we find something add the pointer here
+ if pointer != -1 {
+ // We have two bytes (14 bits) to put the pointer in
+ // if msg == nil, we will never do compression
+ binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000))
+ off = nameoffset + 1
+ goto End
+ }
+ if msg != nil && off < len(msg) {
+ msg[off] = 0
+ }
+End:
+ off++
+ return off, labels, nil
+}
+
+// Unpack a domain name.
+// In addition to the simple sequences of counted strings above,
+// domain names are allowed to refer to strings elsewhere in the
+// packet, to avoid repeating common suffixes when returning
+// many entries in a single domain. The pointers are marked
+// by a length byte with the top two bits set. Ignoring those
+// two bits, that byte and the next give a 14 bit offset from msg[0]
+// where we should pick up the trail.
+// Note that if we jump elsewhere in the packet,
+// we return off1 == the offset after the first pointer we found,
+// which is where the next record will start.
+// In theory, the pointers are only allowed to jump backward.
+// We let them jump anywhere and stop jumping after a while.
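+// For example, the two bytes 0xC0 0x0C have the top two bits set and encode
+// offset 12, i.e. a pointer to the name that starts right after the 12-byte
+// message header.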
+
+// UnpackDomainName unpacks a domain name into a string.
+func UnpackDomainName(msg []byte, off int) (string, int, error) {
+ s := make([]byte, 0, 64)
+ off1 := 0
+ lenmsg := len(msg)
+ ptr := 0 // number of pointers followed
+Loop:
+ for {
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c := int(msg[off])
+ off++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // end of name
+ break Loop
+ }
+ // literal string
+ if off+c > lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ for j := off; j < off+c; j++ {
+ switch b := msg[j]; b {
+ case '.', '(', ')', ';', ' ', '@':
+ fallthrough
+ case '"', '\\':
+ s = append(s, '\\', b)
+ case '\t':
+ s = append(s, '\\', 't')
+ case '\r':
+ s = append(s, '\\', 'r')
+ default:
+ if b < 32 || b >= 127 { // unprintable use \DDD
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ s = append(s, '.')
+ off += c
+ case 0xC0:
+ // pointer to somewhere else in msg.
+ // remember location after first ptr,
+ // since that's how many bytes we consumed.
+ // also, don't follow too many pointers --
+ // maybe there's a loop.
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c1 := msg[off]
+ off++
+ if ptr == 0 {
+ off1 = off
+ }
+ if ptr++; ptr > 10 {
+ return "", lenmsg, &Error{err: "too many compression pointers"}
+ }
+ off = (c^0xC0)<<8 | int(c1)
+ default:
+ // 0x80 and 0x40 are reserved
+ return "", lenmsg, ErrRdata
+ }
+ }
+ if ptr == 0 {
+ off1 = off
+ }
+ if len(s) == 0 {
+ s = []byte(".")
+ }
+ return string(s), off1, nil
+}
+
+func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
+ if len(txt) == 0 {
+ if offset >= len(msg) {
+ return offset, ErrBuf
+ }
+ msg[offset] = 0
+ return offset, nil
+ }
+ var err error
+ for i := range txt {
+ if len(txt[i]) > len(tmp) {
+ return offset, ErrBuf
+ }
+ offset, err = packTxtString(txt[i], msg, offset, tmp)
+ if err != nil {
+ return offset, err
+ }
+ }
+ return offset, nil
+}
+
+func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ lenByteOffset := offset
+ if offset >= len(msg) || len(s) > len(tmp) {
+ return offset, ErrBuf
+ }
+ offset++
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else if bs[i] == 't' {
+ msg[offset] = '\t'
+ } else if bs[i] == 'r' {
+ msg[offset] = '\r'
+ } else if bs[i] == 'n' {
+ msg[offset] = '\n'
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ l := offset - lenByteOffset - 1
+ if l > 255 {
+ return offset, &Error{err: "string exceeded 255 bytes in txt"}
+ }
+ msg[lenByteOffset] = byte(l)
+ return offset, nil
+}
+
+func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ if offset >= len(msg) || len(s) > len(tmp) {
+ return offset, ErrBuf
+ }
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ return offset, nil
+}
+
+func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
+ off = off0
+ var s string
+ for off < len(msg) && err == nil {
+ s, off, err = unpackTxtString(msg, off)
+ if err == nil {
+ ss = append(ss, s)
+ }
+ }
+ return
+}
+
+func unpackTxtString(msg []byte, offset int) (string, int, error) {
+ if offset+1 > len(msg) {
+ return "", offset, &Error{err: "overflow unpacking txt"}
+ }
+ l := int(msg[offset])
+ if offset+l+1 > len(msg) {
+ return "", offset, &Error{err: "overflow unpacking txt"}
+ }
+ s := make([]byte, 0, l)
+ for _, b := range msg[offset+1 : offset+1+l] {
+ switch b {
+ case '"', '\\':
+ s = append(s, '\\', b)
+ case '\t':
+ s = append(s, `\t`...)
+ case '\r':
+ s = append(s, `\r`...)
+ case '\n':
+ s = append(s, `\n`...)
+ default:
+ if b < 32 || b > 127 { // unprintable
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ offset += 1 + l
+ return string(s), offset, nil
+}
+
+// Helpers for dealing with escaped bytes
+func isDigit(b byte) bool { return b >= '0' && b <= '9' }
+
+func dddToByte(s []byte) byte {
+ return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
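+
+// For example, dddToByte([]byte("065")) is 'A' (decimal 65), which is how an
+// escaped \065 in presentation format maps back to a single byte.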
+
+// Helper function for packing and unpacking
+func intToBytes(i *big.Int, length int) []byte {
+ buf := i.Bytes()
+ if len(buf) < length {
+ b := make([]byte, length)
+ copy(b[length-len(buf):], buf)
+ return b
+ }
+ return buf
+}
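+
+// For example, intToBytes(big.NewInt(5), 2) should return []byte{0x00, 0x05},
+// left-padding the big-endian value to the requested length.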
+
+// PackRR packs a resource record rr into msg[off:].
+// See PackDomainName for documentation about the compression.
+func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ if rr == nil {
+ return len(msg), &Error{err: "nil rr"}
+ }
+
+ off1, err = rr.pack(msg, off, compression, compress)
+ if err != nil {
+ return len(msg), err
+ }
+ // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well.
+ if rawSetRdlength(msg, off, off1) {
+ return off1, nil
+ }
+ return off, ErrRdata
+}
+
+// UnpackRR unpacks msg[off:] into an RR.
+func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
+ h, off, msg, err := unpackHeader(msg, off)
+ if err != nil {
+ return nil, len(msg), err
+ }
+ end := off + int(h.Rdlength)
+
+ if fn, known := typeToUnpack[h.Rrtype]; !known {
+ rr, off, err = unpackRFC3597(h, msg, off)
+ } else {
+ rr, off, err = fn(h, msg, off)
+ }
+ if off != end {
+ return &h, end, &Error{err: "bad rdlength"}
+ }
+ return rr, off, err
+}
+
+// unpackRRslice unpacks msg[off:] into an []RR.
+// If we cannot unpack the whole array, then it will return nil
+func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
+ var r RR
+ // Optimistically make dst be the length that was sent
+ dst := make([]RR, 0, l)
+ for i := 0; i < l; i++ {
+ off1 := off
+ r, off, err = UnpackRR(msg, off)
+ if err != nil {
+ off = len(msg)
+ break
+ }
+ // If offset does not increase anymore, l is a lie
+ if off1 == off {
+ l = i
+ break
+ }
+ dst = append(dst, r)
+ }
+ if err != nil && off == len(msg) {
+ dst = nil
+ }
+ return dst, off, err
+}
+
+// Convert a MsgHdr to a string, with dig-like headers:
+//
+//;; opcode: QUERY, status: NOERROR, id: 48404
+//
+//;; flags: qr aa rd ra;
+func (h *MsgHdr) String() string {
+ if h == nil {
+ return "<nil> MsgHdr"
+ }
+
+ s := ";; opcode: " + OpcodeToString[h.Opcode]
+ s += ", status: " + RcodeToString[h.Rcode]
+ s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
+
+ s += ";; flags:"
+ if h.Response {
+ s += " qr"
+ }
+ if h.Authoritative {
+ s += " aa"
+ }
+ if h.Truncated {
+ s += " tc"
+ }
+ if h.RecursionDesired {
+ s += " rd"
+ }
+ if h.RecursionAvailable {
+ s += " ra"
+ }
+ if h.Zero { // Hmm
+ s += " z"
+ }
+ if h.AuthenticatedData {
+ s += " ad"
+ }
+ if h.CheckingDisabled {
+ s += " cd"
+ }
+
+ s += ";"
+ return s
+}
+
+// Pack packs a Msg: it is converted to wire format.
+// If dns.Compress is true the message will be in compressed wire format.
+func (dns *Msg) Pack() (msg []byte, err error) {
+ return dns.PackBuffer(nil)
+}
+
+// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
+// a new buffer is allocated.
+func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
+ // We use a similar function in tsig.go's stripTsig.
+ var (
+ dh Header
+ compression map[string]int
+ )
+
+ if dns.Compress {
+ compression = make(map[string]int) // Compression pointer mappings
+ }
+
+ if dns.Rcode < 0 || dns.Rcode > 0xFFF {
+ return nil, ErrRcode
+ }
+ if dns.Rcode > 0xF {
+ // Regular RCODE field is 4 bits
+ opt := dns.IsEdns0()
+ if opt == nil {
+ return nil, ErrExtendedRcode
+ }
+ opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
+ dns.Rcode &= 0xF
+ }
+
+ // Convert convenient Msg into wire-like Header.
+ dh.Id = dns.Id
+ dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
+ if dns.Response {
+ dh.Bits |= _QR
+ }
+ if dns.Authoritative {
+ dh.Bits |= _AA
+ }
+ if dns.Truncated {
+ dh.Bits |= _TC
+ }
+ if dns.RecursionDesired {
+ dh.Bits |= _RD
+ }
+ if dns.RecursionAvailable {
+ dh.Bits |= _RA
+ }
+ if dns.Zero {
+ dh.Bits |= _Z
+ }
+ if dns.AuthenticatedData {
+ dh.Bits |= _AD
+ }
+ if dns.CheckingDisabled {
+ dh.Bits |= _CD
+ }
+
+ // Prepare variable sized arrays.
+ question := dns.Question
+ answer := dns.Answer
+ ns := dns.Ns
+ extra := dns.Extra
+
+ dh.Qdcount = uint16(len(question))
+ dh.Ancount = uint16(len(answer))
+ dh.Nscount = uint16(len(ns))
+ dh.Arcount = uint16(len(extra))
+
+ // We need the uncompressed length here, because we first pack it and then compress it.
+ msg = buf
+ compress := dns.Compress
+ dns.Compress = false
+ if packLen := dns.Len() + 1; len(msg) < packLen {
+ msg = make([]byte, packLen)
+ }
+ dns.Compress = compress
+
+ // Pack it in: header and then the pieces.
+ off := 0
+ off, err = dh.pack(msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ for i := 0; i < len(question); i++ {
+ off, err = question[i].pack(msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(answer); i++ {
+ off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(ns); i++ {
+ off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(extra); i++ {
+ off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msg[:off], nil
+}
+
+// Unpack unpacks a binary message to a Msg structure.
+func (dns *Msg) Unpack(msg []byte) (err error) {
+ var (
+ dh Header
+ off int
+ )
+ if dh, off, err = unpackMsgHdr(msg, off); err != nil {
+ return err
+ }
+ if off == len(msg) {
+ return ErrTruncated
+ }
+
+ dns.Id = dh.Id
+ dns.Response = (dh.Bits & _QR) != 0
+ dns.Opcode = int(dh.Bits>>11) & 0xF
+ dns.Authoritative = (dh.Bits & _AA) != 0
+ dns.Truncated = (dh.Bits & _TC) != 0
+ dns.RecursionDesired = (dh.Bits & _RD) != 0
+ dns.RecursionAvailable = (dh.Bits & _RA) != 0
+ dns.Zero = (dh.Bits & _Z) != 0
+ dns.AuthenticatedData = (dh.Bits & _AD) != 0
+ dns.CheckingDisabled = (dh.Bits & _CD) != 0
+ dns.Rcode = int(dh.Bits & 0xF)
+
+ // Optimistically use the count given to us in the header
+ dns.Question = make([]Question, 0, int(dh.Qdcount))
+
+ for i := 0; i < int(dh.Qdcount); i++ {
+ off1 := off
+ var q Question
+ q, off, err = unpackQuestion(msg, off)
+ if err != nil {
+ // Even if Truncated is set, we will only set ErrTruncated if we
+ // actually got the questions
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
+ dh.Qdcount = uint16(i)
+ break
+ }
+ dns.Question = append(dns.Question, q)
+ }
+
+ dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
+ // The header counts might have been wrong so we need to update it
+ dh.Ancount = uint16(len(dns.Answer))
+ if err == nil {
+ dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
+ }
+ // The header counts might have been wrong so we need to update it
+ dh.Nscount = uint16(len(dns.Ns))
+ if err == nil {
+ dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
+ }
+ // The header counts might have been wrong so we need to update it
+ dh.Arcount = uint16(len(dns.Extra))
+
+ if off != len(msg) {
+ // TODO(miek) make this an error?
+ // use PackOpt to let people tell how detailed the error reporting should be?
+ // println("dns: extra bytes in dns packet", off, "<", len(msg))
+ } else if dns.Truncated {
+ // Whether we ran into an error or not, we want to return that it
+ // was truncated
+ err = ErrTruncated
+ }
+ return err
+}
+
+// Convert a complete message to a string with dig-like output.
+func (dns *Msg) String() string {
+ if dns == nil {
+ return "<nil> MsgHdr"
+ }
+ s := dns.MsgHdr.String() + " "
+ s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
+ s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
+ s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
+ s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
+ if len(dns.Question) > 0 {
+ s += "\n;; QUESTION SECTION:\n"
+ for i := 0; i < len(dns.Question); i++ {
+ s += dns.Question[i].String() + "\n"
+ }
+ }
+ if len(dns.Answer) > 0 {
+ s += "\n;; ANSWER SECTION:\n"
+ for i := 0; i < len(dns.Answer); i++ {
+ if dns.Answer[i] != nil {
+ s += dns.Answer[i].String() + "\n"
+ }
+ }
+ }
+ if len(dns.Ns) > 0 {
+ s += "\n;; AUTHORITY SECTION:\n"
+ for i := 0; i < len(dns.Ns); i++ {
+ if dns.Ns[i] != nil {
+ s += dns.Ns[i].String() + "\n"
+ }
+ }
+ }
+ if len(dns.Extra) > 0 {
+ s += "\n;; ADDITIONAL SECTION:\n"
+ for i := 0; i < len(dns.Extra); i++ {
+ if dns.Extra[i] != nil {
+ s += dns.Extra[i].String() + "\n"
+ }
+ }
+ }
+ return s
+}
+
+// Len returns the message length when in (un)compressed wire format.
+// If dns.Compress is true, compression is taken into account. Len()
+// is provided as a faster way to get the size of the resulting packet
+// than packing it, measuring the size and discarding the buffer.
+func (dns *Msg) Len() int {
+ // We always return one more than needed.
+ l := 12 // Message header is always 12 bytes
+ var compression map[string]int
+ if dns.Compress {
+ compression = make(map[string]int)
+ }
+ for i := 0; i < len(dns.Question); i++ {
+ l += dns.Question[i].len()
+ if dns.Compress {
+ compressionLenHelper(compression, dns.Question[i].Name)
+ }
+ }
+ for i := 0; i < len(dns.Answer); i++ {
+ if dns.Answer[i] == nil {
+ continue
+ }
+ l += dns.Answer[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Answer[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Answer[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Answer[i])
+ }
+ }
+ for i := 0; i < len(dns.Ns); i++ {
+ if dns.Ns[i] == nil {
+ continue
+ }
+ l += dns.Ns[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Ns[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Ns[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Ns[i])
+ }
+ }
+ for i := 0; i < len(dns.Extra); i++ {
+ if dns.Extra[i] == nil {
+ continue
+ }
+ l += dns.Extra[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Extra[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Extra[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Extra[i])
+ }
+ }
+ return l
+}
+
+// Put the parts of the name in the compression map.
+func compressionLenHelper(c map[string]int, s string) {
+ pref := ""
+ lbs := Split(s)
+ for j := len(lbs) - 1; j >= 0; j-- {
+ pref = s[lbs[j]:]
+ if _, ok := c[pref]; !ok {
+ c[pref] = len(pref)
+ }
+ }
+}
+
+// Look for each part of the name in the compression map and return the
+// length of the match; the first hit is the longest matching suffix.
+func compressionLenSearch(c map[string]int, s string) (int, bool) {
+ off := 0
+ end := false
+ if s == "" { // don't bork on bogus data
+ return 0, false
+ }
+ for {
+ if _, ok := c[s[off:]]; ok {
+ return len(s[off:]), true
+ }
+ if end {
+ break
+ }
+ off, end = NextLabel(s, off)
+ }
+ return 0, false
+}
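+
+// For example, if compressionLenHelper already recorded "example.com." in the
+// map, compressionLenSearch(c, "www.example.com.") should return (12, true):
+// the longest stored suffix, "example.com.", is 12 bytes long.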
+
+// TODO(miek): should add all types, because they all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go
+func compressionLenHelperType(c map[string]int, r RR) {
+ switch x := r.(type) {
+ case *NS:
+ compressionLenHelper(c, x.Ns)
+ case *MX:
+ compressionLenHelper(c, x.Mx)
+ case *CNAME:
+ compressionLenHelper(c, x.Target)
+ case *PTR:
+ compressionLenHelper(c, x.Ptr)
+ case *SOA:
+ compressionLenHelper(c, x.Ns)
+ compressionLenHelper(c, x.Mbox)
+ case *MB:
+ compressionLenHelper(c, x.Mb)
+ case *MG:
+ compressionLenHelper(c, x.Mg)
+ case *MR:
+ compressionLenHelper(c, x.Mr)
+ case *MF:
+ compressionLenHelper(c, x.Mf)
+ case *MD:
+ compressionLenHelper(c, x.Md)
+ case *RT:
+ compressionLenHelper(c, x.Host)
+ case *RP:
+ compressionLenHelper(c, x.Mbox)
+ compressionLenHelper(c, x.Txt)
+ case *MINFO:
+ compressionLenHelper(c, x.Rmail)
+ compressionLenHelper(c, x.Email)
+ case *AFSDB:
+ compressionLenHelper(c, x.Hostname)
+ case *SRV:
+ compressionLenHelper(c, x.Target)
+ case *NAPTR:
+ compressionLenHelper(c, x.Replacement)
+ case *RRSIG:
+ compressionLenHelper(c, x.SignerName)
+ case *NSEC:
+ compressionLenHelper(c, x.NextDomain)
+ // HIP?
+ }
+}
+
+// Only search on compressing these types.
+func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
+ switch x := r.(type) {
+ case *NS:
+ return compressionLenSearch(c, x.Ns)
+ case *MX:
+ return compressionLenSearch(c, x.Mx)
+ case *CNAME:
+ return compressionLenSearch(c, x.Target)
+ case *DNAME:
+ return compressionLenSearch(c, x.Target)
+ case *PTR:
+ return compressionLenSearch(c, x.Ptr)
+ case *SOA:
+ k, ok := compressionLenSearch(c, x.Ns)
+ k1, ok1 := compressionLenSearch(c, x.Mbox)
+ if !ok && !ok1 {
+ return 0, false
+ }
+ return k + k1, true
+ case *MB:
+ return compressionLenSearch(c, x.Mb)
+ case *MG:
+ return compressionLenSearch(c, x.Mg)
+ case *MR:
+ return compressionLenSearch(c, x.Mr)
+ case *MF:
+ return compressionLenSearch(c, x.Mf)
+ case *MD:
+ return compressionLenSearch(c, x.Md)
+ case *RT:
+ return compressionLenSearch(c, x.Host)
+ case *MINFO:
+ k, ok := compressionLenSearch(c, x.Rmail)
+ k1, ok1 := compressionLenSearch(c, x.Email)
+ if !ok && !ok1 {
+ return 0, false
+ }
+ return k + k1, true
+ case *AFSDB:
+ return compressionLenSearch(c, x.Hostname)
+ }
+ return 0, false
+}
+
+// Copy returns a new RR which is a deep-copy of r.
+func Copy(r RR) RR { r1 := r.copy(); return r1 }
+
+// Len returns the length (in octets) of the uncompressed RR in wire format.
+func Len(r RR) int { return r.len() }
+
+// Copy returns a new *Msg which is a deep-copy of dns.
+func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
+
+// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
+func (dns *Msg) CopyTo(r1 *Msg) *Msg {
+ r1.MsgHdr = dns.MsgHdr
+ r1.Compress = dns.Compress
+
+ if len(dns.Question) > 0 {
+ r1.Question = make([]Question, len(dns.Question))
+ copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
+ }
+
+ rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
+ var rri int
+
+ if len(dns.Answer) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Answer); i++ {
+ rrArr[rri] = dns.Answer[i].copy()
+ rri++
+ }
+ r1.Answer = rrArr[rrbegin:rri:rri]
+ }
+
+ if len(dns.Ns) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Ns); i++ {
+ rrArr[rri] = dns.Ns[i].copy()
+ rri++
+ }
+ r1.Ns = rrArr[rrbegin:rri:rri]
+ }
+
+ if len(dns.Extra) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Extra); i++ {
+ rrArr[rri] = dns.Extra[i].copy()
+ rri++
+ }
+ r1.Extra = rrArr[rrbegin:rri:rri]
+ }
+
+ return r1
+}
+
+func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := PackDomainName(q.Name, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(q.Qtype, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(q.Qclass, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func unpackQuestion(msg []byte, off int) (Question, int, error) {
+ var (
+ q Question
+ err error
+ )
+ q.Name, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return q, off, err
+ }
+ if off == len(msg) {
+ return q, off, nil
+ }
+ q.Qtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return q, off, err
+ }
+ if off == len(msg) {
+ return q, off, nil
+ }
+ q.Qclass, off, err = unpackUint16(msg, off)
+ if off == len(msg) {
+ return q, off, nil
+ }
+ return q, off, err
+}
+
+func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := packUint16(dh.Id, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Bits, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Qdcount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Ancount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Nscount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Arcount, msg, off)
+ return off, err
+}
+
+func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
+ var (
+ dh Header
+ err error
+ )
+ dh.Id, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Bits, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Qdcount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Ancount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Nscount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Arcount, off, err = unpackUint16(msg, off)
+ return dh, off, err
+}
diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go
new file mode 100644
index 000000000..35786f22c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg_generate.go
@@ -0,0 +1,340 @@
+//+build ignore
+
+// msg_generate.go is meant to run with go generate. It will use
+// go/{importer,types} to track down all the RR struct types. Then for each type
+// it will generate pack/unpack methods based on the struct tags. The generated source is
+// written to zmsg.go, and is meant to be checked into git.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/importer"
+ "go/types"
+ "log"
+ "os"
+ "strings"
+)
+
+var packageHdr = `
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from msg_generate.go
+
+package dns
+
+`
+
+// getTypeStruct will take a type and the package scope, and return the
+// (innermost) struct if the type is considered an RR type (currently defined
+// as those structs beginning with an RR_Header, could be redefined as
+// implementing the RR interface). The bool return value indicates if embedded
+// structs were resolved.
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
+ st, ok := t.Underlying().(*types.Struct)
+ if !ok {
+ return nil, false
+ }
+ if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
+ return st, false
+ }
+ if st.Field(0).Anonymous() {
+ st, _ := getTypeStruct(st.Field(0).Type(), scope)
+ return st, true
+ }
+ return nil, false
+}
+
+func main() {
+ // Import and type-check the package
+ pkg, err := importer.Default().Import("github.com/miekg/dns")
+ fatalIfErr(err)
+ scope := pkg.Scope()
+
+ // Collect actual types (*X)
+ var namedTypes []string
+ for _, name := range scope.Names() {
+ o := scope.Lookup(name)
+ if o == nil || !o.Exported() {
+ continue
+ }
+ if st, _ := getTypeStruct(o.Type(), scope); st == nil {
+ continue
+ }
+ if name == "PrivateRR" {
+ continue
+ }
+
+ // Check if corresponding TypeX exists
+ if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
+ log.Fatalf("Constant Type%s does not exist.", o.Name())
+ }
+
+ namedTypes = append(namedTypes, o.Name())
+ }
+
+ b := &bytes.Buffer{}
+ b.WriteString(packageHdr)
+
+ fmt.Fprint(b, "// pack*() functions\n\n")
+ for _, name := range namedTypes {
+ o := scope.Lookup(name)
+ st, _ := getTypeStruct(o.Type(), scope)
+
+ fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
+ fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
+if err != nil {
+ return off, err
+}
+headerEnd := off
+`)
+ for i := 1; i < st.NumFields(); i++ {
+ o := func(s string) {
+ fmt.Fprintf(b, s, st.Field(i).Name())
+ fmt.Fprint(b, `if err != nil {
+return off, err
+}
+`)
+ }
+
+ if _, ok := st.Field(i).Type().(*types.Slice); ok {
+ switch st.Tag(i) {
+ case `dns:"-"`: // ignored
+ case `dns:"txt"`:
+ o("off, err = packStringTxt(rr.%s, msg, off)\n")
+ case `dns:"opt"`:
+ o("off, err = packDataOpt(rr.%s, msg, off)\n")
+ case `dns:"nsec"`:
+ o("off, err = packDataNsec(rr.%s, msg, off)\n")
+ case `dns:"domain-name"`:
+ o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ continue
+ }
+
+ switch {
+ case st.Tag(i) == `dns:"-"`: // ignored
+ case st.Tag(i) == `dns:"cdomain-name"`:
+ fallthrough
+ case st.Tag(i) == `dns:"domain-name"`:
+ o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
+ case st.Tag(i) == `dns:"a"`:
+ o("off, err = packDataA(rr.%s, msg, off)\n")
+ case st.Tag(i) == `dns:"aaaa"`:
+ o("off, err = packDataAAAA(rr.%s, msg, off)\n")
+ case st.Tag(i) == `dns:"uint48"`:
+ o("off, err = packUint48(rr.%s, msg, off)\n")
+ case st.Tag(i) == `dns:"txt"`:
+ o("off, err = packString(rr.%s, msg, off)\n")
+
+ case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
+ fallthrough
+ case st.Tag(i) == `dns:"base32"`:
+ o("off, err = packStringBase32(rr.%s, msg, off)\n")
+
+ case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
+ fallthrough
+ case st.Tag(i) == `dns:"base64"`:
+ o("off, err = packStringBase64(rr.%s, msg, off)\n")
+
+ case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3
+ o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n")
+ continue
+ case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
+ fallthrough
+ case st.Tag(i) == `dns:"hex"`:
+ o("off, err = packStringHex(rr.%s, msg, off)\n")
+
+ case st.Tag(i) == `dns:"octet"`:
+ o("off, err = packStringOctet(rr.%s, msg, off)\n")
+ case st.Tag(i) == "":
+ switch st.Field(i).Type().(*types.Basic).Kind() {
+ case types.Uint8:
+ o("off, err = packUint8(rr.%s, msg, off)\n")
+ case types.Uint16:
+ o("off, err = packUint16(rr.%s, msg, off)\n")
+ case types.Uint32:
+ o("off, err = packUint32(rr.%s, msg, off)\n")
+ case types.Uint64:
+ o("off, err = packUint64(rr.%s, msg, off)\n")
+ case types.String:
+ o("off, err = packString(rr.%s, msg, off)\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name())
+ }
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ }
+ // We have packed everything, only now we know the rdlength of this RR
+ fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
+ fmt.Fprintln(b, "return off, nil }\n")
+ }
+
+ fmt.Fprint(b, "// unpack*() functions\n\n")
+ for _, name := range namedTypes {
+ o := scope.Lookup(name)
+ st, _ := getTypeStruct(o.Type(), scope)
+
+ fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
+ fmt.Fprintf(b, "rr := new(%s)\n", name)
+ fmt.Fprint(b, "rr.Hdr = h\n")
+ fmt.Fprint(b, `if noRdata(h) {
+return rr, off, nil
+ }
+var err error
+rdStart := off
+_ = rdStart
+
+`)
+ for i := 1; i < st.NumFields(); i++ {
+ o := func(s string) {
+ fmt.Fprintf(b, s, st.Field(i).Name())
+ fmt.Fprint(b, `if err != nil {
+return rr, off, err
+}
+`)
+ }
+
+ // size-* are special, because they reference a struct member we should use for the length.
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
+ structMember := structMember(st.Tag(i))
+ structTag := structTag(st.Tag(i))
+ switch structTag {
+ case "hex":
+ fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
+ case "base32":
+ fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
+ case "base64":
+ fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ fmt.Fprint(b, `if err != nil {
+return rr, off, err
+}
+`)
+ continue
+ }
+
+ if _, ok := st.Field(i).Type().(*types.Slice); ok {
+ switch st.Tag(i) {
+ case `dns:"-"`: // ignored
+ case `dns:"txt"`:
+ o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
+ case `dns:"opt"`:
+ o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
+ case `dns:"nsec"`:
+ o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
+ case `dns:"domain-name"`:
+ o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ continue
+ }
+
+ switch st.Tag(i) {
+ case `dns:"-"`: // ignored
+ case `dns:"cdomain-name"`:
+ fallthrough
+ case `dns:"domain-name"`:
+ o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
+ case `dns:"a"`:
+ o("rr.%s, off, err = unpackDataA(msg, off)\n")
+ case `dns:"aaaa"`:
+ o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
+ case `dns:"uint48"`:
+ o("rr.%s, off, err = unpackUint48(msg, off)\n")
+ case `dns:"txt"`:
+ o("rr.%s, off, err = unpackString(msg, off)\n")
+ case `dns:"base32"`:
+ o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
+ case `dns:"base64"`:
+ o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
+ case `dns:"hex"`:
+ o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
+ case `dns:"octet"`:
+ o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
+ case "":
+ switch st.Field(i).Type().(*types.Basic).Kind() {
+ case types.Uint8:
+ o("rr.%s, off, err = unpackUint8(msg, off)\n")
+ case types.Uint16:
+ o("rr.%s, off, err = unpackUint16(msg, off)\n")
+ case types.Uint32:
+ o("rr.%s, off, err = unpackUint32(msg, off)\n")
+ case types.Uint64:
+ o("rr.%s, off, err = unpackUint64(msg, off)\n")
+ case types.String:
+ o("rr.%s, off, err = unpackString(msg, off)\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name())
+ }
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ // If we've hit len(msg) we return without error.
+ if i < st.NumFields()-1 {
+ fmt.Fprintf(b, `if off == len(msg) {
+return rr, off, nil
+ }
+`)
+ }
+ }
+ fmt.Fprintf(b, "return rr, off, err }\n\n")
+ }
+ // Generate typeToUnpack map
+ fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
+ for _, name := range namedTypes {
+ if name == "RFC3597" {
+ continue
+ }
+ fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
+ }
+ fmt.Fprintln(b, "}\n")
+
+ // gofmt
+ res, err := format.Source(b.Bytes())
+ if err != nil {
+ b.WriteTo(os.Stderr)
+ log.Fatal(err)
+ }
+
+ // write result
+ f, err := os.Create("zmsg.go")
+ fatalIfErr(err)
+ defer f.Close()
+ f.Write(res)
+}
+
+// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
+func structMember(s string) string {
+ fields := strings.Split(s, ":")
+ if len(fields) == 0 {
+ return ""
+ }
+ f := fields[len(fields)-1]
+ // f should have a closing "
+ if len(f) > 1 {
+ return f[:len(f)-1]
+ }
+ return f
+}
+
+// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
+func structTag(s string) string {
+ fields := strings.Split(s, ":")
+ if len(fields) < 2 {
+ return ""
+ }
+ return fields[1][len("\"size-"):]
+}
+
+func fatalIfErr(err error) {
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
new file mode 100644
index 000000000..e7a9500cc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -0,0 +1,630 @@
+package dns
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "net"
+ "strconv"
+)
+
+// helper functions called from the generated zmsg.go
+
+// These functions are named after the tag they help pack/unpack; if there is
+// no tag, they are named after the type they pack/unpack (string, int, etc).
+// We prefix all with unpackData or packData, so packDataA or packDataDomainName.
+
+func unpackDataA(msg []byte, off int) (net.IP, int, error) {
+ if off+net.IPv4len > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking a"}
+ }
+ a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
+ off += net.IPv4len
+ return a, off, nil
+}
+
+func packDataA(a net.IP, msg []byte, off int) (int, error) {
+ // The address must be a 4-byte slice; if it is 16 bytes, only the IPv4 part (To4) is encoded.
+ if off+net.IPv4len > len(msg) {
+ return len(msg), &Error{err: "overflow packing a"}
+ }
+ switch len(a) {
+ case net.IPv4len, net.IPv6len:
+ copy(msg[off:], a.To4())
+ off += net.IPv4len
+ case 0:
+ // Allowed, for dynamic updates.
+ default:
+ return len(msg), &Error{err: "overflow packing a"}
+ }
+ return off, nil
+}
+
+func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
+ if off+net.IPv6len > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
+ }
+ aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
+ off += net.IPv6len
+ return aaaa, off, nil
+}
+
+func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
+ if off+net.IPv6len > len(msg) {
+ return len(msg), &Error{err: "overflow packing aaaa"}
+ }
+
+ switch len(aaaa) {
+ case net.IPv6len:
+ copy(msg[off:], aaaa)
+ off += net.IPv6len
+ case 0:
+ // Allowed, dynamic updates.
+ default:
+ return len(msg), &Error{err: "overflow packing aaaa"}
+ }
+ return off, nil
+}
+
+// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
+// re-sliced msg according to the expected length of the RR.
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
+ hdr := RR_Header{}
+ if off == len(msg) {
+ return hdr, off, msg, nil
+ }
+
+ hdr.Name, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Rrtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Class, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Ttl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Rdlength, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
+ return hdr, off, msg, err
+}
+
+// pack packs an RR header, returning the offset to the end of the header.
+// See PackDomainName for documentation about the compression.
+func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ if off == len(msg) {
+ return off, nil
+ }
+
+ off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(hdr.Rrtype, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(hdr.Class, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint32(hdr.Ttl, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(hdr.Rdlength, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+// helper helper functions.
+
+// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
+// Returns an error if msg is smaller than the expected size.
+func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
+ lenrd := off + int(rdlength)
+ if lenrd > len(msg) {
+ return msg, &Error{err: "overflowing header size"}
+ }
+ return msg[:lenrd], nil
+}
+
+func fromBase32(s []byte) (buf []byte, err error) {
+ buflen := base32.HexEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base32.HexEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) }
+
+func fromBase64(s []byte) (buf []byte, err error) {
+ buflen := base64.StdEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base64.StdEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
+
+// noRdata returns true if the Rdlength is zero, i.e. the RR carries no rdata (as seen in dynamic updates).
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
+
+func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
+ if off+1 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint8"}
+ }
+ return uint8(msg[off]), off + 1, nil
+}
+
+func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
+ if off+1 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint8"}
+ }
+ msg[off] = byte(i)
+ return off + 1, nil
+}
+
+func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
+ if off+2 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint16"}
+ }
+ return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
+}
+
+func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
+ if off+2 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint16"}
+ }
+ binary.BigEndian.PutUint16(msg[off:], i)
+ return off + 2, nil
+}
+
+func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
+ if off+4 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint32"}
+ }
+ return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
+}
+
+func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
+ if off+4 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint32"}
+ }
+ binary.BigEndian.PutUint32(msg[off:], i)
+ return off + 4, nil
+}
+
+func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
+ if off+6 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
+ }
+ // Used in TSIG, where only the low 48 bits of the 64-bit field are occupied; treat it as a uint48 (6 bytes).
+ i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
+ uint64(msg[off+4])<<8 | uint64(msg[off+5])
+ off += 6
+ return i, off, nil
+}
+
+func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
+ if off+6 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint64 as uint48"}
+ }
+ msg[off] = byte(i >> 40)
+ msg[off+1] = byte(i >> 32)
+ msg[off+2] = byte(i >> 24)
+ msg[off+3] = byte(i >> 16)
+ msg[off+4] = byte(i >> 8)
+ msg[off+5] = byte(i)
+ off += 6
+ return off, nil
+}
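+
+// Illustrative sketch (not part of the upstream code): a 48-bit value such as the
+// TSIG time-signed field is stored big-endian in six octets.
+//
+//	buf := make([]byte, 6)
+//	_, _ = packUint48(0x0102030405A6, buf, 0) // buf == []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0xA6}
+//	v, _, _ := unpackUint48(buf, 0)           // v == 0x0102030405A6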
+
+func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
+ if off+8 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint64"}
+ }
+ return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
+}
+
+func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
+ if off+8 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint64"}
+ }
+ binary.BigEndian.PutUint64(msg[off:], i)
+ off += 8
+ return off, nil
+}
+
+func unpackString(msg []byte, off int) (string, int, error) {
+ if off+1 > len(msg) {
+ return "", off, &Error{err: "overflow unpacking txt"}
+ }
+ l := int(msg[off])
+ if off+l+1 > len(msg) {
+ return "", off, &Error{err: "overflow unpacking txt"}
+ }
+ s := make([]byte, 0, l)
+ for _, b := range msg[off+1 : off+1+l] {
+ switch b {
+ case '"', '\\':
+ s = append(s, '\\', b)
+ case '\t', '\r', '\n':
+ s = append(s, b)
+ default:
+ if b < 32 || b > 127 { // unprintable
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ off += 1 + l
+ return string(s), off, nil
+}
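+
+// Illustrative sketch (not part of the upstream code): a <character-string> is a
+// length octet followed by up to 255 octets; unprintable octets are rendered as \DDD.
+//
+//	msg := []byte{3, 'a', 0x07, 'b'}
+//	s, off, _ := unpackString(msg, 0) // s == `a\007b`, off == 4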
+
+func packString(s string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1)
+ off, err := packTxtString(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking base32"}
+ }
+ s := toBase32(msg[off:end])
+ return s, end, nil
+}
+
+func packStringBase32(s string, msg []byte, off int) (int, error) {
+ b32, err := fromBase32([]byte(s))
+ if err != nil {
+ return len(msg), err
+ }
+ if off+len(b32) > len(msg) {
+ return len(msg), &Error{err: "overflow packing base32"}
+ }
+ copy(msg[off:off+len(b32)], b32)
+ off += len(b32)
+ return off, nil
+}
+
+func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
+ // The rest of the RR is a base64-encoded value, so no explicit length needs to
+ // be set; so far every RR with a base64-encoded field has it as its last field.
+ // What we do need is the end of the RR.
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking base64"}
+ }
+ s := toBase64(msg[off:end])
+ return s, end, nil
+}
+
+func packStringBase64(s string, msg []byte, off int) (int, error) {
+ b64, err := fromBase64([]byte(s))
+ if err != nil {
+ return len(msg), err
+ }
+ if off+len(b64) > len(msg) {
+ return len(msg), &Error{err: "overflow packing base64"}
+ }
+ copy(msg[off:off+len(b64)], b64)
+ off += len(b64)
+ return off, nil
+}
+
+func unpackStringHex(msg []byte, off, end int) (string, int, error) {
+ // The rest of the RR is a hex-encoded value, so no explicit length needs to be
+ // set. (NSEC3 and TSIG carry hex fields that do have their own length field.)
+ // What we do need is the end of the RR.
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking hex"}
+ }
+
+ s := hex.EncodeToString(msg[off:end])
+ return s, end, nil
+}
+
+func packStringHex(s string, msg []byte, off int) (int, error) {
+ h, err := hex.DecodeString(s)
+ if err != nil {
+ return len(msg), err
+ }
+ if off+(len(h)) > len(msg) {
+ return len(msg), &Error{err: "overflow packing hex"}
+ }
+ copy(msg[off:off+len(h)], h)
+ off += len(h)
+ return off, nil
+}
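+
+// Usage sketch (not part of the upstream code): the *Base32/*Base64/*Hex unpackers
+// take an explicit end offset because these fields run to the end of the rdata;
+// callers derive it from the header's Rdlength.
+//
+//	end := rdStart + int(hdr.Rdlength)          // rdStart is where the rdata begins
+//	s, _, err := unpackStringHex(msg, off, end) // s is the lower-case hex encoding of msg[off:end]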
+
+func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
+ txt, off, err := unpackTxt(msg, off)
+ if err != nil {
+ return nil, len(msg), err
+ }
+ return txt, off, nil
+}
+
+func packStringTxt(s []string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1) // If the whole string consists entirely of \DDD escapes we need this much room.
+ off, err := packTxt(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
+ var edns []EDNS0
+Option:
+ code := uint16(0)
+ if off+4 > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking opt"}
+ }
+ code = binary.BigEndian.Uint16(msg[off:])
+ off += 2
+ optlen := binary.BigEndian.Uint16(msg[off:])
+ off += 2
+ if off+int(optlen) > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking opt"}
+ }
+ switch code {
+ case EDNS0NSID:
+ e := new(EDNS0_NSID)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0SUBNET, EDNS0SUBNETDRAFT:
+ e := new(EDNS0_SUBNET)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ if code == EDNS0SUBNETDRAFT {
+ e.DraftOption = true
+ }
+ case EDNS0COOKIE:
+ e := new(EDNS0_COOKIE)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0UL:
+ e := new(EDNS0_UL)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0LLQ:
+ e := new(EDNS0_LLQ)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0DAU:
+ e := new(EDNS0_DAU)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0DHU:
+ e := new(EDNS0_DHU)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0N3U:
+ e := new(EDNS0_N3U)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ default:
+ e := new(EDNS0_LOCAL)
+ e.Code = code
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ }
+
+ if off < len(msg) {
+ goto Option
+ }
+
+ return edns, off, nil
+}
+
+func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
+ for _, el := range options {
+ b, err := el.pack()
+ if err != nil || off+4 > len(msg) { // need room for the 2-byte code and the 2-byte length
+ return len(msg), &Error{err: "overflow packing opt"}
+ }
+ binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
+ binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
+ off += 4
+ if off+len(b) > len(msg) {
+ copy(msg[off:], b)
+ off = len(msg)
+ continue
+ }
+ // Actual data
+ copy(msg[off:off+len(b)], b)
+ off += len(b)
+ }
+ return off, nil
+}
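+
+// For reference, each EDNS0 option walked by the two functions above is encoded as
+// (RFC 6891, section 6.1.2):
+//
+//	OPTION-CODE   uint16
+//	OPTION-LENGTH uint16
+//	OPTION-DATA   OPTION-LENGTH octets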
+
+func unpackStringOctet(msg []byte, off int) (string, int, error) {
+ s := string(msg[off:])
+ return s, len(msg), nil
+}
+
+func packStringOctet(s string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1)
+ off, err := packOctetString(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
+ var nsec []uint16
+ length, window, lastwindow := 0, 0, -1
+ for off < len(msg) {
+ if off+2 > len(msg) {
+ return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
+ }
+ window = int(msg[off])
+ length = int(msg[off+1])
+ off += 2
+ if window <= lastwindow {
+ // RFC 4034: Blocks are present in the NSEC RR RDATA in
+ // increasing numerical order.
+ return nsec, len(msg), &Error{err: "out of order NSEC block"}
+ }
+ if length == 0 {
+ // RFC 4034: Blocks with no types present MUST NOT be included.
+ return nsec, len(msg), &Error{err: "empty NSEC block"}
+ }
+ if length > 32 {
+ return nsec, len(msg), &Error{err: "NSEC block too long"}
+ }
+ if off+length > len(msg) {
+ return nsec, len(msg), &Error{err: "overflowing NSEC block"}
+ }
+
+ // Walk the bytes in the window and extract the type bits
+ for j := 0; j < length; j++ {
+ b := msg[off+j]
+ // Check the bits one by one, and set the type
+ if b&0x80 == 0x80 {
+ nsec = append(nsec, uint16(window*256+j*8+0))
+ }
+ if b&0x40 == 0x40 {
+ nsec = append(nsec, uint16(window*256+j*8+1))
+ }
+ if b&0x20 == 0x20 {
+ nsec = append(nsec, uint16(window*256+j*8+2))
+ }
+ if b&0x10 == 0x10 {
+ nsec = append(nsec, uint16(window*256+j*8+3))
+ }
+ if b&0x8 == 0x8 {
+ nsec = append(nsec, uint16(window*256+j*8+4))
+ }
+ if b&0x4 == 0x4 {
+ nsec = append(nsec, uint16(window*256+j*8+5))
+ }
+ if b&0x2 == 0x2 {
+ nsec = append(nsec, uint16(window*256+j*8+6))
+ }
+ if b&0x1 == 0x1 {
+ nsec = append(nsec, uint16(window*256+j*8+7))
+ }
+ }
+ off += length
+ lastwindow = window
+ }
+ return nsec, off, nil
+}
+
+func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
+ if len(bitmap) == 0 {
+ return off, nil
+ }
+ var lastwindow, lastlength uint16
+ for j := 0; j < len(bitmap); j++ {
+ t := bitmap[j]
+ window := t / 256
+ length := (t-window*256)/8 + 1
+ if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
+ off += int(lastlength) + 2
+ lastlength = 0
+ }
+ if window < lastwindow || length < lastlength {
+ return len(msg), &Error{err: "nsec bits out of order"}
+ }
+ if off+2+int(length) > len(msg) {
+ return len(msg), &Error{err: "overflow packing nsec"}
+ }
+ // Setting the window #
+ msg[off] = byte(window)
+ // Setting the octets length
+ msg[off+1] = byte(length)
+ // Setting the bit value for the type in the right octet
+ msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
+ lastwindow, lastlength = window, length
+ }
+ off += int(lastlength) + 2
+ return off, nil
+}
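+
+// Worked example (not part of the upstream code) of the RFC 4034 type bitmap these
+// two functions handle: the types A (1), MX (15) and AAAA (28) all fall in window 0;
+// A sets bit 1 of octet 0, MX bit 7 of octet 1 and AAAA bit 4 of octet 3, giving
+//
+//	window 0, length 4, octets 0x40 0x01 0x00 0x08
+//
+// which unpackDataNsec turns back into []uint16{1, 15, 28}.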
+
+func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
+ var (
+ servers []string
+ s string
+ err error
+ )
+ if end > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking domain names"}
+ }
+ for off < end {
+ s, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return servers, len(msg), err
+ }
+ servers = append(servers, s)
+ }
+ return servers, off, nil
+}
+
+func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ var err error
+ for j := 0; j < len(names); j++ {
+ off, err = PackDomainName(names[j], msg, off, compression, false && compress)
+ if err != nil {
+ return len(msg), err
+ }
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go
new file mode 100644
index 000000000..6f10f3e65
--- /dev/null
+++ b/vendor/github.com/miekg/dns/nsecx.go
@@ -0,0 +1,119 @@
+package dns
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+ "strings"
+)
+
+type saltWireFmt struct {
+ Salt string `dns:"size-hex"`
+}
+
+// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
+func HashName(label string, ha uint8, iter uint16, salt string) string {
+ saltwire := new(saltWireFmt)
+ saltwire.Salt = salt
+ wire := make([]byte, DefaultMsgSize)
+ n, err := packSaltWire(saltwire, wire)
+ if err != nil {
+ return ""
+ }
+ wire = wire[:n]
+ name := make([]byte, 255)
+ off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
+ if err != nil {
+ return ""
+ }
+ name = name[:off]
+ var s hash.Hash
+ switch ha {
+ case SHA1:
+ s = sha1.New()
+ default:
+ return ""
+ }
+
+ // k = 0
+ name = append(name, wire...)
+ io.WriteString(s, string(name))
+ nsec3 := s.Sum(nil)
+ // k > 0
+ for k := uint16(0); k < iter; k++ {
+ s.Reset()
+ nsec3 = append(nsec3, wire...)
+ io.WriteString(s, string(nsec3))
+ nsec3 = s.Sum(nil)
+ }
+ return toBase32(nsec3)
+}
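+
+// The loop above computes the iterated hash from RFC 5155, section 5:
+//
+//	IH(salt, x, 0) = H(x || salt)
+//	IH(salt, x, k) = H(IH(salt, x, k-1) || salt), for k > 0
+//
+// Usage sketch (the name, iteration count and salt below are arbitrary):
+//
+//	h := HashName("example.org.", SHA1, 12, "AABBCCDD") // upper-case base32hex digest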
+
+// Denialer is an interface that should be implemented by types that are used to deny
+// answers in DNSSEC.
+type Denialer interface {
+ // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
+ Cover(name string) bool
+ // Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
+ Match(name string) bool
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC) Cover(name string) bool {
+ return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC) Match(name string) bool {
+ return true
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC3) Cover(name string) bool {
+ // FIXME(miek): check if the zones match
+ // FIXME(miek): check if we're not dealing with parent nsec3
+ hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ labels := Split(rr.Hdr.Name)
+ if len(labels) < 2 {
+ return false
+ }
+ hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
+ if hash == rr.NextDomain {
+ return false // empty interval
+ }
+ if hash > rr.NextDomain { // last name, points to apex
+ // hname > hash
+ // hname > rr.NextDomain
+ // TODO(miek)
+ }
+ if hname <= hash {
+ return false
+ }
+ if hname >= rr.NextDomain {
+ return false
+ }
+ return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC3) Match(name string) bool {
+ // FIXME(miek): Check if we are in the same zone
+ hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ labels := Split(rr.Hdr.Name)
+ if len(labels) < 2 {
+ return false
+ }
+ hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
+ if hash == hname {
+ return true
+ }
+ return false
+}
+
+func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
+ off, err := packStringHex(sw.Salt, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/nsecx_test.go b/vendor/github.com/miekg/dns/nsecx_test.go
new file mode 100644
index 000000000..93e0c63fc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/nsecx_test.go
@@ -0,0 +1,29 @@
+package dns
+
+import (
+ "testing"
+)
+
+func TestPackNsec3(t *testing.T) {
+ nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
+ if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
+ t.Error(nsec3)
+ }
+
+ nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
+ if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
+ t.Error(nsec3)
+ }
+}
+
+func TestNsec3(t *testing.T) {
+ // examples taken from .nl
+ nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
+ if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
+ t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
+ }
+ nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
+ if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
+ t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
+ }
+}
diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go
new file mode 100644
index 000000000..3b38dba65
--- /dev/null
+++ b/vendor/github.com/miekg/dns/parse_test.go
@@ -0,0 +1,1493 @@
+package dns
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "testing/quick"
+ "time"
+)
+
+func TestDotInName(t *testing.T) {
+ buf := make([]byte, 20)
+ PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
+ // index 3 must be a real dot
+ if buf[3] != '.' {
+ t.Error("dot should be a real dot")
+ }
+
+ if buf[6] != 2 {
+ t.Error("this must have the value 2")
+ }
+ dom, _, _ := UnpackDomainName(buf, 0)
+ // printing it should yield the backspace again
+ if dom != "aa\\.bb.nl." {
+ t.Error("dot should have been escaped: ", dom)
+ }
+}
+
+func TestDotLastInLabel(t *testing.T) {
+ sample := "aa\\..au."
+ buf := make([]byte, 20)
+ _, err := PackDomainName(sample, buf, 0, nil, false)
+ if err != nil {
+ t.Fatalf("unexpected error packing domain: %v", err)
+ }
+ dom, _, _ := UnpackDomainName(buf, 0)
+ if dom != sample {
+ t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom)
+ }
+}
+
+func TestTooLongDomainName(t *testing.T) {
+ l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
+ dom := l + l + l + l + l + l + l
+ _, err := NewRR(dom + " IN A 127.0.0.1")
+ if err == nil {
+ t.Error("should be too long")
+ } else {
+ t.Logf("error is %v", err)
+ }
+ _, err = NewRR("..com. IN A 127.0.0.1")
+ if err == nil {
+ t.Error("should fail")
+ } else {
+ t.Logf("error is %v", err)
+ }
+}
+
+func TestDomainName(t *testing.T) {
+ tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.",
+ "www.*.miek.nl.", "www.*.miek.nl.",
+ }
+ dbuff := make([]byte, 40)
+
+ for _, ts := range tests {
+ if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil {
+ t.Error("not a valid domain name")
+ continue
+ }
+ n, _, err := UnpackDomainName(dbuff, 0)
+ if err != nil {
+ t.Error("failed to unpack packed domain name")
+ continue
+ }
+ if ts != n {
+ t.Errorf("must be equal: in: %s, out: %s", ts, n)
+ }
+ }
+}
+
+func TestDomainNameAndTXTEscapes(t *testing.T) {
+ tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255}
+ for _, b := range tests {
+ rrbytes := []byte{
+ 1, b, 0, // owner
+ byte(TypeTXT >> 8), byte(TypeTXT),
+ byte(ClassINET >> 8), byte(ClassINET),
+ 0, 0, 0, 1, // TTL
+ 0, 2, 1, b, // Data
+ }
+ rr1, _, err := UnpackRR(rrbytes, 0)
+ if err != nil {
+ panic(err)
+ }
+ s := rr1.String()
+ rr2, err := NewRR(s)
+ if err != nil {
+ t.Errorf("Error parsing unpacked RR's string: %v", err)
+ t.Errorf(" Bytes: %v", rrbytes)
+ t.Errorf("String: %v", s)
+ }
+ repacked := make([]byte, len(rrbytes))
+ if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil {
+ t.Errorf("error packing parsed RR: %v", err)
+ t.Errorf(" original Bytes: %v", rrbytes)
+ t.Errorf("unpacked Struct: %v", rr1)
+ t.Errorf(" parsed Struct: %v", rr2)
+ }
+ if !bytes.Equal(repacked, rrbytes) {
+ t.Error("packed bytes don't match original bytes")
+ t.Errorf(" original bytes: %v", rrbytes)
+ t.Errorf(" packed bytes: %v", repacked)
+ t.Errorf("unpacked struct: %v", rr1)
+ t.Errorf(" parsed struct: %v", rr2)
+ }
+ }
+}
+
+func TestTXTEscapeParsing(t *testing.T) {
+ test := [][]string{
+ {`";"`, `";"`},
+ {`\;`, `";"`},
+ {`"\t"`, `"\t"`},
+ {`"\r"`, `"\r"`},
+ {`"\ "`, `" "`},
+ {`"\;"`, `";"`},
+ {`"\;\""`, `";\""`},
+ {`"\(a\)"`, `"(a)"`},
+ {`"\(a)"`, `"(a)"`},
+ {`"(a\)"`, `"(a)"`},
+ {`"(a)"`, `"(a)"`},
+ {`"\048"`, `"0"`},
+ {`"\` + "\n" + `"`, `"\n"`},
+ {`"\` + "\r" + `"`, `"\r"`},
+ {`"\` + "\x11" + `"`, `"\017"`},
+ {`"\'"`, `"'"`},
+ }
+ for _, s := range test {
+ rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0]))
+ if err != nil {
+ t.Errorf("could not parse %v TXT: %s", s[0], err)
+ continue
+ }
+
+ txt := sprintTxt(rr.(*TXT).Txt)
+ if txt != s[1] {
+ t.Errorf("mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1])
+ }
+ }
+}
+
+func GenerateDomain(r *rand.Rand, size int) []byte {
+ dnLen := size % 70 // artificially limit size so there's less to interpret if a failure occurs
+ var dn []byte
+ done := false
+ for i := 0; i < dnLen && !done; {
+ max := dnLen - i
+ if max > 63 {
+ max = 63
+ }
+ lLen := max
+ if lLen != 0 {
+ lLen = int(r.Int31()) % max
+ }
+ done = lLen == 0
+ if done {
+ continue
+ }
+ l := make([]byte, lLen+1)
+ l[0] = byte(lLen)
+ for j := 0; j < lLen; j++ {
+ l[j+1] = byte(rand.Int31())
+ }
+ dn = append(dn, l...)
+ i += 1 + lLen
+ }
+ return append(dn, 0)
+}
+
+func TestDomainQuick(t *testing.T) {
+ r := rand.New(rand.NewSource(0))
+ f := func(l int) bool {
+ db := GenerateDomain(r, l)
+ ds, _, err := UnpackDomainName(db, 0)
+ if err != nil {
+ panic(err)
+ }
+ buf := make([]byte, 255)
+ off, err := PackDomainName(ds, buf, 0, nil, false)
+ if err != nil {
+ t.Errorf("error packing domain: %v", err)
+ t.Errorf(" bytes: %v", db)
+ t.Errorf("string: %v", ds)
+ return false
+ }
+ if !bytes.Equal(db, buf[:off]) {
+ t.Errorf("repacked domain doesn't match original:")
+ t.Errorf("src bytes: %v", db)
+ t.Errorf(" string: %v", ds)
+ t.Errorf("out bytes: %v", buf[:off])
+ return false
+ }
+ return true
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func GenerateTXT(r *rand.Rand, size int) []byte {
+ rdLen := size % 300 // artificially limit size so there's less to interpret if a failure occurs
+ var rd []byte
+ for i := 0; i < rdLen; {
+ max := rdLen - 1
+ if max > 255 {
+ max = 255
+ }
+ sLen := max
+ if max != 0 {
+ sLen = int(r.Int31()) % max
+ }
+ s := make([]byte, sLen+1)
+ s[0] = byte(sLen)
+ for j := 0; j < sLen; j++ {
+ s[j+1] = byte(rand.Int31())
+ }
+ rd = append(rd, s...)
+ i += 1 + sLen
+ }
+ return rd
+}
+
+// Ok, 2 things. 1) This test breaks with the new functionality of splitting up larger
+// txt chunks into 255-byte pieces. 2) I don't like the random nature of this thing,
+// because I can't place the quotes where they need to be.
+// So either add some code that places the quotes in just the right spots, make this
+// non-random, or do something else.
+// Disabled for now. (miek)
+func testTXTRRQuick(t *testing.T) {
+ s := rand.NewSource(0)
+ r := rand.New(s)
+ typeAndClass := []byte{
+ byte(TypeTXT >> 8), byte(TypeTXT),
+ byte(ClassINET >> 8), byte(ClassINET),
+ 0, 0, 0, 1, // TTL
+ }
+ f := func(l int) bool {
+ owner := GenerateDomain(r, l)
+ rdata := GenerateTXT(r, l)
+ rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata))
+ rrbytes = append(rrbytes, owner...)
+ rrbytes = append(rrbytes, typeAndClass...)
+ rrbytes = append(rrbytes, byte(len(rdata)>>8))
+ rrbytes = append(rrbytes, byte(len(rdata)))
+ rrbytes = append(rrbytes, rdata...)
+ rr, _, err := UnpackRR(rrbytes, 0)
+ if err != nil {
+ panic(err)
+ }
+ buf := make([]byte, len(rrbytes)*3)
+ off, err := PackRR(rr, buf, 0, nil, false)
+ if err != nil {
+ t.Errorf("pack Error: %v\nRR: %v", err, rr)
+ return false
+ }
+ buf = buf[:off]
+ if !bytes.Equal(buf, rrbytes) {
+ t.Errorf("packed bytes don't match original bytes")
+ t.Errorf("src bytes: %v", rrbytes)
+ t.Errorf(" struct: %v", rr)
+ t.Errorf("out bytes: %v", buf)
+ return false
+ }
+ if len(rdata) == 0 {
+ // string'ing won't produce any data to parse
+ return true
+ }
+ rrString := rr.String()
+ rr2, err := NewRR(rrString)
+ if err != nil {
+ t.Errorf("error parsing own output: %v", err)
+ t.Errorf("struct: %v", rr)
+ t.Errorf("string: %v", rrString)
+ return false
+ }
+ if rr2.String() != rrString {
+ t.Errorf("parsed rr.String() doesn't match original string")
+ t.Errorf("original: %v", rrString)
+ t.Errorf(" parsed: %v", rr2.String())
+ return false
+ }
+
+ buf = make([]byte, len(rrbytes)*3)
+ off, err = PackRR(rr2, buf, 0, nil, false)
+ if err != nil {
+ t.Errorf("error packing parsed rr: %v", err)
+ t.Errorf("unpacked Struct: %v", rr)
+ t.Errorf(" string: %v", rrString)
+ t.Errorf(" parsed Struct: %v", rr2)
+ return false
+ }
+ buf = buf[:off]
+ if !bytes.Equal(buf, rrbytes) {
+ t.Errorf("parsed packed bytes don't match original bytes")
+ t.Errorf(" source bytes: %v", rrbytes)
+ t.Errorf("unpacked struct: %v", rr)
+ t.Errorf(" string: %v", rrString)
+ t.Errorf(" parsed struct: %v", rr2)
+ t.Errorf(" repacked bytes: %v", buf)
+ return false
+ }
+ return true
+ }
+ c := &quick.Config{MaxCountScale: 10}
+ if err := quick.Check(f, c); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestParseDirectiveMisc(t *testing.T) {
+ tests := map[string]string{
+ "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.",
+ "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.",
+ "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.",
+ `name. IN SOA a6.nstld.com. hostmaster.nic.name. (
+ 203362132 ; serial
+ 5m ; refresh (5 minutes)
+ 5m ; retry (5 minutes)
+ 2w ; expire (2 weeks)
+ 300 ; minimum (5 minutes)
+)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300",
+ ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.",
+ "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1",
+ }
+ for i, o := range tests {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestNSEC(t *testing.T) {
+ nsectests := map[string]string{
+ "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F",
+ "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM",
+ "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC",
+ "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
+ "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
+ "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test. NSEC3 1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA": "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test.\t3600\tIN\tNSEC3\t1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA",
+ }
+ for i, o := range nsectests {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseLOC(t *testing.T) {
+ lt := map[string]string{
+ "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
+ "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
+ }
+ for i, o := range lt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseDS(t *testing.T) {
+ dt := map[string]string{
+ "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F",
+ }
+ for i, o := range dt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestQuotes(t *testing.T) {
+ tests := map[string]string{
+ `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"",
+ `t.example.com. IN TXT "a
+ bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"",
+ `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"",
+ `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"",
+ `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"",
+ `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
+ `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
+ `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
+ `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa \"",
+ `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
+ `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
+ `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
+ "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.",
+ "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.",
+ "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.",
+ "cid.urn.arpa. NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .",
+ }
+ for i, o := range tests {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseClass(t *testing.T) {
+ tests := map[string]string{
+ "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1",
+ "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1",
+ "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1",
+ // ClassANY can not occur in zone files
+ // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1",
+ "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1",
+ }
+ for i, o := range tests {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestBrace(t *testing.T) {
+ tests := map[string]string{
+ "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1",
+ "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.",
+ `miek.nl. IN (
+ 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1",
+ "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1",
+ "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1",
+ "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.",
+ "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.",
+ `(miek.nl.) (
+ (IN)
+ (AAAA)
+ (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1",
+ `(miek.nl.) (
+ (IN)
+ (AAAA)
+ (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1",
+ "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2",
+ `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) (
+ 2009032802 ; serial
+ 21600 ; refresh (6 hours)
+ 7(2)00 ; retry (2 hours)
+ 604()800 ; expire (1 week)
+ 3600 ; minimum (1 hour)
+ )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600",
+ "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10",
+ "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11",
+ "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12",
+ `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. (
+ 2009032802 ; serial
+ 21600 ; refresh (6 hours)
+ 7200 ; retry (2 hours)
+ 604800 ; expire (1 week)
+ 3600 ; minimum (1 hour)
+ )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600",
+ }
+ for i, o := range tests {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Errorf("failed to parse RR: %v\n\t%s", err, i)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseFailure(t *testing.T) {
+ tests := []string{"miek.nl. IN A 327.0.0.1",
+ "miek.nl. IN AAAA ::x",
+ "miek.nl. IN MX a0 miek.nl.",
+ "miek.nl aap IN MX mx.miek.nl.",
+ "miek.nl 200 IN mxx 10 mx.miek.nl.",
+ "miek.nl. inn MX 10 mx.miek.nl.",
+ // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata
+ "miek.nl. IN CNAME ..",
+ "miek.nl. PA MX 10 miek.nl.",
+ "miek.nl. ) IN MX 10 miek.nl.",
+ }
+
+ for _, s := range tests {
+ _, err := NewRR(s)
+ if err == nil {
+ t.Errorf("should have triggered an error: \"%s\"", s)
+ }
+ }
+}
+
+func TestZoneParsing(t *testing.T) {
+ // parse_test.db
+ db := `
+a.example.com. IN A 127.0.0.1
+8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG
+$ORIGIN a.example.com.
+test IN A 127.0.0.1
+ IN SSHFP 1 2 (
+ BC6533CDC95A79078A39A56EA7635984ED655318ADA9
+ B6159E30723665DA95BB )
+$ORIGIN b.example.com.
+test IN CNAME test.a.example.com.
+`
+ start := time.Now().UnixNano()
+ to := ParseZone(strings.NewReader(db), "", "parse_test.db")
+ var i int
+ for x := range to {
+ i++
+ if x.Error != nil {
+ t.Error(x.Error)
+ continue
+ }
+ t.Log(x.RR)
+ }
+ delta := time.Now().UnixNano() - start
+ t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
+}
+
+func ExampleParseZone() {
+ zone := `$ORIGIN .
+$TTL 3600 ; 1 hour
+name IN SOA a6.nstld.com. hostmaster.nic.name. (
+ 203362132 ; serial
+ 300 ; refresh (5 minutes)
+ 300 ; retry (5 minutes)
+ 1209600 ; expire (2 weeks)
+ 300 ; minimum (5 minutes)
+ )
+$TTL 10800 ; 3 hours
+name. 10800 IN NS name.
+ IN NS g6.nstld.com.
+ 7200 NS h6.nstld.com.
+ 3600 IN NS j6.nstld.com.
+ IN 3600 NS k6.nstld.com.
+ NS l6.nstld.com.
+ NS a6.nstld.com.
+ NS c6.nstld.com.
+ NS d6.nstld.com.
+ NS f6.nstld.com.
+ NS m6.nstld.com.
+(
+ NS m7.nstld.com.
+)
+$ORIGIN name.
+0-0onlus NS ns7.ehiweb.it.
+ NS ns8.ehiweb.it.
+0-g MX 10 mx01.nic
+ MX 10 mx02.nic
+ MX 10 mx03.nic
+ MX 10 mx04.nic
+$ORIGIN 0-g.name
+moutamassey NS ns01.yahoodomains.jp.
+ NS ns02.yahoodomains.jp.
+`
+ to := ParseZone(strings.NewReader(zone), "", "testzone")
+ for x := range to {
+ fmt.Println(x.RR)
+ }
+ // Output:
+ // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
+ // name. 10800 IN NS name.
+ // name. 10800 IN NS g6.nstld.com.
+ // name. 7200 IN NS h6.nstld.com.
+ // name. 3600 IN NS j6.nstld.com.
+ // name. 3600 IN NS k6.nstld.com.
+ // name. 10800 IN NS l6.nstld.com.
+ // name. 10800 IN NS a6.nstld.com.
+ // name. 10800 IN NS c6.nstld.com.
+ // name. 10800 IN NS d6.nstld.com.
+ // name. 10800 IN NS f6.nstld.com.
+ // name. 10800 IN NS m6.nstld.com.
+ // name. 10800 IN NS m7.nstld.com.
+ // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it.
+ // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it.
+ // 0-g.name. 10800 IN MX 10 mx01.nic.name.
+ // 0-g.name. 10800 IN MX 10 mx02.nic.name.
+ // 0-g.name. 10800 IN MX 10 mx03.nic.name.
+ // 0-g.name. 10800 IN MX 10 mx04.nic.name.
+ // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp.
+ // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp.
+}
+
+func ExampleHIP() {
+ h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578
+ AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p
+9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ
+b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
+ rvs.example.com. )`
+ if hip, err := NewRR(h); err == nil {
+ fmt.Println(hip.String())
+ }
+ // Output:
+ // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
+}
+
+func TestHIP(t *testing.T) {
+ h := `www.example.com. IN HIP ( 2 200100107B1A74DF365639CC39F1D578
+ AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p
+9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ
+b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
+ rvs1.example.com.
+ rvs2.example.com. )`
+ rr, err := NewRR(h)
+ if err != nil {
+ t.Fatalf("failed to parse RR: %v", err)
+ }
+ t.Logf("RR: %s", rr)
+ msg := new(Msg)
+ msg.Answer = []RR{rr, rr}
+ bytes, err := msg.Pack()
+ if err != nil {
+ t.Fatalf("failed to pack msg: %v", err)
+ }
+ if err := msg.Unpack(bytes); err != nil {
+ t.Fatalf("failed to unpack msg: %v", err)
+ }
+ if len(msg.Answer) != 2 {
+ t.Fatalf("2 answers expected: %v", msg)
+ }
+ for i, rr := range msg.Answer {
+ rr := rr.(*HIP)
+ t.Logf("RR: %s", rr)
+ if l := len(rr.RendezvousServers); l != 2 {
+ t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg)
+ }
+ for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} {
+ if rr.RendezvousServers[j] != s {
+ t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg)
+ }
+ }
+ }
+}
+
+func ExampleSOA() {
+ s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100"
+ if soa, err := NewRR(s); err == nil {
+ fmt.Println(soa.String())
+ }
+ // Output:
+ // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100
+}
+
+func TestLineNumberError(t *testing.T) {
+ s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100"
+ if _, err := NewRR(s); err != nil {
+ if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" {
+ t.Error("not expecting this error: ", err)
+ }
+ }
+}
+
+// Test with no known RR on the line
+func TestLineNumberError2(t *testing.T) {
+ tests := map[string]string{
+ "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21",
+ "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57",
+ "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60",
+ `example.com 1000 IN TALINK ( a.example.com.
+ bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18",
+ // This is a bug: it should report an error on line 1, but the newline has already been processed.
+ `example.com 1000 IN TALINK ( a.example.com. b...example.com.
+ )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"}
+
+ for in, errStr := range tests {
+ _, err := NewRR(in)
+ if err == nil {
+ t.Error("err is nil")
+ } else {
+ if err.Error() != errStr {
+ t.Errorf("%s: error should be %s is %v", in, errStr, err)
+ }
+ }
+ }
+}
+
+// Test if the calculations are correct
+func TestRfc1982(t *testing.T) {
+ // If the current time and the timestamp are more than 68 years apart,
+ // the date has wrapped. Serial 0 corresponds to 1970-01-01 00:00:00 UTC.
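+ // Concretely, the timestamps are uint32 seconds since 1970, so 1<<31 stringifies
+ // to 20380119031408 and 1<<32-1 to 21060207062815 (see inttests below), and a
+ // pre-1970 string such as 19690101000000 can only come back shifted forward by
+ // one 2^32-second wrap, which the "future" table checks.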
+
+ // These fall within the current 68-year span.
+ strtests := []string{"20120525134203", "19700101000000", "20380119031408"}
+ for _, v := range strtests {
+ if x, _ := StringToTime(v); v != TimeToString(x) {
+ t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
+ }
+ }
+
+ inttests := map[uint32]string{0: "19700101000000",
+ 1 << 31: "20380119031408",
+ 1<<32 - 1: "21060207062815",
+ }
+ for i, v := range inttests {
+ if TimeToString(i) != v {
+ t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
+ }
+ }
+
+ // Future tests: these dates get parsed to a date within the current 136-year span.
+ future := map[string]string{"22680119031408": "20631123173144",
+ "19010101121212": "20370206184028",
+ "19210101121212": "20570206184028",
+ "19500101121212": "20860206184028",
+ "19700101000000": "19700101000000",
+ "19690101000000": "21050207062816",
+ "29210101121212": "21040522212236",
+ }
+ for from, to := range future {
+ x, _ := StringToTime(from)
+ y := TimeToString(x)
+ if y != to {
+ t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
+ }
+ }
+}
+
+func TestEmpty(t *testing.T) {
+ for range ParseZone(strings.NewReader(""), "", "") {
+ t.Errorf("should be empty")
+ }
+}
+
+func TestLowercaseTokens(t *testing.T) {
+ var testrecords = []string{
+ "example.org. 300 IN a 1.2.3.4",
+ "example.org. 300 in A 1.2.3.4",
+ "example.org. 300 in a 1.2.3.4",
+ "example.org. 300 a 1.2.3.4",
+ "example.org. 300 A 1.2.3.4",
+ "example.org. IN a 1.2.3.4",
+ "example.org. in A 1.2.3.4",
+ "example.org. in a 1.2.3.4",
+ "example.org. a 1.2.3.4",
+ "example.org. A 1.2.3.4",
+ "example.org. a 1.2.3.4",
+ "$ORIGIN example.org.\n a 1.2.3.4",
+ "$Origin example.org.\n a 1.2.3.4",
+ "$origin example.org.\n a 1.2.3.4",
+ "example.org. Class1 Type1 1.2.3.4",
+ }
+ for _, testrr := range testrecords {
+ _, err := NewRR(testrr)
+ if err != nil {
+ t.Errorf("failed to parse %#v, got %v", testrr, err)
+ }
+ }
+}
+
+func ExampleParseZone_generate() {
+ // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761
+ zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0"
+ to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "")
+ for x := range to {
+ if x.Error == nil {
+ fmt.Println(x.RR.String())
+ }
+ }
+ // Output:
+ // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE.
+ // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE.
+ // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA.
+ // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA.
+ // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA.
+ // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA.
+ // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA.
+ // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA.
+ // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA.
+ // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA.
+}
+
+func TestSRVPacking(t *testing.T) {
+ msg := Msg{}
+
+ things := []string{"1.2.3.4:8484",
+ "45.45.45.45:8484",
+ "84.84.84.84:8484",
+ }
+
+ for i, n := range things {
+ h, p, err := net.SplitHostPort(n)
+ if err != nil {
+ continue
+ }
+ port := 8484
+ tmp, err := strconv.Atoi(p)
+ if err == nil {
+ port = tmp
+ }
+
+ rr := &SRV{
+ Hdr: RR_Header{Name: "somename.",
+ Rrtype: TypeSRV,
+ Class: ClassINET,
+ Ttl: 5},
+ Priority: uint16(i),
+ Weight: 5,
+ Port: uint16(port),
+ Target: h + ".",
+ }
+
+ msg.Answer = append(msg.Answer, rr)
+ }
+
+ _, err := msg.Pack()
+ if err != nil {
+ t.Fatalf("couldn't pack %v: %v", msg, err)
+ }
+}
+
+func TestParseBackslash(t *testing.T) {
+ if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil {
+ t.Errorf("could not create RR with \\000 in it")
+ } else {
+ t.Logf("parsed %s", r.String())
+ }
+ if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil {
+ t.Errorf("could not create RR with \\000 in it")
+ } else {
+ t.Logf("parsed %s", r.String())
+ }
+ if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil {
+ t.Errorf("could not create RR with \\ and \\@ in it")
+ } else {
+ t.Logf("parsed %s", r.String())
+ }
+}
+
+func TestILNP(t *testing.T) {
+ tests := []string{
+ "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64",
+ "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65",
+ "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66",
+ "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0",
+ "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0",
+ "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0",
+ "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000",
+ "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000",
+ "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000",
+ "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.",
+ "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.",
+ "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.",
+ }
+ for _, t1 := range tests {
+ r, err := NewRR(t1)
+ if err != nil {
+ t.Fatalf("an error occurred: %v", err)
+ } else {
+ if t1 != r.String() {
+ t.Fatalf("strings should be equal %s %s", t1, r.String())
+ }
+ }
+ }
+}
+
+func TestGposEidNimloc(t *testing.T) {
+ dt := map[string]string{
+ "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.",
+ "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0",
+ "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0",
+ "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920",
+ "VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793",
+ }
+ for i, o := range dt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestPX(t *testing.T) {
+ dt := map[string]string{
+ "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.",
+ "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.",
+ }
+ for i, o := range dt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestComment(t *testing.T) {
+ // Comments we must see
+ comments := map[string]bool{"; this is comment 1": true,
+ "; this is comment 4": true, "; this is comment 6": true,
+ "; this is comment 7": true, "; this is comment 8": true}
+ zone := `
+foo. IN A 10.0.0.1 ; this is comment 1
+foo. IN A (
+ 10.0.0.2 ; this is comment2
+)
+; this is comment3
+foo. IN A 10.0.0.3
+foo. IN A ( 10.0.0.4 ); this is comment 4
+
+foo. IN A 10.0.0.5
+; this is comment5
+
+foo. IN A 10.0.0.6
+
+foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6
+foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7
+foo. IN TXT "THIS IS TEXT MAN"; this is comment 8
+`
+ for x := range ParseZone(strings.NewReader(zone), ".", "") {
+ if x.Error == nil {
+ if x.Comment != "" {
+ if _, ok := comments[x.Comment]; !ok {
+ t.Errorf("wrong comment %s", x.Comment)
+ }
+ }
+ }
+ }
+}
+
+func TestEUIxx(t *testing.T) {
+ tests := map[string]string{
+ "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a",
+ "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a",
+ }
+ for i, o := range tests {
+ r, err := NewRR(i)
+ if err != nil {
+ t.Errorf("failed to parse %s: %v", i, err)
+ }
+ if r.String() != o {
+ t.Errorf("want %s, got %s", o, r.String())
+ }
+ }
+}
+
+func TestUserRR(t *testing.T) {
+ tests := map[string]string{
+ "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234",
+ "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556",
+ "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"",
+ }
+ for i, o := range tests {
+ r, err := NewRR(i)
+ if err != nil {
+ t.Errorf("failed to parse %s: %v", i, err)
+ }
+ if r.String() != o {
+ t.Errorf("want %s, got %s", o, r.String())
+ }
+ }
+}
+
+func TestTXT(t *testing.T) {
+ // Test single entry TXT record
+ rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`)
+ if err != nil {
+ t.Error("failed to parse single value TXT record", err)
+ } else if rr, ok := rr.(*TXT); !ok {
+ t.Error("wrong type, record should be of type TXT")
+ } else {
+ if len(rr.Txt) != 1 {
+ t.Error("bad size of TXT value:", len(rr.Txt))
+ } else if rr.Txt[0] != "single value" {
+ t.Error("bad single value")
+ }
+ if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` {
+ t.Error("bad representation of TXT record:", rr.String())
+ }
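+ // A sketch of the expected size: the packed owner "_raop._tcp.local." is 18
+ // octets, the fixed type/class/TTL/rdlength header adds 2+2+4+2 = 10 (hence 28),
+ // and the rdata is one length octet plus the 12 octets of "single value".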
+ if rr.len() != 28+1+12 {
+ t.Error("bad size of serialized record:", rr.len())
+ }
+ }
+
+ // Test multi entries TXT record
+ rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`)
+ if err != nil {
+ t.Error("failed to parse multi-values TXT record", err)
+ } else if rr, ok := rr.(*TXT); !ok {
+ t.Error("wrong type, record should be of type TXT")
+ } else {
+ if len(rr.Txt) != 4 {
+ t.Error("bad size of TXT multi-value:", len(rr.Txt))
+ } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" {
+ t.Error("bad values in TXT records")
+ }
+ if rr.String() != `_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"` {
+ t.Error("bad representation of TXT multi value record:", rr.String())
+ }
+ if rr.len() != 28+1+3+1+3+1+3+1+3 {
+ t.Error("bad size of serialized multi value record:", rr.len())
+ }
+ }
+
+ // Test empty-string in TXT record
+ rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`)
+ if err != nil {
+ t.Error("failed to parse empty-string TXT record", err)
+ } else if rr, ok := rr.(*TXT); !ok {
+ t.Error("wrong type, record should be of type TXT")
+ } else {
+ if len(rr.Txt) != 1 {
+ t.Error("bad size of TXT empty-string value:", len(rr.Txt))
+ } else if rr.Txt[0] != "" {
+ t.Error("bad value for empty-string TXT record")
+ }
+ if rr.String() != `_raop._tcp.local. 60 IN TXT ""` {
+ t.Error("bad representation of empty-string TXT record:", rr.String())
+ }
+ if rr.len() != 28+1 {
+ t.Error("bad size of serialized record:", rr.len())
+ }
+ }
+
+ // Test a TXT record with a chunk larger than 255 bytes; the parser should split it up.
+ s := ""
+ for i := 0; i < 255; i++ {
+ s += "a"
+ }
+ s += "b"
+ rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`)
+ if err != nil {
+ t.Error("failed to parse empty-string TXT record", err)
+ }
+ if rr.(*TXT).Txt[1] != "b" {
+ t.Errorf("Txt should have two chunks, the last one must be 'b', but is %s", rr.(*TXT).Txt[1])
+ }
+ t.Log(rr.String())
+}
+
+func TestTypeXXXX(t *testing.T) {
+ _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd")
+ if err != nil {
+ t.Errorf("failed to parse TYPE1234 RR: %v", err)
+ }
+ _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd")
+ if err == nil {
+ t.Errorf("this should not work, for TYPE655341")
+ }
+ _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001")
+ if err == nil {
+ t.Errorf("this should not work")
+ }
+}
+
+func TestPTR(t *testing.T) {
+ _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.")
+ if err != nil {
+ t.Error("failed to parse ", err)
+ }
+}
+
+func TestDigit(t *testing.T) {
+ tests := map[string]byte{
+ "miek\\000.nl. 100 IN TXT \"A\"": 0,
+ "miek\\001.nl. 100 IN TXT \"A\"": 1,
+ "miek\\254.nl. 100 IN TXT \"A\"": 254,
+ "miek\\255.nl. 100 IN TXT \"A\"": 255,
+ "miek\\256.nl. 100 IN TXT \"A\"": 0,
+ "miek\\257.nl. 100 IN TXT \"A\"": 1,
+ "miek\\004.nl. 100 IN TXT \"A\"": 4,
+ }
+ for s, i := range tests {
+ r, err := NewRR(s)
+ buf := make([]byte, 40)
+ if err != nil {
+ t.Fatalf("failed to parse %v", err)
+ }
+ PackRR(r, buf, 0, nil, false)
+ t.Log(buf)
+ if buf[5] != i {
+ t.Fatalf("5 pos must be %d, is %d", i, buf[5])
+ }
+ r1, _, _ := UnpackRR(buf, 0)
+ if r1.Header().Ttl != 100 {
+ t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl)
+ }
+ }
+}
+
+func TestParseRRSIGTimestamp(t *testing.T) {
+ tests := map[string]bool{
+ `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true,
+ `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true,
+ }
+ for r := range tests {
+ _, err := NewRR(r)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func TestTxtEqual(t *testing.T) {
+ rr1 := new(TXT)
+ rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
+ rr1.Txt = []string{"a\"a", "\"", "b"}
+ rr2, _ := NewRR(rr1.String())
+ if rr1.String() != rr2.String() {
+ // This is not an error, but keep this test.
+ t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String())
+ }
+ t.Logf("%s\n%s", rr1.String(), rr2.String())
+}
+
+func TestTxtLong(t *testing.T) {
+ rr1 := new(TXT)
+ rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
+ // Make a long txt record, this breaks when sending the packet,
+ // but not earlier.
+ rr1.Txt = []string{"start-"}
+ for i := 0; i < 200; i++ {
+ rr1.Txt[0] += "start-"
+ }
+ str := rr1.String()
+ if len(str) < len(rr1.Txt[0]) {
+ t.Error("string conversion should work")
+ }
+}
+
+// Basically, don't crash.
+func TestMalformedPackets(t *testing.T) {
+ var packets = []string{
+ "0021641c0000000100000000000078787878787878787878787303636f6d0000100001",
+ }
+
+ // com = 63 6f 6d
+ for _, packet := range packets {
+ data, _ := hex.DecodeString(packet)
+ // for _, v := range data {
+ // t.Log(v)
+ // }
+ var msg Msg
+ msg.Unpack(data)
+ // println(msg.String())
+ }
+}
+
+type algorithm struct {
+ name uint8
+ bits int
+}
+
+func TestNewPrivateKey(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ algorithms := []algorithm{
+ {ECDSAP256SHA256, 256},
+ {ECDSAP384SHA384, 384},
+ {RSASHA1, 1024},
+ {RSASHA256, 2048},
+ {DSA, 1024},
+ }
+
+ for _, algo := range algorithms {
+ key := new(DNSKEY)
+ key.Hdr.Rrtype = TypeDNSKEY
+ key.Hdr.Name = "miek.nl."
+ key.Hdr.Class = ClassINET
+ key.Hdr.Ttl = 14400
+ key.Flags = 256
+ key.Protocol = 3
+ key.Algorithm = algo.name
+ privkey, err := key.Generate(algo.bits)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey))
+ if err != nil {
+ t.Error(key.String())
+ t.Error(key.PrivateKeyString(privkey))
+ t.Fatal(err)
+ }
+
+ switch newPrivKey := newPrivKey.(type) {
+ case *rsa.PrivateKey:
+ newPrivKey.Precompute()
+ }
+
+ if !reflect.DeepEqual(privkey, newPrivKey) {
+ t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey)
+ }
+ }
+}
+
+// special input test
+func TestNewRRSpecial(t *testing.T) {
+ var (
+ rr RR
+ err error
+ expect string
+ )
+
+ rr, err = NewRR("; comment")
+ expect = ""
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr != nil {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+
+ rr, err = NewRR("")
+ expect = ""
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr != nil {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+
+ rr, err = NewRR("$ORIGIN foo.")
+ expect = ""
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr != nil {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+
+ rr, err = NewRR(" ")
+ expect = ""
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr != nil {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+
+ rr, err = NewRR("\n")
+ expect = ""
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr != nil {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+
+ rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2")
+ expect = "foo.\t3600\tIN\tA\t1.1.1.1"
+ if err != nil {
+ t.Errorf("unexpected err: %v", err)
+ }
+ if rr == nil || rr.String() != expect {
+ t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
+ }
+}
+
+func TestPrintfVerbsRdata(t *testing.T) {
+ x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.")
+ if Field(x, 1) != "20" {
+ t.Errorf("should be 20")
+ }
+ if Field(x, 2) != "mx.miek.nl." {
+ t.Errorf("should be mx.miek.nl.")
+ }
+
+ x, _ = NewRR("www.miek.nl. IN A 127.0.0.1")
+ if Field(x, 1) != "127.0.0.1" {
+ t.Errorf("should be 127.0.0.1")
+ }
+
+ x, _ = NewRR("www.miek.nl. IN AAAA ::1")
+ if Field(x, 1) != "::1" {
+ t.Errorf("should be ::1")
+ }
+
+ x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA")
+ if Field(x, 1) != "a.miek.nl." {
+ t.Errorf("should be a.miek.nl.")
+ }
+ if Field(x, 2) != "A NS SOA MX AAAA" {
+ t.Errorf("should be A NS SOA MX AAAA")
+ }
+
+ x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"")
+ if Field(x, 1) != "first second" {
+ t.Errorf("should be first second")
+ }
+ if Field(x, 0) != "" {
+ t.Errorf("should be empty")
+ }
+}
+
+func TestParseTokenOverflow(t *testing.T) {
+ _, err := NewRR("_443._tcp.example.org. IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f446967694365727453484132486967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797")
+ if err == nil {
+ t.Fatalf("token overflow should return an error")
+ }
+ t.Logf("err: %s\n", err)
+}
+
+func TestParseTLSA(t *testing.T) {
+ lt := []string{
+ "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00",
+ "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3",
+ "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2",
+ }
+ for _, o := range lt {
+ rr, err := NewRR(o)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseSSHFP(t *testing.T) {
+ lt := []string{
+ "test.example.org.\t300\tSSHFP\t1 2 (\n" +
+ "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" +
+ "\t\t\t\t\tB6159E30723665DA95BB )",
+ "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )",
+ }
+ result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB"
+ for _, o := range lt {
+ rr, err := NewRR(o)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != result {
+ t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseHINFO(t *testing.T) {
+ dt := map[string]string{
+ "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"",
+ "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"",
+ "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"",
+ "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"",
+ // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"",
+ // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html
+ // but effectively, even Bind would replace it to correctly formed text when you AXFR
+ // TODO: remove this set of comments or figure out support for quoted/unquoted combinations in the endingToTxtSlice function
+ }
+ for i, o := range dt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestParseCAA(t *testing.T) {
+ lt := map[string]string{
+ "example.net. CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"",
+ "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"",
+ "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"",
+ "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"",
+ "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"",
+ }
+ for i, o := range lt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
+
+func TestPackCAA(t *testing.T) {
+ m := new(Msg)
+ record := new(CAA)
+ record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0}
+ record.Tag = "issue"
+ record.Value = "symantec.com"
+ record.Flag = 1
+
+ m.Answer = append(m.Answer, record)
+ bytes, err := m.Pack()
+ if err != nil {
+ t.Fatalf("failed to pack msg: %v", err)
+ }
+ if err := m.Unpack(bytes); err != nil {
+ t.Fatalf("failed to unpack msg: %v", err)
+ }
+ if len(m.Answer) != 1 {
+ t.Fatalf("incorrect number of answers unpacked")
+ }
+ rr := m.Answer[0].(*CAA)
+ if rr.Tag != "issue" {
+ t.Fatalf("invalid tag for unpacked answer")
+ } else if rr.Value != "symantec.com" {
+ t.Fatalf("invalid value for unpacked answer")
+ } else if rr.Flag != 1 {
+ t.Fatalf("invalid flag for unpacked answer")
+ }
+}
+
+func TestParseURI(t *testing.T) {
+ lt := map[string]string{
+ "_http._tcp. IN URI 10 1 \"http://www.example.com/path\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"http://www.example.com/path\"",
+ "_http._tcp. IN URI 10 1 \"\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"\"",
+ }
+ for i, o := range lt {
+ rr, err := NewRR(i)
+ if err != nil {
+ t.Error("failed to parse RR: ", err)
+ continue
+ }
+ if rr.String() != o {
+ t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
+ } else {
+ t.Logf("RR is OK: `%s'", rr.String())
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
new file mode 100644
index 000000000..6b08e6e95
--- /dev/null
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -0,0 +1,149 @@
+package dns
+
+import (
+ "fmt"
+ "strings"
+)
+
+// PrivateRdata is an interface used for implementing "Private Use" RR types, see
+// RFC 6895. This allows one to experiment with new RR types, without requesting an
+// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
+type PrivateRdata interface {
+ // String returns the text presentation of the Rdata of the Private RR.
+ String() string
+ // Parse parses the Rdata of the private RR.
+ Parse([]string) error
+ // Pack is used when packing a private RR into a buffer.
+ Pack([]byte) (int, error)
+ // Unpack is used when unpacking a private RR from a buffer.
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance.
+ Unpack([]byte) (int, error)
+ // Copy copies the Rdata.
+ Copy(PrivateRdata) error
+ // Len returns the length in octets of the Rdata.
+ Len() int
+}
+
+// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
+// It mocks normal RRs and implements dns.RR interface.
+type PrivateRR struct {
+ Hdr RR_Header
+ Data PrivateRdata
+}
+
+func mkPrivateRR(rrtype uint16) *PrivateRR {
+ // Panics if RR is not an instance of PrivateRR.
+ rrfunc, ok := TypeToRR[rrtype]
+ if !ok {
+ panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
+ }
+
+ anyrr := rrfunc()
+ switch rr := anyrr.(type) {
+ case *PrivateRR:
+ return rr
+ }
+ panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
+}
+
+// Header returns the RR header of r.
+func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
+
+func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
+
+// Private len and copy parts to satisfy RR interface.
+func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
+func (r *PrivateRR) copy() RR {
+ // make new RR like this:
+ rr := mkPrivateRR(r.Hdr.Rrtype)
+ newh := r.Hdr.copyHeader()
+ rr.Hdr = *newh
+
+ err := r.Data.Copy(rr.Data)
+ if err != nil {
+ panic("dns: got value that could not be used to copy Private rdata")
+ }
+ return rr
+}
+func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := r.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ n, err := r.Data.Pack(msg[off:])
+ if err != nil {
+ return len(msg), err
+ }
+ off += n
+ r.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+// PrivateHandle registers a private resource record type. It requires a
+// string and a numeric representation of the private RR type, and a generator function, as arguments.
+func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
+ rtypestr = strings.ToUpper(rtypestr)
+
+ TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
+ TypeToString[rtype] = rtypestr
+ StringToType[rtypestr] = rtype
+
+ typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
+ if noRdata(h) {
+ return &h, off, nil
+ }
+ var err error
+
+ rr := mkPrivateRR(h.Rrtype)
+ rr.Hdr = h
+
+ off1, err := rr.Data.Unpack(msg[off:])
+ off += off1
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+ }
+
+ setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := mkPrivateRR(h.Rrtype)
+ rr.Hdr = h
+
+ var l lex
+ text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
+ Fetch:
+ for {
+ // TODO(miek): we could also be returning _QUOTE, this might or might not
+ // be an issue (basically parsing TXT becomes hard)
+ switch l = <-c; l.value {
+ case zNewline, zEOF:
+ break Fetch
+ case zString:
+ text = append(text, l.token)
+ }
+ }
+
+ err := rr.Data.Parse(text)
+ if err != nil {
+ return nil, &ParseError{f, err.Error(), l}, ""
+ }
+
+ return rr, nil, ""
+ }
+
+ typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
+}
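+
+// An illustrative usage sketch, mirroring the ISBN example in privaterr_test.go
+// (the ISBN name, the 0x0F01 type code and NewISBN come from that test, not from this API):
+//
+//	dns.PrivateHandle("ISBN", 0x0F01, NewISBN) // NewISBN returns a fresh PrivateRdata
+//	defer dns.PrivateHandleRemove(0x0F01)
+//	rr, err := dns.NewRR("example.org. IN ISBN 12-3 456789-0-123")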
+
+// PrivateHandleRemove removes the definitions required to support a private RR type.
+func PrivateHandleRemove(rtype uint16) {
+ rtypestr, ok := TypeToString[rtype]
+ if ok {
+ delete(TypeToRR, rtype)
+ delete(TypeToString, rtype)
+ delete(typeToparserFunc, rtype)
+ delete(StringToType, rtypestr)
+ delete(typeToUnpack, rtype)
+ }
+ return
+}
diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go
new file mode 100644
index 000000000..5f177aa47
--- /dev/null
+++ b/vendor/github.com/miekg/dns/privaterr_test.go
@@ -0,0 +1,171 @@
+package dns_test
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/miekg/dns"
+)
+
+const TypeISBN uint16 = 0x0F01
+
+// A crazy new RR type :)
+type ISBN struct {
+ x string // rdata with 10 or 13 numbers, dashes or spaces allowed
+}
+
+func NewISBN() dns.PrivateRdata { return &ISBN{""} }
+
+func (rd *ISBN) Len() int { return len([]byte(rd.x)) }
+func (rd *ISBN) String() string { return rd.x }
+
+func (rd *ISBN) Parse(txt []string) error {
+ rd.x = strings.TrimSpace(strings.Join(txt, " "))
+ return nil
+}
+
+func (rd *ISBN) Pack(buf []byte) (int, error) {
+ b := []byte(rd.x)
+ n := copy(buf, b)
+ if n != len(b) {
+ return n, dns.ErrBuf
+ }
+ return n, nil
+}
+
+func (rd *ISBN) Unpack(buf []byte) (int, error) {
+ rd.x = string(buf)
+ return len(buf), nil
+}
+
+func (rd *ISBN) Copy(dest dns.PrivateRdata) error {
+ isbn, ok := dest.(*ISBN)
+ if !ok {
+ return dns.ErrRdata
+ }
+ isbn.x = rd.x
+ return nil
+}
+
+var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t")
+
+func TestPrivateText(t *testing.T) {
+ dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
+ defer dns.PrivateHandleRemove(TypeISBN)
+
+ rr, err := dns.NewRR(testrecord)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rr.String() != testrecord {
+ t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord)
+ } else {
+ t.Log(rr.String())
+ }
+}
+
+func TestPrivateByteSlice(t *testing.T) {
+ dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
+ defer dns.PrivateHandleRemove(TypeISBN)
+
+ rr, err := dns.NewRR(testrecord)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf := make([]byte, 100)
+ off, err := dns.PackRR(rr, buf, 0, nil, false)
+ if err != nil {
+ t.Errorf("got error packing ISBN: %v", err)
+ }
+
+ custrr := rr.(*dns.PrivateRR)
+ if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off {
+ t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln)
+ }
+
+ rr1, off1, err := dns.UnpackRR(buf[:off], 0)
+ if err != nil {
+ t.Errorf("got error unpacking ISBN: %v", err)
+ return
+ }
+
+ if off1 != off {
+ t.Errorf("offset after unpacking differs: %d != %d", off1, off)
+ }
+
+ if rr1.String() != testrecord {
+ t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord)
+ } else {
+ t.Log(rr1.String())
+ }
+}
+
+const TypeVERSION uint16 = 0x0F02
+
+type VERSION struct {
+ x string
+}
+
+func NewVersion() dns.PrivateRdata { return &VERSION{""} }
+
+func (rd *VERSION) String() string { return rd.x }
+func (rd *VERSION) Parse(txt []string) error {
+ rd.x = strings.TrimSpace(strings.Join(txt, " "))
+ return nil
+}
+
+func (rd *VERSION) Pack(buf []byte) (int, error) {
+ b := []byte(rd.x)
+ n := copy(buf, b)
+ if n != len(b) {
+ return n, dns.ErrBuf
+ }
+ return n, nil
+}
+
+func (rd *VERSION) Unpack(buf []byte) (int, error) {
+ rd.x = string(buf)
+ return len(buf), nil
+}
+
+func (rd *VERSION) Copy(dest dns.PrivateRdata) error {
+ isbn, ok := dest.(*VERSION)
+ if !ok {
+ return dns.ErrRdata
+ }
+ isbn.x = rd.x
+ return nil
+}
+
+func (rd *VERSION) Len() int {
+ return len([]byte(rd.x))
+}
+
+var smallzone = `$ORIGIN example.org.
+@ SOA sns.dns.icann.org. noc.dns.icann.org. (
+ 2014091518 7200 3600 1209600 3600
+)
+ A 1.2.3.4
+ok ISBN 1231-92110-12
+go VERSION (
+ 1.3.1 ; comment
+)
+www ISBN 1231-92110-16
+* CNAME @
+`
+
+func TestPrivateZoneParser(t *testing.T) {
+ dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
+ dns.PrivateHandle("VERSION", TypeVERSION, NewVersion)
+ defer dns.PrivateHandleRemove(TypeISBN)
+ defer dns.PrivateHandleRemove(TypeVERSION)
+
+ r := strings.NewReader(smallzone)
+ for x := range dns.ParseZone(r, ".", "") {
+ if err := x.Error; err != nil {
+ t.Fatal(err)
+ }
+ t.Log(x.RR)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go
new file mode 100644
index 000000000..6e21fba7e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/rawmsg.go
@@ -0,0 +1,49 @@
+package dns
+
+import "encoding/binary"
+
+// rawSetRdlength sets the rdlength in the header of
+// the RR. The offset 'off' must be positioned at the
+// start of the header of the RR, 'end' must be the
+// end of the RR.
+func rawSetRdlength(msg []byte, off, end int) bool {
+ l := len(msg)
+Loop:
+ for {
+ if off+1 > l {
+ return false
+ }
+ c := int(msg[off])
+ off++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // End of the domainname
+ break Loop
+ }
+ if off+c > l {
+ return false
+ }
+ off += c
+
+ case 0xC0:
+ // pointer, next byte included, ends domainname
+ off++
+ break Loop
+ }
+ }
+ // The domainname has been seen, we are at the start of the fixed part of the header.
+ // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
+ off += 2 + 2 + 4
+ if off+2 > l {
+ return false
+ }
+ // off+2 is the end of the header ('off' points at the rdlength field), 'end' is the end of the rr,
+ // so 'end' - (off+2) is the length of the rdata
+ rdatalen := end - (off + 2)
+ if rdatalen > 0xFFFF {
+ return false
+ }
+ binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
+ return true
+}
diff --git a/vendor/github.com/miekg/dns/remote_test.go b/vendor/github.com/miekg/dns/remote_test.go
new file mode 100644
index 000000000..4cf701fe4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/remote_test.go
@@ -0,0 +1,19 @@
+package dns
+
+import "testing"
+
+const LinodeAddr = "176.58.119.54:53"
+
+func TestClientRemote(t *testing.T) {
+ m := new(Msg)
+ m.SetQuestion("go.dns.miek.nl.", TypeTXT)
+
+ c := new(Client)
+ r, _, err := c.Exchange(m, LinodeAddr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get an valid answer\n%v", r)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go
new file mode 100644
index 000000000..099dac948
--- /dev/null
+++ b/vendor/github.com/miekg/dns/reverse.go
@@ -0,0 +1,38 @@
+package dns
+
+// StringToType is the reverse of TypeToString, needed for string parsing.
+var StringToType = reverseInt16(TypeToString)
+
+// StringToClass is the reverse of ClassToString, needed for string parsing.
+var StringToClass = reverseInt16(ClassToString)
+
+// StringToOpcode is a map of opcode strings to opcodes, the reverse of OpcodeToString.
+var StringToOpcode = reverseInt(OpcodeToString)
+
+// StringToRcode is a map of rcode strings to rcodes, the reverse of RcodeToString.
+var StringToRcode = reverseInt(RcodeToString)
+
+// Reverse a map
+func reverseInt8(m map[uint8]string) map[string]uint8 {
+ n := make(map[string]uint8, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt16(m map[uint16]string) map[string]uint16 {
+ n := make(map[string]uint16, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt(m map[int]string) map[string]int {
+ n := make(map[string]int, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go
new file mode 100644
index 000000000..b489f3f05
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sanitize.go
@@ -0,0 +1,84 @@
+package dns
+
+// Dedup removes identical RRs from rrs. It preserves the original ordering.
+// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
+// rrs.
+// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
+func Dedup(rrs []RR, m map[string]RR) []RR {
+ if m == nil {
+ m = make(map[string]RR)
+ }
+ // Save the keys, so we don't have to call normalizedString twice.
+ keys := make([]*string, 0, len(rrs))
+
+ for _, r := range rrs {
+ key := normalizedString(r)
+ keys = append(keys, &key)
+ if _, ok := m[key]; ok {
+ // Shortest TTL wins.
+ if m[key].Header().Ttl > r.Header().Ttl {
+ m[key].Header().Ttl = r.Header().Ttl
+ }
+ continue
+ }
+
+ m[key] = r
+ }
+ // If the length of the result map equals the number of RRs we got,
+ // it means they were all different. We can then just return the original rrset.
+ if len(m) == len(rrs) {
+ return rrs
+ }
+
+ j := 0
+ for i, r := range rrs {
+ // If keys[i] lives in the map, we should copy and remove it.
+ if _, ok := m[*keys[i]]; ok {
+ delete(m, *keys[i])
+ rrs[j] = r
+ j++
+ }
+
+ if len(m) == 0 {
+ break
+ }
+ }
+
+ return rrs[:j]
+}
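+
+// A minimal usage sketch (the scratch-map reuse mirrors BenchmarkDedup in
+// sanitize_test.go below; record contents are illustrative):
+//
+//	rrs = Dedup(rrs, nil)  // dedupe once, an internal scratch map is allocated
+//	m := make(map[string]RR)
+//	rrs = Dedup(rrs, m)    // or pass your own scratch map to reuse across calls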
+
+// normalizedString returns a normalized string from r. The TTL
+// is removed and the domain name is lowercased. We go from this:
+// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
+// lowercasename<TAB>CLASS<TAB>TYPE...
+func normalizedString(r RR) string {
+ // A string Go DNS makes has: domainname<TAB>TTL<TAB>...
+ b := []byte(r.String())
+
+ // find the first non-escaped tab, then another, so we capture where the TTL lives.
+ esc := false
+ ttlStart, ttlEnd := 0, 0
+ for i := 0; i < len(b) && ttlEnd == 0; i++ {
+ switch {
+ case b[i] == '\\':
+ esc = !esc
+ case b[i] == '\t' && !esc:
+ if ttlStart == 0 {
+ ttlStart = i
+ continue
+ }
+ if ttlEnd == 0 {
+ ttlEnd = i
+ }
+ case b[i] >= 'A' && b[i] <= 'Z' && !esc:
+ b[i] += 32
+ default:
+ esc = false
+ }
+ }
+
+ // remove TTL.
+ copy(b[ttlStart:], b[ttlEnd:])
+ cut := ttlEnd - ttlStart
+ return string(b[:len(b)-cut])
+}
diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go
new file mode 100644
index 000000000..c108dc694
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sanitize_test.go
@@ -0,0 +1,84 @@
+package dns
+
+import "testing"
+
+func TestDedup(t *testing.T) {
+ // make it []string
+ testcases := map[[3]RR][]string{
+ [...]RR{
+ newRR(t, "mIek.nl. IN A 127.0.0.1"),
+ newRR(t, "mieK.nl. IN A 127.0.0.1"),
+ newRR(t, "miek.Nl. IN A 127.0.0.1"),
+ }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"},
+ [...]RR{
+ newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"),
+ newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"),
+ newRR(t, "Miek.nL. 500 IN A 127.0.0.1"),
+ }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"},
+ [...]RR{
+ newRR(t, "miek.nl. IN A 127.0.0.1"),
+ newRR(t, "miek.nl. CH A 127.0.0.1"),
+ newRR(t, "miek.nl. IN A 127.0.0.1"),
+ }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1",
+ "miek.nl.\t3600\tCH\tA\t127.0.0.1",
+ },
+ [...]RR{
+ newRR(t, "miek.nl. CH A 127.0.0.1"),
+ newRR(t, "miek.nl. IN A 127.0.0.1"),
+ newRR(t, "miek.de. IN A 127.0.0.1"),
+ }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1",
+ "miek.nl.\t3600\tIN\tA\t127.0.0.1",
+ "miek.de.\t3600\tIN\tA\t127.0.0.1",
+ },
+ [...]RR{
+ newRR(t, "miek.de. IN A 127.0.0.1"),
+ newRR(t, "miek.nl. 200 IN A 127.0.0.1"),
+ newRR(t, "miek.nl. 300 IN A 127.0.0.1"),
+ }: {"miek.de.\t3600\tIN\tA\t127.0.0.1",
+ "miek.nl.\t200\tIN\tA\t127.0.0.1",
+ },
+ }
+
+ for rr, expected := range testcases {
+ out := Dedup([]RR{rr[0], rr[1], rr[2]}, nil)
+ for i, o := range out {
+ if o.String() != expected[i] {
+ t.Fatalf("expected %v, got %v", expected[i], o.String())
+ }
+ }
+ }
+}
+
+func BenchmarkDedup(b *testing.B) {
+ rrs := []RR{
+ newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"),
+ newRR(nil, "mieK.Nl. 1000 IN A 127.0.0.1"),
+ newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"),
+ }
+ m := make(map[string]RR)
+ for i := 0; i < b.N; i++ {
+ Dedup(rrs, m)
+ }
+}
+
+func TestNormalizedString(t *testing.T) {
+ tests := map[RR]string{
+ newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1",
+ newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1",
+ newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\tiek.nl.\tIN\tA\t127.0.0.1",
+ }
+ for tc, expected := range tests {
+ n := normalizedString(tc)
+ if n != expected {
+ t.Errorf("expected %s, got %s", expected, n)
+ }
+ }
+}
+
+func newRR(t *testing.T, s string) RR {
+ r, err := NewRR(s)
+ if err != nil {
+ t.Logf("newRR: %v", err)
+ }
+ return r
+}
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
new file mode 100644
index 000000000..0e83797fb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -0,0 +1,974 @@
+package dns
+
+import (
+ "io"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type debugging bool
+
+const debug debugging = false
+
+func (d debugging) Printf(format string, args ...interface{}) {
+ if d {
+ log.Printf(format, args...)
+ }
+}
+
+const maxTok = 2048 // Largest token we can return.
+const maxUint16 = 1<<16 - 1
+
+// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
+// * Add ownernames if they are left blank;
+// * Suppress sequences of spaces;
+// * Make each RR fit on one line (zNewline is sent as the last token);
+// * Handle comments: ;
+// * Handle braces - anywhere.
+const (
+ // Zonefile
+ zEOF = iota
+ zString
+ zBlank
+ zQuote
+ zNewline
+ zRrtpe
+ zOwner
+ zClass
+ zDirOrigin // $ORIGIN
+ zDirTtl // $TTL
+ zDirInclude // $INCLUDE
+ zDirGenerate // $GENERATE
+
+ // Privatekey file
+ zValue
+ zKey
+
+ zExpectOwnerDir // Ownername
+ zExpectOwnerBl // Whitespace after the ownername
+ zExpectAny // Expect rrtype, ttl or class
+ zExpectAnyNoClass // Expect rrtype or ttl
+ zExpectAnyNoClassBl // The whitespace after zExpectAnyNoClass
+ zExpectAnyNoTtl // Expect rrtype or class
+ zExpectAnyNoTtlBl // Whitespace after zExpectAnyNoTtl
+ zExpectRrtype // Expect rrtype
+ zExpectRrtypeBl // Whitespace BEFORE rrtype
+ zExpectRdata // The first element of the rdata
+ zExpectDirTtlBl // Space after directive $TTL
+ zExpectDirTtl // Directive $TTL
+ zExpectDirOriginBl // Space after directive $ORIGIN
+ zExpectDirOrigin // Directive $ORIGIN
+ zExpectDirIncludeBl // Space after directive $INCLUDE
+ zExpectDirInclude // Directive $INCLUDE
+ zExpectDirGenerate // Directive $GENERATE
+ zExpectDirGenerateBl // Space after directive $GENERATE
+)
+
+// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
+// where the error occurred.
+type ParseError struct {
+ file string
+ err string
+ lex lex
+}
+
+func (e *ParseError) Error() (s string) {
+ if e.file != "" {
+ s = e.file + ": "
+ }
+ s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
+ strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
+ return
+}
+
+type lex struct {
+ token string // text of the token
+ tokenUpper string // uppercase text of the token
+ length int // length of the token
+ err bool // when true, token text has lexer error
+ value uint8 // value: zString, _BLANK, etc.
+ line int // line in the file
+ column int // column in the file
+ torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
+ comment string // any comment text seen
+}
+
+// Token holds a token that is returned when a zone file is parsed.
+type Token struct {
+ // The scanned resource record when Error is nil.
+ RR
+ // When an error occurred, this has the error specifics.
+ Error *ParseError
+ // A potential comment positioned after the RR and on the same line.
+ Comment string
+}
+
+// NewRR reads the RR contained in the string s. Only the first RR is
+// returned. If s contains no RR, return nil with no error. The class
+// defaults to IN and TTL defaults to 3600. The full zone file syntax
+// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
+// RR are set, except RR.Header().Rdlength which is set to 0.
+func NewRR(s string) (RR, error) {
+ if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
+ return ReadRR(strings.NewReader(s+"\n"), "")
+ }
+ return ReadRR(strings.NewReader(s), "")
+}
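+
+// A minimal usage sketch (the record text is illustrative; per the defaults
+// documented above, the class is IN and the TTL is 3600):
+//
+//	rr, err := NewRR("miek.nl. IN MX 10 mx.miek.nl.")
+//	// err != nil on a parse error; rr == nil (with err == nil) for empty
+//	// input such as "" or "; comment".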
+
+// ReadRR reads the RR contained in q.
+// See NewRR for more documentation.
+func ReadRR(q io.Reader, filename string) (RR, error) {
+ r := <-parseZoneHelper(q, ".", filename, 1)
+ if r == nil {
+ return nil, nil
+ }
+
+ if r.Error != nil {
+ return nil, r.Error
+ }
+ return r.RR, nil
+}
+
+// ParseZone reads an RFC 1035 style zonefile from r. It returns *Tokens on the
+// returned channel, which consist of the parsed RR, a potential comment or an error.
+// If there is an error the RR is nil. The string file is only used
+// in error reporting. The string origin is used as the initial origin, as
+// if the file would start with: $ORIGIN origin .
+// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
+// The channel t is closed by ParseZone when the end of r is reached.
+//
+// Basic usage pattern when reading from a string (z) containing the
+// zone data:
+//
+// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
+// if x.Error != nil {
+// // log.Println(x.Error)
+// } else {
+// // Do something with x.RR
+// }
+// }
+//
+// Comments specified after an RR (and on the same line!) are returned too:
+//
+// foo. IN A 10.0.0.1 ; this is a comment
+//
+// The text "; this is comment" is returned in Token.Comment. Comments inside the
+// RR are discarded. Comments on a line by themselves are discarded too.
+func ParseZone(r io.Reader, origin, file string) chan *Token {
+ return parseZoneHelper(r, origin, file, 10000)
+}
+
+func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
+ t := make(chan *Token, chansize)
+ go parseZone(r, origin, file, t, 0)
+ return t
+}
+
+func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
+ defer func() {
+ if include == 0 {
+ close(t)
+ }
+ }()
+ s := scanInit(r)
+ c := make(chan lex)
+ // Start the lexer
+ go zlexer(s, c)
+ // 6 possible beginnings of a line, _ is a space
+ // 0. zRRTYPE -> all omitted until the rrtype
+ // 1. zOwner _ zRrtype -> class/ttl omitted
+ // 2. zOwner _ zString _ zRrtype -> class omitted
+ // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
+ // 4. zOwner _ zClass _ zRrtype -> ttl omitted
+ // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
+ // After detecting these, we know the zRrtype so we can jump to functions
+ // handling the rdata for each of these types.
+
+ if origin == "" {
+ origin = "."
+ }
+ origin = Fqdn(origin)
+ if _, ok := IsDomainName(origin); !ok {
+ t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
+ return
+ }
+
+ st := zExpectOwnerDir // initial state
+ var h RR_Header
+ var defttl uint32 = defaultTtl
+ var prevName string
+ for l := range c {
+ // Lexer spotted an error already
+ if l.err == true {
+ t <- &Token{Error: &ParseError{f, l.token, l}}
+ return
+
+ }
+ switch st {
+ case zExpectOwnerDir:
+ // We can also expect a directive, like $TTL or $ORIGIN
+ h.Ttl = defttl
+ h.Class = ClassINET
+ switch l.value {
+ case zNewline:
+ st = zExpectOwnerDir
+ case zOwner:
+ h.Name = l.token
+ if l.token[0] == '@' {
+ h.Name = origin
+ prevName = h.Name
+ st = zExpectOwnerBl
+ break
+ }
+ if h.Name[l.length-1] != '.' {
+ h.Name = appendOrigin(h.Name, origin)
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "bad owner name", l}}
+ return
+ }
+ prevName = h.Name
+ st = zExpectOwnerBl
+ case zDirTtl:
+ st = zExpectDirTtlBl
+ case zDirOrigin:
+ st = zExpectDirOriginBl
+ case zDirInclude:
+ st = zExpectDirIncludeBl
+ case zDirGenerate:
+ st = zExpectDirGenerateBl
+ case zRrtpe:
+ h.Name = prevName
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zClass:
+ h.Name = prevName
+ h.Class = l.torc
+ st = zExpectAnyNoClassBl
+ case zBlank:
+ // Discard, can happen when there is nothing on the
+ // line except the RR type
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // Don't touch the defttl, we should take the $TTL value
+ // defttl = ttl
+ st = zExpectAnyNoTtlBl
+
+ default:
+ t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
+ return
+ }
+ case zExpectDirIncludeBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
+ return
+ }
+ st = zExpectDirInclude
+ case zExpectDirInclude:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
+ return
+ }
+ neworigin := origin // There may optionally be a new origin set after the filename; if not, use the current one
+ l := <-c
+ switch l.value {
+ case zBlank:
+ l := <-c
+ if l.value == zString {
+ if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
+ t <- &Token{Error: &ParseError{f, "bad origin name", l}}
+ return
+ }
+ // a new origin is specified.
+ if l.token[l.length-1] != '.' {
+ if origin != "." { // Prevent .. endings
+ neworigin = l.token + "." + origin
+ } else {
+ neworigin = l.token + origin
+ }
+ } else {
+ neworigin = l.token
+ }
+ }
+ case zNewline, zEOF:
+ // Ok
+ default:
+ t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
+ return
+ }
+ // Start with the new file
+ r1, e1 := os.Open(l.token)
+ if e1 != nil {
+ t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
+ return
+ }
+ if include+1 > 7 {
+ t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
+ return
+ }
+ parseZone(r1, neworigin, l.token, t, include+1)
+ st = zExpectOwnerDir
+ case zExpectDirTtlBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
+ return
+ }
+ st = zExpectDirTtl
+ case zExpectDirTtl:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
+ return
+ }
+ if e, _ := slurpRemainder(c, f); e != nil {
+ t <- &Token{Error: e}
+ return
+ }
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
+ return
+ }
+ defttl = ttl
+ st = zExpectOwnerDir
+ case zExpectDirOriginBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
+ return
+ }
+ st = zExpectDirOrigin
+ case zExpectDirOrigin:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
+ return
+ }
+ if e, _ := slurpRemainder(c, f); e != nil {
+ t <- &Token{Error: e}
+ }
+ if _, ok := IsDomainName(l.token); !ok {
+ t <- &Token{Error: &ParseError{f, "bad origin name", l}}
+ return
+ }
+ if l.token[l.length-1] != '.' {
+ if origin != "." { // Prevent .. endings
+ origin = l.token + "." + origin
+ } else {
+ origin = l.token + origin
+ }
+ } else {
+ origin = l.token
+ }
+ st = zExpectOwnerDir
+ case zExpectDirGenerateBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
+ return
+ }
+ st = zExpectDirGenerate
+ case zExpectDirGenerate:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
+ return
+ }
+ if errMsg := generate(l, c, t, origin); errMsg != "" {
+ t <- &Token{Error: &ParseError{f, errMsg, l}}
+ return
+ }
+ st = zExpectOwnerDir
+ case zExpectOwnerBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
+ return
+ }
+ st = zExpectAny
+ case zExpectAny:
+ switch l.value {
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zClass:
+ h.Class = l.torc
+ st = zExpectAnyNoClassBl
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // defttl = ttl // don't set the defttl here
+ st = zExpectAnyNoTtlBl
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
+ return
+ }
+ case zExpectAnyNoClassBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before class", l}}
+ return
+ }
+ st = zExpectAnyNoClass
+ case zExpectAnyNoTtlBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
+ return
+ }
+ st = zExpectAnyNoTtl
+ case zExpectAnyNoTtl:
+ switch l.value {
+ case zClass:
+ h.Class = l.torc
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
+ return
+ }
+ case zExpectAnyNoClass:
+ switch l.value {
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // defttl = ttl // don't set the def ttl anymore
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
+ return
+ }
+ case zExpectRrtypeBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
+ return
+ }
+ st = zExpectRrtype
+ case zExpectRrtype:
+ if l.value != zRrtpe {
+ t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
+ return
+ }
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zExpectRdata:
+ r, e, c1 := setRR(h, c, origin, f)
+ if e != nil {
+ // If e.lex is nil then we have encountered an unknown RR type;
+ // in that case we substitute our current lex token
+ if e.lex.token == "" && e.lex.value == 0 {
+ e.lex = l // Uh, dirty
+ }
+ t <- &Token{Error: e}
+ return
+ }
+ t <- &Token{RR: r, Comment: c1}
+ st = zExpectOwnerDir
+ }
+ }
+ // If we get here and h.Rrtype is still zero, we haven't parsed anything; this
+ // is not an error, because an empty zone file is still a zone file.
+}
+
+// zlexer scans the sourcefile and returns tokens on the channel c.
+func zlexer(s *scan, c chan lex) {
+ var l lex
+ str := make([]byte, maxTok) // Should be enough for any token
+ stri := 0 // Offset in str (0 means empty)
+ com := make([]byte, maxTok) // Hold comment text
+ comi := 0
+ quote := false
+ escape := false
+ space := false
+ commt := false
+ rrtype := false
+ owner := true
+ brace := 0
+ x, err := s.tokenText()
+ defer close(c)
+ for err == nil {
+ l.column = s.position.Column
+ l.line = s.position.Line
+ if stri >= maxTok {
+ l.token = "token length insufficient for parsing"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+ if comi >= maxTok {
+ l.token = "comment length insufficient for parsing"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+
+ switch x {
+ case ' ', '\t':
+ if escape {
+ escape = false
+ str[stri] = x
+ stri++
+ break
+ }
+ if quote {
+ // Inside quotes this is legal
+ str[stri] = x
+ stri++
+ break
+ }
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if stri == 0 {
+ // Space directly in the beginning, handled in the grammar
+ } else if owner {
+ // If we have a string and it's the first, make it an owner
+ l.value = zOwner
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+ l.length = stri
+ // an escaped $ starts with a \ not a $, so this check still works
+ switch l.tokenUpper {
+ case "$TTL":
+ l.value = zDirTtl
+ case "$ORIGIN":
+ l.value = zDirOrigin
+ case "$INCLUDE":
+ l.value = zDirInclude
+ case "$GENERATE":
+ l.value = zDirGenerate
+ }
+ debug.Printf("[7 %+v]", l.token)
+ c <- l
+ } else {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+ l.length = stri
+ if !rrtype {
+ if t, ok := StringToType[l.tokenUpper]; ok {
+ l.value = zRrtpe
+ l.torc = t
+ rrtype = true
+ } else {
+ if strings.HasPrefix(l.tokenUpper, "TYPE") {
+ t, ok := typeToInt(l.token)
+ if !ok {
+ l.token = "unknown RR type"
+ l.err = true
+ c <- l
+ return
+ }
+ l.value = zRrtpe
+ l.torc = t
+ }
+ }
+ if t, ok := StringToClass[l.tokenUpper]; ok {
+ l.value = zClass
+ l.torc = t
+ } else {
+ if strings.HasPrefix(l.tokenUpper, "CLASS") {
+ t, ok := classToInt(l.token)
+ if !ok {
+ l.token = "unknown class"
+ l.err = true
+ c <- l
+ return
+ }
+ l.value = zClass
+ l.torc = t
+ }
+ }
+ }
+ debug.Printf("[6 %+v]", l.token)
+ c <- l
+ }
+ stri = 0
+ // I reverse space stuff here
+ if !space && !commt {
+ l.value = zBlank
+ l.token = " "
+ l.length = 1
+ debug.Printf("[5 %+v]", l.token)
+ c <- l
+ }
+ owner = false
+ space = true
+ case ';':
+ if escape {
+ escape = false
+ str[stri] = x
+ stri++
+ break
+ }
+ if quote {
+ // Inside quotes this is legal
+ str[stri] = x
+ stri++
+ break
+ }
+ if stri > 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.length = stri
+ debug.Printf("[4 %+v]", l.token)
+ c <- l
+ stri = 0
+ }
+ commt = true
+ com[comi] = ';'
+ comi++
+ case '\r':
+ escape = false
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ // discard if outside of quotes
+ case '\n':
+ escape = false
+ // Escaped newline
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ // inside quotes this is legal
+ if commt {
+ // Reset a comment
+ commt = false
+ rrtype = false
+ stri = 0
+ // If not in a brace this ends the comment AND the RR
+ if brace == 0 {
+ owner = true
+ l.value = zNewline
+ l.token = "\n"
+ l.length = 1
+ l.comment = string(com[:comi])
+ debug.Printf("[3 %+v %+v]", l.token, l.comment)
+ c <- l
+ l.comment = ""
+ comi = 0
+ break
+ }
+ com[comi] = ' ' // convert newline to space
+ comi++
+ break
+ }
+
+ if brace == 0 {
+ // If there is previous text, we should output it here
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+
+ l.length = stri
+ if !rrtype {
+ if t, ok := StringToType[l.tokenUpper]; ok {
+ l.value = zRrtpe
+ l.torc = t
+ rrtype = true
+ }
+ }
+ debug.Printf("[2 %+v]", l.token)
+ c <- l
+ }
+ l.value = zNewline
+ l.token = "\n"
+ l.length = 1
+ debug.Printf("[1 %+v]", l.token)
+ c <- l
+ stri = 0
+ commt = false
+ rrtype = false
+ owner = true
+ comi = 0
+ }
+ case '\\':
+ // comments do not get escaped chars, everything is copied
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ // something already escaped must be in string
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ // something escaped outside of string gets added to string
+ str[stri] = x
+ stri++
+ escape = true
+ case '"':
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ space = false
+ // send previous gathered text and the quote
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.length = stri
+
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ stri = 0
+ }
+
+ // send quote itself as separate token
+ l.value = zQuote
+ l.token = "\""
+ l.length = 1
+ c <- l
+ quote = !quote
+ case '(', ')':
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ switch x {
+ case ')':
+ brace--
+ if brace < 0 {
+ l.token = "extra closing brace"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+ case '(':
+ brace++
+ }
+ default:
+ escape = false
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ str[stri] = x
+ stri++
+ space = false
+ }
+ x, err = s.tokenText()
+ }
+ if stri > 0 {
+ // Send remainder
+ l.token = string(str[:stri])
+ l.length = stri
+ l.value = zString
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ }
+}
+
+// Extract the class number from CLASSxx
+func classToInt(token string) (uint16, bool) {
+ offset := 5
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ class, err := strconv.Atoi(token[offset:])
+ if err != nil || class > maxUint16 {
+ return 0, false
+ }
+ return uint16(class), true
+}
+
+// Extract the rr number from TYPExxx
+func typeToInt(token string) (uint16, bool) {
+ offset := 4
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ typ, err := strconv.Atoi(token[offset:])
+ if err != nil || typ > maxUint16 {
+ return 0, false
+ }
+ return uint16(typ), true
+}
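+
+// Illustrative behaviour of the two helpers above (TYPE257 also appears in the
+// CAA test data in parse_test.go):
+//	classToInt("CLASS15")  ==> 15, true
+//	typeToInt("TYPE257")   ==> 257, true
+//	typeToInt("TYPE70000") ==> 0, false // above the 16-bit maximum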
+
+// Parse TTL strings like 2w, 2m, etc. Return the time in seconds.
+func stringToTtl(token string) (uint32, bool) {
+ s := uint32(0)
+ i := uint32(0)
+ for _, c := range token {
+ switch c {
+ case 's', 'S':
+ s += i
+ i = 0
+ case 'm', 'M':
+ s += i * 60
+ i = 0
+ case 'h', 'H':
+ s += i * 60 * 60
+ i = 0
+ case 'd', 'D':
+ s += i * 60 * 60 * 24
+ i = 0
+ case 'w', 'W':
+ s += i * 60 * 60 * 24 * 7
+ i = 0
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ i *= 10
+ i += uint32(c) - '0'
+ default:
+ return 0, false
+ }
+ }
+ return s + i, true
+}
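+
+// Illustrative values (derived from the unit handling above):
+//	stringToTtl("3600")  ==> 3600, true
+//	stringToTtl("1h30m") ==> 5400, true     // 3600 + 30*60
+//	stringToTtl("2w")    ==> 1209600, true  // 2*7*24*3600
+//	stringToTtl("1x")    ==> 0, false       // unknown unit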
+
+// Parse LOC records' <digits>[.<digits>][mM] into a
+// mantissa exponent format. Token should contain the entire
+// string (i.e. no spaces allowed)
+func stringToCm(token string) (e, m uint8, ok bool) {
+ if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
+ token = token[0 : len(token)-1]
+ }
+ s := strings.SplitN(token, ".", 2)
+ var meters, cmeters, val int
+ var err error
+ switch len(s) {
+ case 2:
+ if cmeters, err = strconv.Atoi(s[1]); err != nil {
+ return
+ }
+ fallthrough
+ case 1:
+ if meters, err = strconv.Atoi(s[0]); err != nil {
+ return
+ }
+ case 0:
+ // huh?
+ return 0, 0, false
+ }
+ ok = true
+ if meters > 0 {
+ e = 2
+ val = meters
+ } else {
+ e = 0
+ val = cmeters
+ }
+ for val > 10 {
+ e++
+ val /= 10
+ }
+ if e > 9 {
+ ok = false
+ }
+ m = uint8(val)
+ return
+}
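+
+// Worked examples (LOC sizes are encoded as m * 10^e centimetres):
+//	stringToCm("2m")  ==> e=2, m=2, ok  // 2 * 10^2 cm = 2 m
+//	stringToCm("200") ==> e=4, m=2, ok  // 2 * 10^4 cm = 200 m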
+
+func appendOrigin(name, origin string) string {
+ if origin == "." {
+ return name + origin
+ }
+ return name + "." + origin
+}
+
+// LOC record helper function
+func locCheckNorth(token string, latitude uint32) (uint32, bool) {
+ switch token {
+ case "n", "N":
+ return LOC_EQUATOR + latitude, true
+ case "s", "S":
+ return LOC_EQUATOR - latitude, true
+ }
+ return latitude, false
+}
+
+// LOC record helper function
+func locCheckEast(token string, longitude uint32) (uint32, bool) {
+ switch token {
+ case "e", "E":
+ return LOC_EQUATOR + longitude, true
+ case "w", "W":
+ return LOC_EQUATOR - longitude, true
+ }
+ return longitude, false
+}
+
+// "Eat" the rest of the "line". Return potential comments
+func slurpRemainder(c chan lex, f string) (*ParseError, string) {
+ l := <-c
+ com := ""
+ switch l.value {
+ case zBlank:
+ l = <-c
+ com = l.comment
+ if l.value != zNewline && l.value != zEOF {
+ return &ParseError{f, "garbage after rdata", l}, ""
+ }
+ case zNewline:
+ com = l.comment
+ case zEOF:
+ default:
+ return &ParseError{f, "garbage after rdata", l}, ""
+ }
+ return nil, com
+}
+
+// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64"
+// Used for NID and L64 records.
+func stringToNodeID(l lex) (uint64, *ParseError) {
+ if len(l.token) < 19 {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ // There must be three colons at fixed positions; if not, it's a parse error
+ if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ return u, nil
+}
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
new file mode 100644
index 000000000..e521dc063
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -0,0 +1,2143 @@
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+ "strconv"
+ "strings"
+)
+
+type parserFunc struct {
+ // Func defines the function that parses the tokens and returns the RR
+ // or an error. The last string contains any comments in the line as
+ // they returned by the lexer as well.
+ Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string)
+ // Signals if the RR ending is of variable length, like TXT or records
+ // that have Hexadecimal or Base64 as their last element in the Rdata. Records
+ // that have a fixed ending or for instance A, AAAA, SOA and etc.
+ Variable bool
+}
+
+// Parse the rdata of each rrtype.
+// All data from the channel c is either zString or zBlank.
+// After the rdata there may come a zBlank and then a zNewline
+// or immediately a zNewline. If this is not the case we flag
+// a *ParseError: garbage after rdata.
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ parserfunc, ok := typeToparserFunc[h.Rrtype]
+ if ok {
+ r, e, cm := parserfunc.Func(h, c, o, f)
+ if parserfunc.Variable {
+ return r, e, cm
+ }
+ if e != nil {
+ return nil, e, ""
+ }
+ e, cm = slurpRemainder(c, f)
+ if e != nil {
+ return nil, e, ""
+ }
+ return r, nil, cm
+ }
+ // RFC 3597 RR (Unknown RR handling)
+ return setRFC3597(h, c, o, f)
+}
+
+// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces)
+// or an error
+func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) {
+ s := ""
+ l := <-c // zString
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value {
+ case zString:
+ s += l.token
+ case zBlank: // Ok
+ default:
+ return "", &ParseError{f, errstr, l}, ""
+ }
+ l = <-c
+ }
+ return s, nil, l.comment
+}
+
+// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces)
+// or an error
+func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) {
+ // Get the remaining data until we see a zNewline
+ quote := false
+ l := <-c
+ var s []string
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value == zQuote {
+ case true: // A number of quoted strings
+ s = make([]string, 0)
+ empty := true
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value {
+ case zString:
+ empty = false
+ if len(l.token) > 255 {
+ // split up tokens that are larger than 255 into 255-byte chunks
+ sx := []string{}
+ p, i := 0, 255
+ for {
+ if i <= len(l.token) {
+ sx = append(sx, l.token[p:i])
+ } else {
+ sx = append(sx, l.token[p:])
+ break
+
+ }
+ p, i = p+255, i+255
+ }
+ s = append(s, sx...)
+ break
+ }
+
+ s = append(s, l.token)
+ case zBlank:
+ if quote {
+ // zBlank can only be seen in between txt parts.
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ case zQuote:
+ if empty && quote {
+ s = append(s, "")
+ }
+ quote = !quote
+ empty = true
+ default:
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ l = <-c
+ }
+ if quote {
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ case false: // Unquoted text record
+ s = make([]string, 1)
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ s[0] += l.token
+ l = <-c
+ }
+ }
+ return s, nil, l.comment
+}
+
+func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(A)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // Dynamic updates.
+ return rr, nil, ""
+ }
+ rr.A = net.ParseIP(l.token)
+ if rr.A == nil || l.err {
+ return nil, &ParseError{f, "bad A A", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(AAAA)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ rr.AAAA = net.ParseIP(l.token)
+ if rr.AAAA == nil || l.err {
+ return nil, &ParseError{f, "bad AAAA AAAA", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NS)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ns = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ns = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NS Ns", l}, ""
+ }
+ if rr.Ns[l.length-1] != '.' {
+ rr.Ns = appendOrigin(rr.Ns, o)
+ }
+ return rr, nil, ""
+}
+
+func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(PTR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ptr = l.token
+ if l.length == 0 { // dynamic update rr.
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ptr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PTR Ptr", l}, ""
+ }
+ if rr.Ptr[l.length-1] != '.' {
+ rr.Ptr = appendOrigin(rr.Ptr, o)
+ }
+ return rr, nil, ""
+}
+
+func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSAPPTR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ptr = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ptr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, ""
+ }
+ if rr.Ptr[l.length-1] != '.' {
+ rr.Ptr = appendOrigin(rr.Ptr, o)
+ }
+ return rr, nil, ""
+}
+
+func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RP)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mbox = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mbox = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RP Mbox", l}, ""
+ }
+ if rr.Mbox[l.length-1] != '.' {
+ rr.Mbox = appendOrigin(rr.Mbox, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.Txt = l.token
+ if l.token == "@" {
+ rr.Txt = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RP Txt", l}, ""
+ }
+ if rr.Txt[l.length-1] != '.' {
+ rr.Txt = appendOrigin(rr.Txt, o)
+ }
+ return rr, nil, ""
+}
+
+func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mr = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MR Mr", l}, ""
+ }
+ if rr.Mr[l.length-1] != '.' {
+ rr.Mr = appendOrigin(rr.Mr, o)
+ }
+ return rr, nil, ""
+}
+
+func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MB)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mb = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mb = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MB Mb", l}, ""
+ }
+ if rr.Mb[l.length-1] != '.' {
+ rr.Mb = appendOrigin(rr.Mb, o)
+ }
+ return rr, nil, ""
+}
+
+func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MG)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mg = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mg = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MG Mg", l}, ""
+ }
+ if rr.Mg[l.length-1] != '.' {
+ rr.Mg = appendOrigin(rr.Mg, o)
+ }
+ return rr, nil, ""
+}
+
+func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(HINFO)
+ rr.Hdr = h
+
+ chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f)
+ if e != nil {
+ return nil, e, c1
+ }
+
+ if ln := len(chunks); ln == 0 {
+ return rr, nil, ""
+ } else if ln == 1 {
+ // Can we split it?
+ if out := strings.Fields(chunks[0]); len(out) > 1 {
+ chunks = out
+ } else {
+ chunks = append(chunks, "")
+ }
+ }
+
+ rr.Cpu = chunks[0]
+ rr.Os = strings.Join(chunks[1:], " ")
+
+ return rr, nil, ""
+}
+
+func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MINFO)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Rmail = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Rmail = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MINFO Rmail", l}, ""
+ }
+ if rr.Rmail[l.length-1] != '.' {
+ rr.Rmail = appendOrigin(rr.Rmail, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.Email = l.token
+ if l.token == "@" {
+ rr.Email = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MINFO Email", l}, ""
+ }
+ if rr.Email[l.length-1] != '.' {
+ rr.Email = appendOrigin(rr.Email, o)
+ }
+ return rr, nil, ""
+}
+
+func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MF)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mf = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mf = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MF Mf", l}, ""
+ }
+ if rr.Mf[l.length-1] != '.' {
+ rr.Mf = appendOrigin(rr.Mf, o)
+ }
+ return rr, nil, ""
+}
+
+func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MD)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Md = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Md = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MD Md", l}, ""
+ }
+ if rr.Md[l.length-1] != '.' {
+ rr.Md = appendOrigin(rr.Md, o)
+ }
+ return rr, nil, ""
+}
+
+func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad MX Pref", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Mx = l.token
+ if l.token == "@" {
+ rr.Mx = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MX Mx", l}, ""
+ }
+ if rr.Mx[l.length-1] != '.' {
+ rr.Mx = appendOrigin(rr.Mx, o)
+ }
+ return rr, nil, ""
+}
+
+func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RT)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil {
+ return nil, &ParseError{f, "bad RT Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Host = l.token
+ if l.token == "@" {
+ rr.Host = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RT Host", l}, ""
+ }
+ if rr.Host[l.length-1] != '.' {
+ rr.Host = appendOrigin(rr.Host, o)
+ }
+ return rr, nil, ""
+}
+
+func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(AFSDB)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
+ }
+ rr.Subtype = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Hostname = l.token
+ if l.token == "@" {
+ rr.Hostname = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad AFSDB Hostname", l}, ""
+ }
+ if rr.Hostname[l.length-1] != '.' {
+ rr.Hostname = appendOrigin(rr.Hostname, o)
+ }
+ return rr, nil, ""
+}
+
+func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(X25)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.err {
+ return nil, &ParseError{f, "bad X25 PSDNAddress", l}, ""
+ }
+ rr.PSDNAddress = l.token
+ return rr, nil, ""
+}
+
+func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(KX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad KX Pref", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Exchanger = l.token
+ if l.token == "@" {
+ rr.Exchanger = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad KX Exchanger", l}, ""
+ }
+ if rr.Exchanger[l.length-1] != '.' {
+ rr.Exchanger = appendOrigin(rr.Exchanger, o)
+ }
+ return rr, nil, ""
+}
+
+func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CNAME)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Target = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad CNAME Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(DNAME)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Target = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad CNAME Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SOA)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ns = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ <-c // zBlank
+ if l.token == "@" {
+ rr.Ns = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SOA Ns", l}, ""
+ }
+ if rr.Ns[l.length-1] != '.' {
+ rr.Ns = appendOrigin(rr.Ns, o)
+ }
+ }
+
+ l = <-c
+ rr.Mbox = l.token
+ if l.token == "@" {
+ rr.Mbox = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SOA Mbox", l}, ""
+ }
+ if rr.Mbox[l.length-1] != '.' {
+ rr.Mbox = appendOrigin(rr.Mbox, o)
+ }
+ }
+ <-c // zBlank
+
+ var (
+ v uint32
+ ok bool
+ )
+ for i := 0; i < 5; i++ {
+ l = <-c
+ if l.err {
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+ }
+ if j, e := strconv.Atoi(l.token); e != nil {
+ if i == 0 {
+ // Serial should be a number
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+ }
+ if v, ok = stringToTtl(l.token); !ok {
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+
+ }
+ } else {
+ v = uint32(j)
+ }
+ switch i {
+ case 0:
+ rr.Serial = v
+ <-c // zBlank
+ case 1:
+ rr.Refresh = v
+ <-c // zBlank
+ case 2:
+ rr.Retry = v
+ <-c // zBlank
+ case 3:
+ rr.Expire = v
+ <-c // zBlank
+ case 4:
+ rr.Minttl = v
+ }
+ }
+ return rr, nil, ""
+}
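+
+// For illustration, a sketch of the presentation format this parser accepts
+// (typically reached through NewRR; the names below are placeholders). The
+// serial must be numeric, while the remaining four timers may also be
+// TTL-style strings handled by stringToTtl:
+//
+//	rr, err := NewRR("example.org. 3600 IN SOA ns.example.org. hostmaster.example.org. 2016100300 2h 1h 2w 1h")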
+
+func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SRV)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Priority", l}, ""
+ }
+ rr.Priority = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Weight", l}, ""
+ }
+ rr.Weight = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Port", l}, ""
+ }
+ rr.Port = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Target = l.token
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SRV Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NAPTR)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NAPTR Order", l}, ""
+ }
+ rr.Order = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ // Flags
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Flags = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Flags = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+
+ // Service
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Service = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Service = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+
+ // Regexp
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Regexp = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Regexp = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ // After the closing quote a blank is expected before the Replacement.
+ <-c // zBlank
+ l = <-c // zString
+ rr.Replacement = l.token
+ if l.token == "@" {
+ rr.Replacement = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NAPTR Replacement", l}, ""
+ }
+ if rr.Replacement[l.length-1] != '.' {
+ rr.Replacement = appendOrigin(rr.Replacement, o)
+ }
+ return rr, nil, ""
+}
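+
+// For illustration, a sketch of a NAPTR record in presentation format: the
+// Flags, Service and Regexp fields must each be quoted (possibly empty), as
+// the checks above require. The names are placeholders:
+//
+//	rr, err := NewRR(`example.org. 3600 IN NAPTR 100 10 "S" "SIP+D2U" "" _sip._udp.example.org.`)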
+
+func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TALINK)
+ rr.Hdr = h
+
+ l := <-c
+ rr.PreviousName = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.PreviousName = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad TALINK PreviousName", l}, ""
+ }
+ if rr.PreviousName[l.length-1] != '.' {
+ rr.PreviousName = appendOrigin(rr.PreviousName, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.NextName = l.token
+ if l.token == "@" {
+ rr.NextName = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad TALINK NextName", l}, ""
+ }
+ if rr.NextName[l.length-1] != '.' {
+ rr.NextName = appendOrigin(rr.NextName, o)
+ }
+ return rr, nil, ""
+}
+
+func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(LOC)
+ rr.Hdr = h
+ // Non zero defaults for LOC record, see RFC 1876, Section 3.
+ rr.HorizPre = 165 // 10000
+ rr.VertPre = 162 // 10
+ rr.Size = 18 // 1
+ ok := false
+ // North
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude", l}, ""
+ }
+ rr.Latitude = 1000 * 60 * 60 * uint32(i)
+
+ <-c // zBlank
+ // Either number, 'N' or 'S'
+ l = <-c
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude minutes", l}, ""
+ }
+ rr.Latitude += 1000 * 60 * uint32(i)
+
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude seconds", l}, ""
+ } else {
+ rr.Latitude += uint32(1000 * i)
+ }
+ <-c // zBlank
+ // Either number, 'N' or 'S'
+ l = <-c
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ // If still alive, flag an error
+ return nil, &ParseError{f, "bad LOC Latitude North/South", l}, ""
+
+East:
+ // East
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude", l}, ""
+ } else {
+ rr.Longitude = 1000 * 60 * 60 * uint32(i)
+ }
+ <-c // zBlank
+ // Either number, 'E' or 'W'
+ l = <-c
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude minutes", l}, ""
+ } else {
+ rr.Longitude += 1000 * 60 * uint32(i)
+ }
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude seconds", l}, ""
+ } else {
+ rr.Longitude += uint32(1000 * i)
+ }
+ <-c // zBlank
+ // Either number, 'E' or 'W'
+ l = <-c
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ // If still alive, flag an error
+ return nil, &ParseError{f, "bad LOC Longitude East/West", l}, ""
+
+Altitude:
+ <-c // zBlank
+ l = <-c
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad LOC Altitude", l}, ""
+ }
+ if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
+ l.token = l.token[0 : len(l.token)-1]
+ }
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil {
+ return nil, &ParseError{f, "bad LOC Altitude", l}, ""
+ } else {
+ rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
+ }
+
+ // And now optionally the other values
+ l = <-c
+ count := 0
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ switch count {
+ case 0: // Size
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC Size", l}, ""
+ }
+ rr.Size = (e & 0x0f) | (m << 4 & 0xf0)
+ case 1: // HorizPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC HorizPre", l}, ""
+ }
+ rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0)
+ case 2: // VertPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC VertPre", l}, ""
+ }
+ rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0)
+ }
+ count++
+ case zBlank:
+ // Ok
+ default:
+ return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, ""
+}
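+
+// For illustration, a sketch of a LOC record this parser accepts: latitude as
+// degrees [minutes [seconds]] followed by N or S, longitude likewise with E or
+// W, then the altitude (an optional trailing "m" is stripped); Size, HorizPre
+// and VertPre may follow but default to the RFC 1876 values set above. The
+// owner name is a placeholder:
+//
+//	rr, err := NewRR("example.org. 3600 IN LOC 52 22 23.000 N 4 53 32.000 E -2.00m")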
+
+func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(HIP)
+ rr.Hdr = h
+
+ // HitLength is not represented
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
+ }
+ rr.PublicKeyAlgorithm = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP Hit", l}, ""
+ }
+ rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
+ rr.HitLength = uint8(len(rr.Hit)) / 2
+
+ <-c // zBlank
+ l = <-c // zString
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP PublicKey", l}, ""
+ }
+ rr.PublicKey = l.token // This cannot contain spaces
+ rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
+
+ // RendezvousServers (if any)
+ l = <-c
+ var xs []string
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ if l.token == "@" {
+ xs = append(xs, o)
+ l = <-c
+ continue
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
+ }
+ if l.token[l.length-1] != '.' {
+ l.token = appendOrigin(l.token, o)
+ }
+ xs = append(xs, l.token)
+ case zBlank:
+ // Ok
+ default:
+ return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
+ }
+ l = <-c
+ }
+ rr.RendezvousServers = xs
+ return rr, nil, l.comment
+}
+
+func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CERT)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if v, ok := StringToCertType[l.token]; ok {
+ rr.Type = v
+ } else if i, e := strconv.Atoi(l.token); e != nil {
+ return nil, &ParseError{f, "bad CERT Type", l}, ""
+ } else {
+ rr.Type = uint16(i)
+ }
+ <-c // zBlank
+ l = <-c // zString
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad CERT KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ if v, ok := StringToAlgorithm[l.token]; ok {
+ rr.Algorithm = v
+ } else if i, e := strconv.Atoi(l.token); e != nil {
+ return nil, &ParseError{f, "bad CERT Algorithm", l}, ""
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ s, e1, c1 := endingToString(c, "bad CERT Certificate", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(OPENPGPKEY)
+ rr.Hdr = h
+
+ s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setRRSIG(h, c, o, f)
+ if r != nil {
+ return &SIG{*r.(*RRSIG)}, e, s
+ }
+ return nil, e, s
+}
+
+func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RRSIG)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if t, ok := StringToType[l.tokenUpper]; !ok {
+ if strings.HasPrefix(l.tokenUpper, "TYPE") {
+ t, ok = typeToInt(l.tokenUpper)
+ if !ok {
+ return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
+ }
+ rr.TypeCovered = t
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
+ }
+ } else {
+ rr.TypeCovered = t
+ }
+ <-c // zBlank
+ l = <-c
+ i, err := strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
+ }
+ rr.Labels = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
+ }
+ rr.OrigTtl = uint32(i)
+ <-c // zBlank
+ l = <-c
+ if i, err := StringToTime(l.token); err != nil {
+ // Try to see if all numeric and use it as epoch
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ // TODO(miek): error out on > MAX_UINT32, same below
+ rr.Expiration = uint32(i)
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Expiration", l}, ""
+ }
+ } else {
+ rr.Expiration = i
+ }
+ <-c // zBlank
+ l = <-c
+ if i, err := StringToTime(l.token); err != nil {
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ rr.Inception = uint32(i)
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Inception", l}, ""
+ }
+ } else {
+ rr.Inception = i
+ }
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ rr.SignerName = l.token
+ if l.token == "@" {
+ rr.SignerName = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RRSIG SignerName", l}, ""
+ }
+ if rr.SignerName[l.length-1] != '.' {
+ rr.SignerName = appendOrigin(rr.SignerName, o)
+ }
+ }
+ s, e, c1 := endingToString(c, "bad RRSIG Signature", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Signature = s
+ return rr, nil, c1
+}
+
+func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC)
+ rr.Hdr = h
+
+ l := <-c
+ rr.NextDomain = l.token
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if l.token == "@" {
+ rr.NextDomain = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC NextDomain", l}, ""
+ }
+ if rr.NextDomain[l.length-1] != '.' {
+ rr.NextDomain = appendOrigin(rr.NextDomain, o)
+ }
+ }
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l = <-c
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ if k, ok = StringToType[l.tokenUpper]; !ok {
+ if k, ok = typeToInt(l.tokenUpper); !ok {
+ return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, l.comment
+}
+
+func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC3)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
+ }
+ rr.Hash = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Flags", l}, ""
+ }
+ rr.Flags = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Iterations", l}, ""
+ }
+ rr.Iterations = uint16(i)
+ <-c
+ l = <-c
+ if len(l.token) == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Salt", l}, ""
+ }
+ rr.SaltLength = uint8(len(l.token)) / 2
+ rr.Salt = l.token
+
+ <-c
+ l = <-c
+ if len(l.token) == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, ""
+ }
+ rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits)
+ rr.NextDomain = l.token
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l = <-c
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ if k, ok = StringToType[l.tokenUpper]; !ok {
+ if k, ok = typeToInt(l.tokenUpper); !ok {
+ return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, l.comment
+}
+
+func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC3PARAM)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
+ }
+ rr.Hash = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, ""
+ }
+ rr.Flags = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, ""
+ }
+ rr.Iterations = uint16(i)
+ <-c
+ l = <-c
+ rr.SaltLength = uint8(len(l.token))
+ rr.Salt = l.token
+ return rr, nil, ""
+}
+
+func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EUI48)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.length != 17 || l.err {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ addr := make([]byte, 12)
+ dash := 0
+ for i := 0; i < 10; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ }
+ addr[10] = l.token[15]
+ addr[11] = l.token[16]
+
+ i, e := strconv.ParseUint(string(addr), 16, 48)
+ if e != nil {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ rr.Address = i
+ return rr, nil, ""
+}
+
+func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EUI64)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.length != 23 || l.err {
+ return nil, &ParseError{f, "bad EUI64 Address", l}, ""
+ }
+ addr := make([]byte, 16)
+ dash := 0
+ for i := 0; i < 14; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return nil, &ParseError{f, "bad EUI64 Address", l}, ""
+ }
+ }
+ addr[14] = l.token[21]
+ addr[15] = l.token[22]
+
+ i, e := strconv.ParseUint(string(addr), 16, 64)
+ if e != nil {
+ return nil, &ParseError{f, "bad EUI68 Address", l}, ""
+ }
+ rr.Address = uint64(i)
+ return rr, nil, ""
+}
+
+func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SSHFP)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SSHFP Type", l}, ""
+ }
+ rr.Type = uint8(i)
+ <-c // zBlank
+ s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.FingerPrint = s
+ return rr, nil, ""
+}
+
+func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
+ rr := new(DNSKEY)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
+ }
+ rr.Flags = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Protocol", l}, ""
+ }
+ rr.Protocol = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "KEY")
+ if r != nil {
+ return &KEY{*r.(*DNSKEY)}, e, s
+ }
+ return nil, e, s
+}
+
+func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY")
+ return r, e, s
+}
+
+func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY")
+ if r != nil {
+ return &CDNSKEY{*r.(*DNSKEY)}, e, s
+ }
+ return nil, e, s
+}
+
+func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RKEY)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Flags", l}, ""
+ }
+ rr.Flags = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Protocol", l}, ""
+ }
+ rr.Protocol = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EID)
+ rr.Hdr = h
+ s, e, c1 := endingToString(c, "bad EID Endpoint", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Endpoint = s
+ return rr, nil, c1
+}
+
+func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NIMLOC)
+ rr.Hdr = h
+ s, e, c1 := endingToString(c, "bad NIMLOC Locator", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Locator = s
+ return rr, nil, c1
+}
+
+func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(GPOS)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ _, e := strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Longitude", l}, ""
+ }
+ rr.Longitude = l.token
+ <-c // zBlank
+ l = <-c
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Latitude", l}, ""
+ }
+ rr.Latitude = l.token
+ <-c // zBlank
+ l = <-c
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Altitude", l}, ""
+ }
+ rr.Altitude = l.token
+ return rr, nil, ""
+}
+
+func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
+ rr := new(DS)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil {
+ i, ok := StringToAlgorithm[l.tokenUpper]
+ if !ok || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " DigestType", l}, ""
+ }
+ rr.DigestType = uint8(i)
+ s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "DS")
+ return r, e, s
+}
+
+func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "DLV")
+ if r != nil {
+ return &DLV{*r.(*DS)}, e, s
+ }
+ return nil, e, s
+}
+
+func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "CDS")
+ if r != nil {
+ return &CDS{*r.(*DS)}, e, s
+ }
+ return nil, e, s
+}
+
+func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TA KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil {
+ i, ok := StringToAlgorithm[l.tokenUpper]
+ if !ok || l.err {
+ return nil, &ParseError{f, "bad TA Algorithm", l}, ""
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TA DigestType", l}, ""
+ }
+ rr.DigestType = uint8(i)
+ s, e, c1 := endingToString(c, "bad TA Digest", f)
+ if e != nil {
+ return nil, e.(*ParseError), c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TLSA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Usage", l}, ""
+ }
+ rr.Usage = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Selector", l}, ""
+ }
+ rr.Selector = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
+ }
+ rr.MatchingType = uint8(i)
+ // Note: a separate error variable (e2) is used for the ending-to-string parse.
+ s, e2, c1 := endingToString(c, "bad TLSA Certificate", f)
+ if e2 != nil {
+ return nil, e2, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RFC3597)
+ rr.Hdr = h
+ l := <-c
+ if l.token != "\\#" {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+ <-c // zBlank
+ l = <-c
+ rdlength, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, ""
+ }
+
+ s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ if rdlength*2 != len(s) {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+ rr.Rdata = s
+ return rr, nil, c1
+}
+
+func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SPF)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TXT)
+ rr.Hdr = h
+
+ // no zBlank reading here, because all this rdata is TXT
+ s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+// identical to setTXT
+func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NINFO)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.ZSData = s
+ return rr, nil, c1
+}
+
+func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(URI)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // Dynamic updates.
+ return rr, nil, ""
+ }
+
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad URI Priority", l}, ""
+ }
+ rr.Priority = uint16(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad URI Weight", l}, ""
+ }
+ rr.Weight = uint16(i)
+
+ <-c // zBlank
+ s, err, c1 := endingToTxtSlice(c, "bad URI Target", f)
+ if err != nil {
+ return nil, err, ""
+ }
+ if len(s) > 1 {
+ return nil, &ParseError{f, "bad URI Target", l}, ""
+ }
+ rr.Target = s[0]
+ return rr, nil, c1
+}
+
+func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ // awesome record to parse!
+ rr := new(DHCID)
+ rr.Hdr = h
+
+ s, e, c1 := endingToString(c, "bad DHCID Digest", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NID)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NID Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return nil, err, ""
+ }
+ rr.NodeID = u
+ return rr, nil, ""
+}
+
+func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(L32)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad L32 Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Locator32 = net.ParseIP(l.token)
+ if rr.Locator32 == nil || l.err {
+ return nil, &ParseError{f, "bad L32 Locator", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(LP)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LP Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Fqdn = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Fqdn = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad LP Fqdn", l}, ""
+ }
+ if rr.Fqdn[l.length-1] != '.' {
+ rr.Fqdn = appendOrigin(rr.Fqdn, o)
+ }
+ return rr, nil, ""
+}
+
+func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(L64)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad L64 Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return nil, err, ""
+ }
+ rr.Locator64 = u
+ return rr, nil, ""
+}
+
+func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(UID)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad UID Uid", l}, ""
+ }
+ rr.Uid = uint32(i)
+ return rr, nil, ""
+}
+
+func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(GID)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GID Gid", l}, ""
+ }
+ rr.Gid = uint32(i)
+ return rr, nil, ""
+}
+
+func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(UINFO)
+ rr.Hdr = h
+ s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Uinfo = s[0] // silently discard anything above
+ return rr, nil, c1
+}
+
+func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(PX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad PX Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Map822 = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Map822 = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PX Map822", l}, ""
+ }
+ if rr.Map822[l.length-1] != '.' {
+ rr.Map822 = appendOrigin(rr.Map822, o)
+ }
+ <-c // zBlank
+ l = <-c // zString
+ rr.Mapx400 = l.token
+ if l.token == "@" {
+ rr.Mapx400 = o
+ return rr, nil, ""
+ }
+ _, ok = IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PX Mapx400", l}, ""
+ }
+ if rr.Mapx400[l.length-1] != '.' {
+ rr.Mapx400 = appendOrigin(rr.Mapx400, o)
+ }
+ return rr, nil, ""
+}
+
+func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CAA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, err := strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad CAA Flag", l}, ""
+ }
+ rr.Flag = uint8(i)
+
+ <-c // zBlank
+ l = <-c // zString
+ if l.value != zString {
+ return nil, &ParseError{f, "bad CAA Tag", l}, ""
+ }
+ rr.Tag = l.token
+
+ <-c // zBlank
+ s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ if len(s) > 1 {
+ return nil, &ParseError{f, "bad CAA Value", l}, ""
+ }
+ rr.Value = s[0]
+ return rr, nil, c1
+}
+
+var typeToparserFunc = map[uint16]parserFunc{
+ TypeAAAA: {setAAAA, false},
+ TypeAFSDB: {setAFSDB, false},
+ TypeA: {setA, false},
+ TypeCAA: {setCAA, true},
+ TypeCDS: {setCDS, true},
+ TypeCDNSKEY: {setCDNSKEY, true},
+ TypeCERT: {setCERT, true},
+ TypeCNAME: {setCNAME, false},
+ TypeDHCID: {setDHCID, true},
+ TypeDLV: {setDLV, true},
+ TypeDNAME: {setDNAME, false},
+ TypeKEY: {setKEY, true},
+ TypeDNSKEY: {setDNSKEY, true},
+ TypeDS: {setDS, true},
+ TypeEID: {setEID, true},
+ TypeEUI48: {setEUI48, false},
+ TypeEUI64: {setEUI64, false},
+ TypeGID: {setGID, false},
+ TypeGPOS: {setGPOS, false},
+ TypeHINFO: {setHINFO, true},
+ TypeHIP: {setHIP, true},
+ TypeKX: {setKX, false},
+ TypeL32: {setL32, false},
+ TypeL64: {setL64, false},
+ TypeLOC: {setLOC, true},
+ TypeLP: {setLP, false},
+ TypeMB: {setMB, false},
+ TypeMD: {setMD, false},
+ TypeMF: {setMF, false},
+ TypeMG: {setMG, false},
+ TypeMINFO: {setMINFO, false},
+ TypeMR: {setMR, false},
+ TypeMX: {setMX, false},
+ TypeNAPTR: {setNAPTR, false},
+ TypeNID: {setNID, false},
+ TypeNIMLOC: {setNIMLOC, true},
+ TypeNINFO: {setNINFO, true},
+ TypeNSAPPTR: {setNSAPPTR, false},
+ TypeNSEC3PARAM: {setNSEC3PARAM, false},
+ TypeNSEC3: {setNSEC3, true},
+ TypeNSEC: {setNSEC, true},
+ TypeNS: {setNS, false},
+ TypeOPENPGPKEY: {setOPENPGPKEY, true},
+ TypePTR: {setPTR, false},
+ TypePX: {setPX, false},
+ TypeSIG: {setSIG, true},
+ TypeRKEY: {setRKEY, true},
+ TypeRP: {setRP, false},
+ TypeRRSIG: {setRRSIG, true},
+ TypeRT: {setRT, false},
+ TypeSOA: {setSOA, false},
+ TypeSPF: {setSPF, true},
+ TypeSRV: {setSRV, false},
+ TypeSSHFP: {setSSHFP, true},
+ TypeTALINK: {setTALINK, false},
+ TypeTA: {setTA, true},
+ TypeTLSA: {setTLSA, true},
+ TypeTXT: {setTXT, true},
+ TypeUID: {setUID, false},
+ TypeUINFO: {setUINFO, true},
+ TypeURI: {setURI, true},
+ TypeX25: {setX25, false},
+}
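+
+// For illustration: these set* functions are not usually called directly; the
+// zone scanner dispatches through this table, typically via NewRR. A sketch
+// (the names are placeholders):
+//
+//	rr, err := NewRR("example.org. 3600 IN MX 10 mail.example.org.")
+//	if err == nil {
+//		_ = rr.(*MX).Preference // 10
+//	}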
diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go
new file mode 100644
index 000000000..c29bc2f38
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scanner.go
@@ -0,0 +1,43 @@
+package dns
+
+// Implements a simple scanner that returns a byte stream from an io.Reader.
+
+import (
+ "bufio"
+ "io"
+ "text/scanner"
+)
+
+type scan struct {
+ src *bufio.Reader
+ position scanner.Position
+ eof bool // Set when a newline has just been read; the line count is bumped on the next call.
+}
+
+func scanInit(r io.Reader) *scan {
+ s := new(scan)
+ s.src = bufio.NewReader(r)
+ s.position.Line = 1
+ return s
+}
+
+// tokenText returns the next byte from the input
+func (s *scan) tokenText() (byte, error) {
+ c, err := s.src.ReadByte()
+ if err != nil {
+ return c, err
+ }
+ // delay the newline handling until the next token is delivered,
+ // fixes off-by-one errors when reporting a parse error.
+ if s.eof {
+ s.position.Line++
+ s.position.Column = 0
+ s.eof = false
+ }
+ if c == '\n' {
+ s.eof = true
+ return c, nil
+ }
+ s.position.Column++
+ return c, nil
+}
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
new file mode 100644
index 000000000..2b4bff49f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/server.go
@@ -0,0 +1,732 @@
+// DNS server implementation.
+
+package dns
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/binary"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Maximum number of TCP queries before we close the socket.
+const maxTCPQueries = 128
+
+// Handler is implemented by any value that implements ServeDNS.
+type Handler interface {
+ ServeDNS(w ResponseWriter, r *Msg)
+}
+
+// A ResponseWriter interface is used by a DNS handler to
+// construct a DNS response.
+type ResponseWriter interface {
+ // LocalAddr returns the net.Addr of the server
+ LocalAddr() net.Addr
+ // RemoteAddr returns the net.Addr of the client that sent the current request.
+ RemoteAddr() net.Addr
+ // WriteMsg writes a reply back to the client.
+ WriteMsg(*Msg) error
+ // Write writes a raw buffer back to the client.
+ Write([]byte) (int, error)
+ // Close closes the connection.
+ Close() error
+ // TsigStatus returns the status of the Tsig.
+ TsigStatus() error
+ // TsigTimersOnly sets the tsig timers only boolean.
+ TsigTimersOnly(bool)
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack(), the DNS package will not do anything with the connection.
+ Hijack()
+}
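+
+// For illustration, a minimal Handler that sends back an empty, authoritative
+// reply through the ResponseWriter; a sketch only, echoHandler is an
+// arbitrary name:
+//
+//	type echoHandler struct{}
+//
+//	func (echoHandler) ServeDNS(w ResponseWriter, r *Msg) {
+//		m := new(Msg)
+//		m.SetReply(r)
+//		m.Authoritative = true
+//		w.WriteMsg(m)
+//	}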
+
+type response struct {
+ hijacked bool // connection has been hijacked by handler
+ tsigStatus error
+ tsigTimersOnly bool
+ tsigRequestMAC string
+ tsigSecret map[string]string // the tsig secrets
+ udp *net.UDPConn // i/o connection if UDP was used
+ tcp net.Conn // i/o connection if TCP was used
+ udpSession *SessionUDP // oob data to get egress interface right
+ remoteAddr net.Addr // address of the client
+ writer Writer // writer to output the raw DNS bits
+}
+
+// ServeMux is a DNS request multiplexer. It matches the
+// zone name of each incoming request against a list of
+// registered patterns and calls the handler for the pattern
+// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
+// that queries for the DS record are redirected to the parent zone (if that
+// is also registered), otherwise the child gets the query.
+// ServeMux is also safe for concurrent access from multiple goroutines.
+type ServeMux struct {
+ z map[string]Handler
+ m *sync.RWMutex
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as DNS handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Msg)
+
+// ServeDNS calls f(w, r).
+func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
+ f(w, r)
+}
+
+// HandleFailed writes a SERVFAIL reply for every request it gets.
+func HandleFailed(w ResponseWriter, r *Msg) {
+ m := new(Msg)
+ m.SetRcode(r, RcodeServerFailure)
+ // does not matter if this write fails
+ w.WriteMsg(m)
+}
+
+func failedHandler() Handler { return HandlerFunc(HandleFailed) }
+
+// ListenAndServe starts a server on the specified address and network, invoking
+// handler for incoming queries.
+func ListenAndServe(addr string, network string, handler Handler) error {
+ server := &Server{Addr: addr, Net: network, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
+// http://golang.org/pkg/net/http/#ListenAndServeTLS
+func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ server := &Server{
+ Addr: addr,
+ Net: "tcp-tls",
+ TLSConfig: &config,
+ Handler: handler,
+ }
+
+ return server.ListenAndServe()
+}
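+
+// For illustration, serving DNS over TLS on the conventional port 853; the
+// certificate and key paths are placeholders, and a nil handler falls back to
+// the DefaultServeMux:
+//
+//	err := ListenAndServeTLS(":853", "cert.pem", "key.pem", nil)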
+
+// ActivateAndServe activates a server with a listener from systemd,
+// l and p should not both be non-nil.
+// If both l and p are not nil only p will be used.
+// Invoke handler for incoming queries.
+func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
+ server := &Server{Listener: l, PacketConn: p, Handler: handler}
+ return server.ActivateAndServe()
+}
+
+func (mux *ServeMux) match(q string, t uint16) Handler {
+ mux.m.RLock()
+ defer mux.m.RUnlock()
+ var handler Handler
+ b := make([]byte, len(q)) // worst case, one label of length q
+ off := 0
+ end := false
+ for {
+ l := len(q[off:])
+ for i := 0; i < l; i++ {
+ b[i] = q[off+i]
+ if b[i] >= 'A' && b[i] <= 'Z' {
+ b[i] |= ('a' - 'A')
+ }
+ }
+ if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
+ if t != TypeDS {
+ return h
+ }
+ // Continue for DS to see if we have a parent too, if so delegate to the parent
+ handler = h
+ }
+ off, end = NextLabel(q, off)
+ if end {
+ break
+ }
+ }
+ // If nothing matched, try the root zone as a last resort.
+ if h, ok := mux.z["."]; ok {
+ return h
+ }
+ return handler
+}
+
+// Handle adds a handler to the ServeMux for pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ mux.z[Fqdn(pattern)] = handler
+ mux.m.Unlock()
+}
+
+// HandleFunc adds a handler function to the ServeMux for pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler for pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose
+// pattern most closely matches the request message. If DefaultServeMux
+// is used the correct thing for DS queries is done: a possible parent
+// is sought.
+// If no handler is found a standard SERVFAIL message is returned.
+// If the request message does not have exactly one question in the
+// question section a SERVFAIL is returned, unless Unsafe is true.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
+ var h Handler
+ if len(request.Question) < 1 { // allow more than one question
+ h = failedHandler()
+ } else {
+ if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
+ h = failedHandler()
+ }
+ }
+ h.ServeDNS(w, request)
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handle with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
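+
+// For illustration, registering a zone on the DefaultServeMux and serving it
+// over UDP; the zone and address are placeholders:
+//
+//	HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
+//		m := new(Msg)
+//		m.SetReply(r)
+//		w.WriteMsg(m)
+//	})
+//	err := ListenAndServe(":8053", "udp", nil)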
+
+// Writer writes raw DNS messages; each call to Write should send an entire message.
+type Writer interface {
+ io.Writer
+}
+
+// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
+type Reader interface {
+ // ReadTCP reads a raw message from a TCP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
+ // ReadUDP reads a raw message from a UDP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
+}
+
+// defaultReader is an adapter for the Server struct that implements the Reader interface
+// using the readTCP and readUDP func of the embedded Server.
+type defaultReader struct {
+ *Server
+}
+
+func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ return dr.readTCP(conn, timeout)
+}
+
+func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ return dr.readUDP(conn, timeout)
+}
+
+// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
+// Implementations should never return a nil Reader.
+type DecorateReader func(Reader) Reader
+
+// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
+// Implementations should never return a nil Writer.
+type DecorateWriter func(Writer) Writer
+
+// A Server defines parameters for running a DNS server.
+type Server struct {
+ // Address to listen on, ":dns" if empty.
+ Addr string
+ // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
+ Net string
+ // TCP Listener to use, this is to aid in systemd's socket activation.
+ Listener net.Listener
+ // TLS connection configuration
+ TLSConfig *tls.Config
+ // UDP "Listener" to use, this is to aid in systemd's socket activation.
+ PacketConn net.PacketConn
+ // Handler to invoke, dns.DefaultServeMux if nil.
+ Handler Handler
+ // Default buffer size to use to read incoming UDP messages. If not set
+ // it defaults to MinMsgSize (512 B).
+ UDPSize int
+ // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
+ ReadTimeout time.Duration
+ // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
+ WriteTimeout time.Duration
+ // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
+ IdleTimeout func() time.Duration
+ // Secret(s) for Tsig map[<zonename>]<base64 secret>.
+ TsigSecret map[string]string
+ // Unsafe instructs the server to disregard any sanity checks and directly hand the message to
+ // the handler. It will specifically not check that the QR bit is unset on the query.
+ Unsafe bool
+ // If NotifyStartedFunc is set it is called once the server has started listening.
+ NotifyStartedFunc func()
+ // DecorateReader is optional, allows customization of the process that reads raw DNS messages.
+ DecorateReader DecorateReader
+ // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
+ DecorateWriter DecorateWriter
+
+ // Graceful shutdown handling
+
+ inFlight sync.WaitGroup
+
+ lock sync.RWMutex
+ started bool
+}
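+
+// For illustration, configuring a Server directly instead of using the
+// package-level helpers; the address and timeout are placeholders:
+//
+//	srv := &Server{Addr: ":8053", Net: "udp", ReadTimeout: 2 * time.Second}
+//	err := srv.ListenAndServe()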
+
+// ListenAndServe starts a nameserver on the configured address in *Server.
+func (srv *Server) ListenAndServe() error {
+ srv.lock.Lock()
+ defer srv.lock.Unlock()
+ if srv.started {
+ return &Error{err: "server already started"}
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":domain"
+ }
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ switch srv.Net {
+ case "tcp", "tcp4", "tcp6":
+ a, err := net.ResolveTCPAddr(srv.Net, addr)
+ if err != nil {
+ return err
+ }
+ l, err := net.ListenTCP(srv.Net, a)
+ if err != nil {
+ return err
+ }
+ srv.Listener = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveTCP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ case "tcp-tls", "tcp4-tls", "tcp6-tls":
+ network := "tcp"
+ if srv.Net == "tcp4-tls" {
+ network = "tcp4"
+ } else if srv.Net == "tcp6" {
+ network = "tcp6"
+ }
+
+ l, err := tls.Listen(network, addr, srv.TLSConfig)
+ if err != nil {
+ return err
+ }
+ srv.Listener = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveTCP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ case "udp", "udp4", "udp6":
+ a, err := net.ResolveUDPAddr(srv.Net, addr)
+ if err != nil {
+ return err
+ }
+ l, err := net.ListenUDP(srv.Net, a)
+ if err != nil {
+ return err
+ }
+ if e := setUDPSocketOptions(l); e != nil {
+ return e
+ }
+ srv.PacketConn = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveUDP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ }
+ return &Error{err: "bad network"}
+}
+
+// ActivateAndServe starts a nameserver with the PacketConn or Listener
+// configured in *Server. Its main use is to start a server from systemd.
+func (srv *Server) ActivateAndServe() error {
+ srv.lock.Lock()
+ defer srv.lock.Unlock()
+ if srv.started {
+ return &Error{err: "server already started"}
+ }
+ pConn := srv.PacketConn
+ l := srv.Listener
+ if pConn != nil {
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ if t, ok := pConn.(*net.UDPConn); ok {
+ if e := setUDPSocketOptions(t); e != nil {
+ return e
+ }
+ srv.started = true
+ srv.lock.Unlock()
+ e := srv.serveUDP(t)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return e
+ }
+ }
+ if l != nil {
+ srv.started = true
+ srv.lock.Unlock()
+ e := srv.serveTCP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return e
+ }
+ return &Error{err: "bad listeners"}
+}
+
+// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
+// ActivateAndServe will return. All in progress queries are completed before the server
+// is taken down. If the Shutdown is taking longer than the reading timeout an error
+// is returned.
+func (srv *Server) Shutdown() error {
+ srv.lock.Lock()
+ if !srv.started {
+ srv.lock.Unlock()
+ return &Error{err: "server not started"}
+ }
+ srv.started = false
+ srv.lock.Unlock()
+
+ if srv.PacketConn != nil {
+ srv.PacketConn.Close()
+ }
+ if srv.Listener != nil {
+ srv.Listener.Close()
+ }
+
+ fin := make(chan bool)
+ go func() {
+ srv.inFlight.Wait()
+ fin <- true
+ }()
+
+ select {
+ case <-time.After(srv.getReadTimeout()):
+ return &Error{err: "server shutdown is pending"}
+ case <-fin:
+ return nil
+ }
+}
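+
+// A minimal sketch of a graceful stop, assuming the server is started in a
+// separate goroutine; the address and network below are illustrative:
+//
+//  srv := &Server{Addr: ":8053", Net: "udp"}
+//  go srv.ListenAndServe()
+//  // ... serve queries ...
+//  if err := srv.Shutdown(); err != nil {
+//      // in-flight queries did not drain within the read timeout
+//  }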
+
+// getReadTimeout is a helper func that returns the default timeout unless the server has set its own ReadTimeout.
+func (srv *Server) getReadTimeout() time.Duration {
+ rtimeout := dnsTimeout
+ if srv.ReadTimeout != 0 {
+ rtimeout = srv.ReadTimeout
+ }
+ return rtimeout
+}
+
+// serveTCP starts a TCP listener for the server.
+// Each request is handled in a separate goroutine.
+func (srv *Server) serveTCP(l net.Listener) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ handler := srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ rtimeout := srv.getReadTimeout()
+ // deadline is not used here
+ for {
+ rw, err := l.Accept()
+ if err != nil {
+ if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
+ continue
+ }
+ return err
+ }
+ m, err := reader.ReadTCP(rw, rtimeout)
+ srv.lock.RLock()
+ if !srv.started {
+ srv.lock.RUnlock()
+ return nil
+ }
+ srv.lock.RUnlock()
+ if err != nil {
+ continue
+ }
+ srv.inFlight.Add(1)
+ go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
+ }
+}
+
+// serveUDP starts a UDP listener for the server.
+// Each request is handled in a separate goroutine.
+func (srv *Server) serveUDP(l *net.UDPConn) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ handler := srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ rtimeout := srv.getReadTimeout()
+ // deadline is not used here
+ for {
+ m, s, err := reader.ReadUDP(l, rtimeout)
+ srv.lock.RLock()
+ if !srv.started {
+ srv.lock.RUnlock()
+ return nil
+ }
+ srv.lock.RUnlock()
+ if err != nil {
+ continue
+ }
+ srv.inFlight.Add(1)
+ go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
+ }
+}
+
+// Serve a new connection.
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
+ defer srv.inFlight.Done()
+
+ w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
+ if srv.DecorateWriter != nil {
+ w.writer = srv.DecorateWriter(w)
+ } else {
+ w.writer = w
+ }
+
+ q := 0 // counter for the number of TCP queries we get
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+Redo:
+ req := new(Msg)
+ err := req.Unpack(m)
+ if err != nil { // Send a FormatError back
+ x := new(Msg)
+ x.SetRcodeFormatError(req)
+ w.WriteMsg(x)
+ goto Exit
+ }
+ if !srv.Unsafe && req.Response {
+ goto Exit
+ }
+
+ w.tsigStatus = nil
+ if w.tsigSecret != nil {
+ if t := req.IsTsig(); t != nil {
+ secret := t.Hdr.Name
+ if _, ok := w.tsigSecret[secret]; !ok {
+ w.tsigStatus = ErrKeyAlg
+ } else {
+ w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
+ }
+ w.tsigTimersOnly = false
+ w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
+ }
+ }
+ h.ServeDNS(w, req) // Writes back to the client
+
+Exit:
+ if w.tcp == nil {
+ return
+ }
+ // TODO(miek): make this number configurable?
+ if q > maxTCPQueries { // close socket after this many queries
+ w.Close()
+ return
+ }
+
+ if w.hijacked {
+ return // client calls Close()
+ }
+ if u != nil { // UDP, "close" and return
+ w.Close()
+ return
+ }
+ idleTimeout := tcpIdleTimeout
+ if srv.IdleTimeout != nil {
+ idleTimeout = srv.IdleTimeout()
+ }
+ m, err = reader.ReadTCP(w.tcp, idleTimeout)
+ if err == nil {
+ q++
+ goto Redo
+ }
+ w.Close()
+ return
+}
+
+func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ l := make([]byte, 2)
+ n, err := conn.Read(l)
+ if err != nil || n != 2 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrShortRead
+ }
+ length := binary.BigEndian.Uint16(l)
+ if length == 0 {
+ return nil, ErrShortRead
+ }
+ m := make([]byte, int(length))
+ n, err = conn.Read(m[:int(length)])
+ if err != nil || n == 0 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrShortRead
+ }
+ i := n
+ for i < int(length) {
+ j, err := conn.Read(m[i:int(length)])
+ if err != nil {
+ return nil, err
+ }
+ i += j
+ }
+ n = i
+ m = m[:n]
+ return m, nil
+}
+
+func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ m := make([]byte, srv.UDPSize)
+ n, s, err := ReadFromSessionUDP(conn, m)
+ if err != nil || n == 0 {
+ if err != nil {
+ return nil, nil, err
+ }
+ return nil, nil, ErrShortRead
+ }
+ m = m[:n]
+ return m, s, nil
+}
+
+// WriteMsg implements the ResponseWriter.WriteMsg method.
+func (w *response) WriteMsg(m *Msg) (err error) {
+ var data []byte
+ if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
+ if t := m.IsTsig(); t != nil {
+ data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+ }
+ }
+ data, err = m.Pack()
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+}
+
+// Write implements the ResponseWriter.Write method.
+func (w *response) Write(m []byte) (int, error) {
+ switch {
+ case w.udp != nil:
+ n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
+ return n, err
+ case w.tcp != nil:
+ lm := len(m)
+ if lm < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ if lm > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+ l := make([]byte, 2, 2+lm)
+ binary.BigEndian.PutUint16(l, uint16(lm))
+ m = append(l, m...)
+
+ n, err := io.Copy(w.tcp, bytes.NewReader(m))
+ return int(n), err
+ }
+ panic("not reached")
+}
+
+// LocalAddr implements the ResponseWriter.LocalAddr method.
+func (w *response) LocalAddr() net.Addr {
+ if w.tcp != nil {
+ return w.tcp.LocalAddr()
+ }
+ return w.udp.LocalAddr()
+}
+
+// RemoteAddr implements the ResponseWriter.RemoteAddr method.
+func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
+
+// TsigStatus implements the ResponseWriter.TsigStatus method.
+func (w *response) TsigStatus() error { return w.tsigStatus }
+
+// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
+func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
+
+// Hijack implements the ResponseWriter.Hijack method.
+func (w *response) Hijack() { w.hijacked = true }
+
+// Close implements the ResponseWriter.Close method.
+func (w *response) Close() error {
+ // Can't close the udp conn, as that is actually the listener.
+ if w.tcp != nil {
+ e := w.tcp.Close()
+ w.tcp = nil
+ return e
+ }
+ return nil
+}
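+
+// A minimal sketch of a TSIG-aware handler; the zone, key name and secret are
+// illustrative, and the Server's TsigSecret map must hold the same key the
+// client signs with:
+//
+//  srv := &Server{Addr: ":8053", Net: "udp",
+//      TsigSecret: map[string]string{"axfr.": "pRZgBrBvI4NAHZYhxmhs/Q=="}}
+//  HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
+//      m := new(Msg)
+//      m.SetReply(r)
+//      if r.IsTsig() != nil && w.TsigStatus() == nil { // signed and verified
+//          m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
+//      }
+//      w.WriteMsg(m)
+//  })
+//  srv.ListenAndServe()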
diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go
new file mode 100644
index 000000000..1b5cbc97e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/server_test.go
@@ -0,0 +1,679 @@
+package dns
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+)
+
+func HelloServer(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+
+ m.Extra = make([]RR, 1)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ w.WriteMsg(m)
+}
+
+func HelloServerBadId(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+ m.Id++
+
+ m.Extra = make([]RR, 1)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ w.WriteMsg(m)
+}
+
+func AnotherHelloServer(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+
+ m.Extra = make([]RR, 1)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}}
+ w.WriteMsg(m)
+}
+
+func RunLocalUDPServer(laddr string) (*Server, string, error) {
+ server, l, _, err := RunLocalUDPServerWithFinChan(laddr)
+
+ return server, l, err
+}
+
+func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) {
+ pc, err := net.ListenPacket("udp", laddr)
+ if err != nil {
+ return nil, "", nil, err
+ }
+ server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+
+ fin := make(chan struct{}, 0)
+
+ go func() {
+ server.ActivateAndServe()
+ close(fin)
+ pc.Close()
+ }()
+
+ waitLock.Lock()
+ return server, pc.LocalAddr().String(), fin, nil
+}
+
+func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) {
+ pc, err := net.ListenPacket("udp", laddr)
+ if err != nil {
+ return nil, "", err
+ }
+ server := &Server{PacketConn: pc, Unsafe: true,
+ ReadTimeout: time.Hour, WriteTimeout: time.Hour}
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+
+ go func() {
+ server.ActivateAndServe()
+ pc.Close()
+ }()
+
+ waitLock.Lock()
+ return server, pc.LocalAddr().String(), nil
+}
+
+func RunLocalTCPServer(laddr string) (*Server, string, error) {
+ l, err := net.Listen("tcp", laddr)
+ if err != nil {
+ return nil, "", err
+ }
+
+ server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+
+ go func() {
+ server.ActivateAndServe()
+ l.Close()
+ }()
+
+ waitLock.Lock()
+ return server, l.Addr().String(), nil
+}
+
+func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) {
+ l, err := tls.Listen("tcp", laddr, config)
+ if err != nil {
+ return nil, "", err
+ }
+
+ server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+
+ go func() {
+ server.ActivateAndServe()
+ l.Close()
+ }()
+
+ waitLock.Lock()
+ return server, l.Addr().String(), nil
+}
+
+func TestServing(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ HandleFunc("example.com.", AnotherHelloServer)
+ defer HandleRemove("miek.nl.")
+ defer HandleRemove("example.com.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil || len(r.Extra) == 0 {
+ t.Fatal("failed to exchange miek.nl", err)
+ }
+ txt := r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello world" {
+ t.Error("unexpected result for miek.nl", txt, "!= Hello world")
+ }
+
+ m.SetQuestion("example.com.", TypeTXT)
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatal("failed to exchange example.com", err)
+ }
+ txt = r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello example" {
+ t.Error("unexpected result for example.com", txt, "!= Hello example")
+ }
+
+ // Test mixed case, as noticed by Ask.
+ m.SetQuestion("eXaMplE.cOm.", TypeTXT)
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Error("failed to exchange eXaMplE.cOm", err)
+ }
+ txt = r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello example" {
+ t.Error("unexpected result for example.com", txt, "!= Hello example")
+ }
+}
+
+func TestServingTLS(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ HandleFunc("example.com.", AnotherHelloServer)
+ defer HandleRemove("miek.nl.")
+ defer HandleRemove("example.com.")
+
+ cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock)
+ if err != nil {
+ t.Fatalf("unable to build certificate: %v", err)
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config)
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ c := new(Client)
+ c.Net = "tcp-tls"
+ c.TLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil || len(r.Extra) == 0 {
+ t.Fatal("failed to exchange miek.nl", err)
+ }
+ txt := r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello world" {
+ t.Error("unexpected result for miek.nl", txt, "!= Hello world")
+ }
+
+ m.SetQuestion("example.com.", TypeTXT)
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatal("failed to exchange example.com", err)
+ }
+ txt = r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello example" {
+ t.Error("unexpected result for example.com", txt, "!= Hello example")
+ }
+
+ // Test mixed case, as noticed by Ask.
+ m.SetQuestion("eXaMplE.cOm.", TypeTXT)
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Error("failed to exchange eXaMplE.cOm", err)
+ }
+ txt = r.Extra[0].(*TXT).Txt[0]
+ if txt != "Hello example" {
+ t.Error("unexpected result for example.com", txt, "!= Hello example")
+ }
+}
+
+func BenchmarkServe(b *testing.B) {
+ b.StopTimer()
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+ a := runtime.GOMAXPROCS(4)
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl", TypeSOA)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ c.Exchange(m, addrstr)
+ }
+ runtime.GOMAXPROCS(a)
+}
+
+func benchmarkServe6(b *testing.B) {
+ b.StopTimer()
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+ a := runtime.GOMAXPROCS(4)
+ s, addrstr, err := RunLocalUDPServer("[::1]:0")
+ if err != nil {
+ b.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl", TypeSOA)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ c.Exchange(m, addrstr)
+ }
+ runtime.GOMAXPROCS(a)
+}
+
+func HelloServerCompress(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+ m.Extra = make([]RR, 1)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+ m.Compress = true
+ w.WriteMsg(m)
+}
+
+func BenchmarkServeCompress(b *testing.B) {
+ b.StopTimer()
+ HandleFunc("miek.nl.", HelloServerCompress)
+ defer HandleRemove("miek.nl.")
+ a := runtime.GOMAXPROCS(4)
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl", TypeSOA)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ c.Exchange(m, addrstr)
+ }
+ runtime.GOMAXPROCS(a)
+}
+
+func TestDotAsCatchAllWildcard(t *testing.T) {
+ mux := NewServeMux()
+ mux.Handle(".", HandlerFunc(HelloServer))
+ mux.Handle("example.com.", HandlerFunc(AnotherHelloServer))
+
+ handler := mux.match("www.miek.nl.", TypeTXT)
+ if handler == nil {
+ t.Error("wildcard match failed")
+ }
+
+ handler = mux.match("www.example.com.", TypeTXT)
+ if handler == nil {
+ t.Error("example.com match failed")
+ }
+
+ handler = mux.match("a.www.example.com.", TypeTXT)
+ if handler == nil {
+ t.Error("a.www.example.com match failed")
+ }
+
+ handler = mux.match("boe.", TypeTXT)
+ if handler == nil {
+ t.Error("boe. match failed")
+ }
+}
+
+func TestCaseFolding(t *testing.T) {
+ mux := NewServeMux()
+ mux.Handle("_udp.example.com.", HandlerFunc(HelloServer))
+
+ handler := mux.match("_dns._udp.example.com.", TypeSRV)
+ if handler == nil {
+ t.Error("case sensitive characters folded")
+ }
+
+ handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV)
+ if handler == nil {
+ t.Error("case insensitive characters not folded")
+ }
+}
+
+func TestRootServer(t *testing.T) {
+ mux := NewServeMux()
+ mux.Handle(".", HandlerFunc(HelloServer))
+
+ handler := mux.match(".", TypeNS)
+ if handler == nil {
+ t.Error("root match failed")
+ }
+}
+
+type maxRec struct {
+ max int
+ sync.RWMutex
+}
+
+var M = new(maxRec)
+
+func HelloServerLargeResponse(resp ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+ m.Authoritative = true
+ m1 := 0
+ M.RLock()
+ m1 = M.max
+ M.RUnlock()
+ for i := 0; i < m1; i++ {
+ aRec := &A{
+ Hdr: RR_Header{
+ Name: req.Question[0].Name,
+ Rrtype: TypeA,
+ Class: ClassINET,
+ Ttl: 0,
+ },
+ A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(),
+ }
+ m.Answer = append(m.Answer, aRec)
+ }
+ resp.WriteMsg(m)
+}
+
+func TestServingLargeResponses(t *testing.T) {
+ HandleFunc("example.", HelloServerLargeResponse)
+ defer HandleRemove("example.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ // Create request
+ m := new(Msg)
+ m.SetQuestion("web.service.example.", TypeANY)
+
+ c := new(Client)
+ c.Net = "udp"
+ M.Lock()
+ M.max = 2
+ M.Unlock()
+ _, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ // This must fail
+ M.Lock()
+ M.max = 20
+ M.Unlock()
+ _, _, err = c.Exchange(m, addrstr)
+ if err == nil {
+ t.Error("failed to fail exchange, this should generate packet error")
+ }
+ // But this must work again
+ c.UDPSize = 7000
+ _, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+}
+
+func TestServingResponse(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ HandleFunc("miek.nl.", HelloServer)
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+ m.Response = false
+ _, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatal("failed to exchange", err)
+ }
+ m.Response = true
+ _, _, err = c.Exchange(m, addrstr)
+ if err == nil {
+ t.Fatal("exchanged response message")
+ }
+
+ s.Shutdown()
+ s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m.Response = true
+ _, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatal("could exchanged response message in Unsafe mode")
+ }
+}
+
+func TestShutdownTCP(t *testing.T) {
+ s, _, err := RunLocalTCPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ err = s.Shutdown()
+ if err != nil {
+ t.Errorf("could not shutdown test TCP server, %v", err)
+ }
+}
+
+func TestShutdownTLS(t *testing.T) {
+ cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock)
+ if err != nil {
+ t.Fatalf("unable to build certificate: %v", err)
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ s, _, err := RunLocalTLSServer("127.0.0.1:0", &config)
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ err = s.Shutdown()
+ if err != nil {
+ t.Errorf("could not shutdown test TLS server, %v", err)
+ }
+}
+
+type trigger struct {
+ done bool
+ sync.RWMutex
+}
+
+func (t *trigger) Set() {
+ t.Lock()
+ defer t.Unlock()
+ t.done = true
+}
+func (t *trigger) Get() bool {
+ t.RLock()
+ defer t.RUnlock()
+ return t.done
+}
+
+func TestHandlerCloseTCP(t *testing.T) {
+
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ panic(err)
+ }
+ addr := ln.Addr().String()
+
+ server := &Server{Addr: addr, Net: "tcp", Listener: ln}
+
+ hname := "testhandlerclosetcp."
+ triggered := &trigger{}
+ HandleFunc(hname, func(w ResponseWriter, r *Msg) {
+ triggered.Set()
+ w.Close()
+ })
+ defer HandleRemove(hname)
+
+ go func() {
+ defer server.Shutdown()
+ c := &Client{Net: "tcp"}
+ m := new(Msg).SetQuestion(hname, 1)
+ tries := 0
+ exchange:
+ _, _, err := c.Exchange(m, addr)
+ if err != nil && err != io.EOF {
+ t.Logf("exchange failed: %s\n", err)
+ if tries == 3 {
+ return
+ }
+ time.Sleep(time.Second / 10)
+ tries++
+ goto exchange
+ }
+ }()
+ server.ActivateAndServe()
+ if !triggered.Get() {
+ t.Fatalf("handler never called")
+ }
+}
+
+func TestShutdownUDP(t *testing.T) {
+ s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ err = s.Shutdown()
+ if err != nil {
+ t.Errorf("could not shutdown test UDP server, %v", err)
+ }
+ select {
+ case <-fin:
+ case <-time.After(2 * time.Second):
+ t.Error("Could not shutdown test UDP server. Gave up waiting")
+ }
+}
+
+type ExampleFrameLengthWriter struct {
+ Writer
+}
+
+func (e *ExampleFrameLengthWriter) Write(m []byte) (int, error) {
+ fmt.Println("writing raw DNS message of length", len(m))
+ return e.Writer.Write(m)
+}
+
+func ExampleDecorateWriter() {
+ // instrument raw DNS message writing
+ wf := DecorateWriter(func(w Writer) Writer {
+ return &ExampleFrameLengthWriter{w}
+ })
+
+ // simple UDP server
+ pc, err := net.ListenPacket("udp", "127.0.0.1:0")
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ server := &Server{
+ PacketConn: pc,
+ DecorateWriter: wf,
+ ReadTimeout: time.Hour, WriteTimeout: time.Hour,
+ }
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+ defer server.Shutdown()
+
+ go func() {
+ server.ActivateAndServe()
+ pc.Close()
+ }()
+
+ waitLock.Lock()
+
+ HandleFunc("miek.nl.", HelloServer)
+
+ c := new(Client)
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeTXT)
+ _, _, err = c.Exchange(m, pc.LocalAddr().String())
+ if err != nil {
+ fmt.Println("failed to exchange", err.Error())
+ return
+ }
+ // Output: writing raw DNS message of length 56
+}
+
+var (
+ // CertPEMBlock is X.509 certificate data used to test TLS servers (used with tls.X509KeyPair)
+ CertPEMBlock = []byte(`-----BEGIN CERTIFICATE-----
+MIIDAzCCAeugAwIBAgIRAJFYMkcn+b8dpU15wjf++GgwDQYJKoZIhvcNAQELBQAw
+EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjAxMDgxMjAzNTNaFw0xNzAxMDcxMjAz
+NTNaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDXjqO6skvP03k58CNjQggd9G/mt+Wa+xRU+WXiKCCHttawM8x+slq5
+yfsHCwxlwsGn79HmJqecNqgHb2GWBXAvVVokFDTcC1hUP4+gp2gu9Ny27UHTjlLm
+O0l/xZ5MN8tfKyYlFw18tXu3fkaPyHj8v/D1RDkuo4ARdFvGSe8TqisbhLk2+9ow
+xfIGbEM9Fdiw8qByC2+d+FfvzIKz3GfQVwn0VoRom8L6NBIANq1IGrB5JefZB6nv
+DnfuxkBmY7F1513HKuEJ8KsLWWZWV9OPU4j4I4Rt+WJNlKjbD2srHxyrS2RDsr91
+8nCkNoWVNO3sZq0XkWKecdc921vL4ginAgMBAAGjVDBSMA4GA1UdDwEB/wQEAwIC
+pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBoGA1UdEQQT
+MBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAGcU3iyLBIVZj
+aDzSvEDHUd1bnLBl1C58Xu/CyKlPqVU7mLfK0JcgEaYQTSX6fCJVNLbbCrcGLsPJ
+fbjlBbyeLjTV413fxPVuona62pBFjqdtbli2Qe8FRH2KBdm41JUJGdo+SdsFu7nc
+BFOcubdw6LLIXvsTvwndKcHWx1rMX709QU1Vn1GAIsbJV/DWI231Jyyb+lxAUx/C
+8vce5uVxiKcGS+g6OjsN3D3TtiEQGSXLh013W6Wsih8td8yMCMZ3w8LQ38br1GUe
+ahLIgUJ9l6HDguM17R7kGqxNvbElsMUHfTtXXP7UDQUiYXDakg8xDP6n9DCDhJ8Y
+bSt7OLB7NQ==
+-----END CERTIFICATE-----`)
+
+ // KeyPEMBlock is the RSA private key data used to test TLS servers (used with tls.X509KeyPair)
+ KeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA146jurJLz9N5OfAjY0IIHfRv5rflmvsUVPll4iggh7bWsDPM
+frJaucn7BwsMZcLBp+/R5iannDaoB29hlgVwL1VaJBQ03AtYVD+PoKdoLvTctu1B
+045S5jtJf8WeTDfLXysmJRcNfLV7t35Gj8h4/L/w9UQ5LqOAEXRbxknvE6orG4S5
+NvvaMMXyBmxDPRXYsPKgcgtvnfhX78yCs9xn0FcJ9FaEaJvC+jQSADatSBqweSXn
+2Qep7w537sZAZmOxdeddxyrhCfCrC1lmVlfTj1OI+COEbfliTZSo2w9rKx8cq0tk
+Q7K/dfJwpDaFlTTt7GatF5FinnHXPdtby+IIpwIDAQABAoIBAAJK4RDmPooqTJrC
+JA41MJLo+5uvjwCT9QZmVKAQHzByUFw1YNJkITTiognUI0CdzqNzmH7jIFs39ZeG
+proKusO2G6xQjrNcZ4cV2fgyb5g4QHStl0qhs94A+WojduiGm2IaumAgm6Mc5wDv
+ld6HmknN3Mku/ZCyanVFEIjOVn2WB7ZQLTBs6ZYaebTJG2Xv6p9t2YJW7pPQ9Xce
+s9ohAWohyM4X/OvfnfnLtQp2YLw/BxwehBsCR5SXM3ibTKpFNtxJC8hIfTuWtxZu
+2ywrmXShYBRB1WgtZt5k04bY/HFncvvcHK3YfI1+w4URKtwdaQgPUQRbVwDwuyBn
+flfkCJECgYEA/eWt01iEyE/lXkGn6V9lCocUU7lCU6yk5UT8VXVUc5If4KZKPfCk
+p4zJDOqwn2eM673aWz/mG9mtvAvmnugaGjcaVCyXOp/D/GDmKSoYcvW5B/yjfkLy
+dK6Yaa5LDRVYlYgyzcdCT5/9Qc626NzFwKCZNI4ncIU8g7ViATRxWJ8CgYEA2Ver
+vZ0M606sfgC0H3NtwNBxmuJ+lIF5LNp/wDi07lDfxRR1rnZMX5dnxjcpDr/zvm8J
+WtJJX3xMgqjtHuWKL3yKKony9J5ZPjichSbSbhrzfovgYIRZLxLLDy4MP9L3+CX/
+yBXnqMWuSnFX+M5fVGxdDWiYF3V+wmeOv9JvavkCgYEAiXAPDFzaY+R78O3xiu7M
+r0o3wqqCMPE/wav6O/hrYrQy9VSO08C0IM6g9pEEUwWmzuXSkZqhYWoQFb8Lc/GI
+T7CMXAxXQLDDUpbRgG79FR3Wr3AewHZU8LyiXHKwxcBMV4WGmsXGK3wbh8fyU1NO
+6NsGk+BvkQVOoK1LBAPzZ1kCgYEAsBSmD8U33T9s4dxiEYTrqyV0lH3g/SFz8ZHH
+pAyNEPI2iC1ONhyjPWKlcWHpAokiyOqeUpVBWnmSZtzC1qAydsxYB6ShT+sl9BHb
+RMix/QAauzBJhQhUVJ3OIys0Q1UBDmqCsjCE8SfOT4NKOUnA093C+YT+iyrmmktZ
+zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb
+7+a1OMhLwWbuz+pl64wFKrbSUyimMOYQpjVE/1vk/kb99pxbgol27hdKyTH1d+ov
+kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA=
+-----END RSA PRIVATE KEY-----`)
+)
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
new file mode 100644
index 000000000..2dce06af8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -0,0 +1,219 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/binary"
+ "math/big"
+ "strings"
+ "time"
+)
+
+// Sign signs a dns.Msg. It fills the signature with the appropriate data.
+// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
+// and Expiration set.
+func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
+ if k == nil {
+ return nil, ErrPrivKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return nil, ErrKey
+ }
+ rr.Header().Rrtype = TypeSIG
+ rr.Header().Class = ClassANY
+ rr.Header().Ttl = 0
+ rr.Header().Name = "."
+ rr.OrigTtl = 0
+ rr.TypeCovered = 0
+ rr.Labels = 0
+
+ buf := make([]byte, m.Len()+rr.len())
+ mbuf, err := m.PackBuffer(buf)
+ if err != nil {
+ return nil, err
+ }
+ if &buf[0] != &mbuf[0] {
+ return nil, ErrBuf
+ }
+ off, err := PackRR(rr, buf, len(mbuf), nil, false)
+ if err != nil {
+ return nil, err
+ }
+ buf = buf[:off:cap(buf)]
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return nil, ErrAlg
+ }
+
+ hasher := hash.New()
+ // Write SIG rdata
+ hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
+ // Write message
+ hasher.Write(buf[:len(mbuf)])
+
+ signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ rr.Signature = toBase64(signature)
+ sig := string(signature)
+
+ buf = append(buf, sig...)
+ if len(buf) > int(^uint16(0)) {
+ return nil, ErrBuf
+ }
+ // Adjust sig data length
+ rdoff := len(mbuf) + 1 + 2 + 2 + 4
+ rdlen := binary.BigEndian.Uint16(buf[rdoff:])
+ rdlen += uint16(len(sig))
+ binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
+ // Adjust additional count
+ adc := binary.BigEndian.Uint16(buf[10:])
+ adc++
+ binary.BigEndian.PutUint16(buf[10:], adc)
+ return buf, nil
+}
+
+// Verify validates the message buf using the key k.
+// It's assumed that buf is a valid message from which rr was unpacked.
+func (rr *SIG) Verify(k *KEY, buf []byte) error {
+ if k == nil {
+ return ErrKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ var hash crypto.Hash
+ switch rr.Algorithm {
+ case DSA, RSASHA1:
+ hash = crypto.SHA1
+ case RSASHA256, ECDSAP256SHA256:
+ hash = crypto.SHA256
+ case ECDSAP384SHA384:
+ hash = crypto.SHA384
+ case RSASHA512:
+ hash = crypto.SHA512
+ default:
+ return ErrAlg
+ }
+ hasher := hash.New()
+
+ buflen := len(buf)
+ qdc := binary.BigEndian.Uint16(buf[4:])
+ anc := binary.BigEndian.Uint16(buf[6:])
+ auc := binary.BigEndian.Uint16(buf[8:])
+ adc := binary.BigEndian.Uint16(buf[10:])
+ offset := 12
+ var err error
+ for i := uint16(0); i < qdc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type and Class
+ offset += 2 + 2
+ }
+ for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type, Class and TTL
+ offset += 2 + 2 + 4
+ if offset+1 >= buflen {
+ continue
+ }
+ var rdlen uint16
+ rdlen = binary.BigEndian.Uint16(buf[offset:])
+ offset += 2
+ offset += int(rdlen)
+ }
+ if offset >= buflen {
+ return &Error{err: "overflowing unpacking signed message"}
+ }
+
+ // offset should be just prior to SIG
+ bodyend := offset
+ // owner name SHOULD be root
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip Type, Class, TTL, RDLen
+ offset += 2 + 2 + 4 + 2
+ sigstart := offset
+ // Skip Type Covered, Algorithm, Labels, Original TTL
+ offset += 2 + 1 + 1 + 4
+ if offset+4+4 >= buflen {
+ return &Error{err: "overflow unpacking signed message"}
+ }
+ expire := binary.BigEndian.Uint32(buf[offset:])
+ offset += 4
+ incept := binary.BigEndian.Uint32(buf[offset:])
+ offset += 4
+ now := uint32(time.Now().Unix())
+ if now < incept || now > expire {
+ return ErrTime
+ }
+ // Skip key tag
+ offset += 2
+ var signername string
+ signername, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // If the key has come from the DNS, name compression might
+ // have mangled the case of the name.
+ if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
+ return &Error{err: "signer name doesn't match key name"}
+ }
+ sigend := offset
+ hasher.Write(buf[sigstart:sigend])
+ hasher.Write(buf[:10])
+ hasher.Write([]byte{
+ byte((adc - 1) << 8),
+ byte(adc - 1),
+ })
+ hasher.Write(buf[12:bodyend])
+
+ hashed := hasher.Sum(nil)
+ sig := buf[sigend:]
+ switch k.Algorithm {
+ case DSA:
+ pk := k.publicKeyDSA()
+ sig = sig[1:]
+ r := big.NewInt(0)
+ r.SetBytes(sig[:len(sig)/2])
+ s := big.NewInt(0)
+ s.SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if dsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ case RSASHA1, RSASHA256, RSASHA512:
+ pk := k.publicKeyRSA()
+ if pk != nil {
+ return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
+ }
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pk := k.publicKeyECDSA()
+ r := big.NewInt(0)
+ r.SetBytes(sig[:len(sig)/2])
+ s := big.NewInt(0)
+ s.SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if ecdsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ }
+ return ErrKeyAlg
+}
diff --git a/vendor/github.com/miekg/dns/sig0_test.go b/vendor/github.com/miekg/dns/sig0_test.go
new file mode 100644
index 000000000..122de6a8e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sig0_test.go
@@ -0,0 +1,89 @@
+package dns
+
+import (
+ "crypto"
+ "testing"
+ "time"
+)
+
+func TestSIG0(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ m := new(Msg)
+ m.SetQuestion("example.org.", TypeSOA)
+ for _, alg := range []uint8{ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} {
+ algstr := AlgorithmToString[alg]
+ keyrr := new(KEY)
+ keyrr.Hdr.Name = algstr + "."
+ keyrr.Hdr.Rrtype = TypeKEY
+ keyrr.Hdr.Class = ClassINET
+ keyrr.Algorithm = alg
+ keysize := 1024
+ switch alg {
+ case ECDSAP256SHA256:
+ keysize = 256
+ case ECDSAP384SHA384:
+ keysize = 384
+ }
+ pk, err := keyrr.Generate(keysize)
+ if err != nil {
+ t.Errorf("failed to generate key for “%sâ€: %v", algstr, err)
+ continue
+ }
+ now := uint32(time.Now().Unix())
+ sigrr := new(SIG)
+ sigrr.Hdr.Name = "."
+ sigrr.Hdr.Rrtype = TypeSIG
+ sigrr.Hdr.Class = ClassANY
+ sigrr.Algorithm = alg
+ sigrr.Expiration = now + 300
+ sigrr.Inception = now - 300
+ sigrr.KeyTag = keyrr.KeyTag()
+ sigrr.SignerName = keyrr.Hdr.Name
+ mb, err := sigrr.Sign(pk.(crypto.Signer), m)
+ if err != nil {
+ t.Errorf("failed to sign message using “%sâ€: %v", algstr, err)
+ continue
+ }
+ m := new(Msg)
+ if err := m.Unpack(mb); err != nil {
+ t.Errorf("failed to unpack message signed using “%sâ€: %v", algstr, err)
+ continue
+ }
+ if len(m.Extra) != 1 {
+ t.Errorf("missing SIG for message signed using “%sâ€", algstr)
+ continue
+ }
+ var sigrrwire *SIG
+ switch rr := m.Extra[0].(type) {
+ case *SIG:
+ sigrrwire = rr
+ default:
+ t.Errorf("expected SIG RR, instead: %v", rr)
+ continue
+ }
+ for _, rr := range []*SIG{sigrr, sigrrwire} {
+ id := "sigrr"
+ if rr == sigrrwire {
+ id = "sigrrwire"
+ }
+ if err := rr.Verify(keyrr, mb); err != nil {
+ t.Errorf("failed to verify “%s†signed SIG(%s): %v", algstr, id, err)
+ continue
+ }
+ }
+ mb[13]++
+ if err := sigrr.Verify(keyrr, mb); err == nil {
+ t.Errorf("verify succeeded on an altered message using “%sâ€", algstr)
+ continue
+ }
+ sigrr.Expiration = 2
+ sigrr.Inception = 1
+ mb, _ = sigrr.Sign(pk.(crypto.Signer), m)
+ if err := sigrr.Verify(keyrr, mb); err == nil {
+ t.Errorf("verify succeeded on an expired message using “%sâ€", algstr)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go
new file mode 100644
index 000000000..9573c7d0b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/singleinflight.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Adapted for dns package usage by Miek Gieben.
+
+package dns
+
+import "sync"
+import "time"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+ val *Msg
+ rtt time.Duration
+ err error
+ dups int
+}
+
+// singleflight represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type singleflight struct {
+ sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
+ g.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.Unlock()
+ c.wg.Wait()
+ return c.val, c.rtt, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.Unlock()
+
+ c.val, c.rtt, c.err = fn()
+ c.wg.Done()
+
+ g.Lock()
+ delete(g.m, key)
+ g.Unlock()
+
+ return c.val, c.rtt, c.err, c.dups > 0
+}
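+
+// A minimal in-package sketch of deduplicating identical queries with Do; the
+// key format and target address are illustrative assumptions:
+//
+//  var inflight singleflight
+//  r, rtt, err, shared := inflight.Do("example.org.TXT", func() (*Msg, time.Duration, error) {
+//      c := new(Client)
+//      m := new(Msg)
+//      m.SetQuestion("example.org.", TypeTXT)
+//      return c.Exchange(m, "198.51.100.1:53")
+//  })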
diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go
new file mode 100644
index 000000000..34fe6615a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tlsa.go
@@ -0,0 +1,86 @@
+package dns
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
+func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
+ switch matchingType {
+ case 0:
+ switch selector {
+ case 0:
+ return hex.EncodeToString(cert.Raw), nil
+ case 1:
+ return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
+ }
+ case 1:
+ h := sha256.New()
+ switch selector {
+ case 0:
+ io.WriteString(h, string(cert.Raw))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ case 2:
+ h := sha512.New()
+ switch selector {
+ case 0:
+ io.WriteString(h, string(cert.Raw))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ }
+ return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
+}
+
+// Sign creates a TLSA record from an SSL certificate.
+func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
+ r.Hdr.Rrtype = TypeTLSA
+ r.Usage = uint8(usage)
+ r.Selector = uint8(selector)
+ r.MatchingType = uint8(matchingType)
+
+ r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Verify verifies a TLSA record against an SSL certificate. If it is OK
+// a nil error is returned.
+func (r *TLSA) Verify(cert *x509.Certificate) error {
+ c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err // Not also ErrSig?
+ }
+ if r.Certificate == c {
+ return nil
+ }
+ return ErrSig // ErrSig, really?
+}
+
+// TLSAName returns the ownername of a TLSA resource record as per the
+// rules specified in RFC 6698, Section 3.
+func TLSAName(name, service, network string) (string, error) {
+ if !IsFqdn(name) {
+ return "", ErrFqdn
+ }
+ p, err := net.LookupPort(network, service)
+ if err != nil {
+ return "", err
+ }
+ return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
+}
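+
+// A minimal sketch of building a TLSA record for an HTTPS service; cert is
+// assumed to be a parsed *x509.Certificate obtained elsewhere:
+//
+//  owner, err := TLSAName("www.example.com.", "https", "tcp") // "_443._tcp.www.example.com."
+//  if err != nil { /* handle error */ }
+//  r := new(TLSA)
+//  r.Hdr = RR_Header{Name: owner, Class: ClassINET, Ttl: 3600}
+//  if err := r.Sign(3, 1, 1, cert); err != nil { /* handle error */ } // usage 3, selector 1 (SPKI), SHA-256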
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
new file mode 100644
index 000000000..78365e1c5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -0,0 +1,384 @@
+package dns
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/binary"
+ "encoding/hex"
+ "hash"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// HMAC hashing codes. These are transmitted as domain names.
+const (
+ HmacMD5 = "hmac-md5.sig-alg.reg.int."
+ HmacSHA1 = "hmac-sha1."
+ HmacSHA256 = "hmac-sha256."
+ HmacSHA512 = "hmac-sha512."
+)
+
+// TSIG is the RR that holds the transaction signature of a message.
+// See RFC 2845 and RFC 4635.
+type TSIG struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+ OrigId uint16
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// TSIG has no official presentation format, but this will suffice.
+
+func (rr *TSIG) String() string {
+ s := "\n;; TSIG PSEUDOSECTION:\n"
+ s += rr.Hdr.String() +
+ " " + rr.Algorithm +
+ " " + tsigTimeToString(rr.TimeSigned) +
+ " " + strconv.Itoa(int(rr.Fudge)) +
+ " " + strconv.Itoa(int(rr.MACSize)) +
+ " " + strings.ToUpper(rr.MAC) +
+ " " + strconv.Itoa(int(rr.OrigId)) +
+ " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
+ " " + strconv.Itoa(int(rr.OtherLen)) +
+ " " + rr.OtherData
+ return s
+}
+
+// The following values must be put in wireformat, so that the MAC can be calculated.
+// RFC 2845, section 3.4.2. TSIG Variables.
+type tsigWireFmt struct {
+ // From RR_Header
+ Name string `dns:"domain-name"`
+ Class uint16
+ Ttl uint32
+ // Rdata of the TSIG
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ // MACSize, MAC and OrigId excluded
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// If we have the MAC, use this type to convert it to wiredata. Section 3.4.3. Request MAC.
+type macWireFmt struct {
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+}
+
+// 3.3. Time values used in TSIG calculations
+type timerWireFmt struct {
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+}
+
+// TsigGenerate fills out the TSIG record attached to the message.
+// The message should contain a "stub" TSIG RR with the algorithm, key name
+// (owner name of the RR), time fudge (defaults to 300 seconds) and the current time.
+// The TSIG MAC is saved in that TSIG RR.
+// When TsigGenerate is called for the first time, requestMAC is set to the empty string and
+// timersOnly is false.
+// If something goes wrong an error is returned, otherwise it is nil.
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
+ if m.IsTsig() == nil {
+ panic("dns: TSIG not last RR in additional")
+ }
+ // If we barf here, the caller is to blame
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return nil, "", err
+ }
+
+ rr := m.Extra[len(m.Extra)-1].(*TSIG)
+ m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
+ mbuf, err := m.Pack()
+ if err != nil {
+ return nil, "", err
+ }
+ buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
+
+ t := new(TSIG)
+ var h hash.Hash
+ switch strings.ToLower(rr.Algorithm) {
+ case HmacMD5:
+ h = hmac.New(md5.New, []byte(rawsecret))
+ case HmacSHA1:
+ h = hmac.New(sha1.New, []byte(rawsecret))
+ case HmacSHA256:
+ h = hmac.New(sha256.New, []byte(rawsecret))
+ case HmacSHA512:
+ h = hmac.New(sha512.New, []byte(rawsecret))
+ default:
+ return nil, "", ErrKeyAlg
+ }
+ io.WriteString(h, string(buf))
+ t.MAC = hex.EncodeToString(h.Sum(nil))
+ t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
+
+ t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
+ t.Fudge = rr.Fudge
+ t.TimeSigned = rr.TimeSigned
+ t.Algorithm = rr.Algorithm
+ t.OrigId = m.Id
+
+ tbuf := make([]byte, t.len())
+ if off, err := PackRR(t, tbuf, 0, nil, false); err == nil {
+ tbuf = tbuf[:off] // reset to actual size used
+ } else {
+ return nil, "", err
+ }
+ mbuf = append(mbuf, tbuf...)
+ // Update the ArCount directly in the buffer.
+ binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
+
+ return mbuf, t.MAC, nil
+}
+
+// TsigVerify verifies the TSIG on a message.
+// If the signature does not validate err contains the
+// error, otherwise it is nil.
+func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return err
+ }
+ // Strip the TSIG from the incoming msg
+ stripped, tsig, err := stripTsig(msg)
+ if err != nil {
+ return err
+ }
+
+ msgMAC, err := hex.DecodeString(tsig.MAC)
+ if err != nil {
+ return err
+ }
+
+ buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
+
+ // Fudge factor works both ways. A message can arrive before it was signed because
+ // of clock skew.
+ now := uint64(time.Now().Unix())
+ ti := now - tsig.TimeSigned
+ if now < tsig.TimeSigned {
+ ti = tsig.TimeSigned - now
+ }
+ if uint64(tsig.Fudge) < ti {
+ return ErrTime
+ }
+
+ var h hash.Hash
+ switch strings.ToLower(tsig.Algorithm) {
+ case HmacMD5:
+ h = hmac.New(md5.New, rawsecret)
+ case HmacSHA1:
+ h = hmac.New(sha1.New, rawsecret)
+ case HmacSHA256:
+ h = hmac.New(sha256.New, rawsecret)
+ case HmacSHA512:
+ h = hmac.New(sha512.New, rawsecret)
+ default:
+ return ErrKeyAlg
+ }
+ h.Write(buf)
+ if !hmac.Equal(h.Sum(nil), msgMAC) {
+ return ErrSig
+ }
+ return nil
+}
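+
+// A minimal client-side TSIG sketch; the key name, base64 secret and server
+// address are illustrative, and the Client's TsigSecret map is assumed to
+// hold the same key the server is configured with:
+//
+//  c := new(Client)
+//  c.TsigSecret = map[string]string{"axfr.": "pRZgBrBvI4NAHZYhxmhs/Q=="}
+//  m := new(Msg)
+//  m.SetQuestion("example.org.", TypeSOA)
+//  m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
+//  r, _, err := c.Exchange(m, "198.51.100.1:53")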
+
+// Create a wiredata buffer for the MAC calculation.
+func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
+ var buf []byte
+ if rr.TimeSigned == 0 {
+ rr.TimeSigned = uint64(time.Now().Unix())
+ }
+ if rr.Fudge == 0 {
+ rr.Fudge = 300 // Standard (RFC) default.
+ }
+
+ if requestMAC != "" {
+ m := new(macWireFmt)
+ m.MACSize = uint16(len(requestMAC) / 2)
+ m.MAC = requestMAC
+ buf = make([]byte, len(requestMAC)) // long enough
+ n, _ := packMacWire(m, buf)
+ buf = buf[:n]
+ }
+
+ tsigvar := make([]byte, DefaultMsgSize)
+ if timersOnly {
+ tsig := new(timerWireFmt)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ n, _ := packTimerWire(tsig, tsigvar)
+ tsigvar = tsigvar[:n]
+ } else {
+ tsig := new(tsigWireFmt)
+ tsig.Name = strings.ToLower(rr.Hdr.Name)
+ tsig.Class = ClassANY
+ tsig.Ttl = rr.Hdr.Ttl
+ tsig.Algorithm = strings.ToLower(rr.Algorithm)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ tsig.Error = rr.Error
+ tsig.OtherLen = rr.OtherLen
+ tsig.OtherData = rr.OtherData
+ n, _ := packTsigWire(tsig, tsigvar)
+ tsigvar = tsigvar[:n]
+ }
+
+ if requestMAC != "" {
+ x := append(buf, msgbuf...)
+ buf = append(x, tsigvar...)
+ } else {
+ buf = append(msgbuf, tsigvar...)
+ }
+ return buf
+}
+
+// Strip the TSIG from the raw message.
+func stripTsig(msg []byte) ([]byte, *TSIG, error) {
+ // Copied from msg.go's Unpack() Header, but modified.
+ var (
+ dh Header
+ err error
+ )
+ off, tsigoff := 0, 0
+
+ if dh, off, err = unpackMsgHdr(msg, off); err != nil {
+ return nil, nil, err
+ }
+ if dh.Arcount == 0 {
+ return nil, nil, ErrNoSig
+ }
+
+ // Rcode, see msg.go Unpack()
+ if int(dh.Bits&0xF) == RcodeNotAuth {
+ return nil, nil, ErrAuth
+ }
+
+ for i := 0; i < int(dh.Qdcount); i++ {
+ _, off, err = unpackQuestion(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ _, off, err = unpackRRslice(int(dh.Ancount), msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ _, off, err = unpackRRslice(int(dh.Nscount), msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ rr := new(TSIG)
+ var extra RR
+ for i := 0; i < int(dh.Arcount); i++ {
+ tsigoff = off
+ extra, off, err = UnpackRR(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ if extra.Header().Rrtype == TypeTSIG {
+ rr = extra.(*TSIG)
+ // Adjust Arcount.
+ arcount := binary.BigEndian.Uint16(msg[10:])
+ binary.BigEndian.PutUint16(msg[10:], arcount-1)
+ break
+ }
+ }
+ if rr == nil {
+ return nil, nil, ErrNoSig
+ }
+ return msg[:tsigoff], rr, nil
+}
+
+// Translate the TSIG time signed into a date. There is no
+// need for RFC1982 calculations as this date is 48 bits.
+func tsigTimeToString(t uint64) string {
+ ti := time.Unix(int64(t), 0).UTC()
+ return ti.Format("20060102150405")
+}
+
+func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go TSIG packing
+ // RR_Header
+ off, err := PackDomainName(tw.Name, msg, 0, nil, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Class, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(tw.Ttl, msg, off)
+ if err != nil {
+ return off, err
+ }
+
+ off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint48(tw.TimeSigned, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+
+ off, err = packUint16(tw.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(tw.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
+ off, err := packUint16(mw.MACSize, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(mw.MAC, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
+ off, err := packUint48(tw.TimeSigned, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/miekg/dns/tsig_test.go
new file mode 100644
index 000000000..48b9988b6
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tsig_test.go
@@ -0,0 +1,37 @@
+package dns
+
+import (
+ "testing"
+ "time"
+)
+
+func newTsig(algo string) *Msg {
+ m := new(Msg)
+ m.SetQuestion("example.org.", TypeA)
+ m.SetTsig("example.", algo, 300, time.Now().Unix())
+ return m
+}
+
+func TestTsig(t *testing.T) {
+ m := newTsig(HmacMD5)
+ buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestTsigCase(t *testing.T) {
+ m := newTsig("HmAc-mD5.sig-ALg.rEg.int.") // HmacMD5
+ buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
new file mode 100644
index 000000000..5059d1a79
--- /dev/null
+++ b/vendor/github.com/miekg/dns/types.go
@@ -0,0 +1,1249 @@
+package dns
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type (
+ // Type is a DNS type.
+ Type uint16
+ // Class is a DNS class.
+ Class uint16
+ // Name is a DNS domain name.
+ Name string
+)
+
+// Packet formats
+
+// Wire constants and supported types.
+const (
+ // valid RR_Header.Rrtype and Question.qtype
+
+ TypeNone uint16 = 0
+ TypeA uint16 = 1
+ TypeNS uint16 = 2
+ TypeMD uint16 = 3
+ TypeMF uint16 = 4
+ TypeCNAME uint16 = 5
+ TypeSOA uint16 = 6
+ TypeMB uint16 = 7
+ TypeMG uint16 = 8
+ TypeMR uint16 = 9
+ TypeNULL uint16 = 10
+ TypePTR uint16 = 12
+ TypeHINFO uint16 = 13
+ TypeMINFO uint16 = 14
+ TypeMX uint16 = 15
+ TypeTXT uint16 = 16
+ TypeRP uint16 = 17
+ TypeAFSDB uint16 = 18
+ TypeX25 uint16 = 19
+ TypeISDN uint16 = 20
+ TypeRT uint16 = 21
+ TypeNSAPPTR uint16 = 23
+ TypeSIG uint16 = 24
+ TypeKEY uint16 = 25
+ TypePX uint16 = 26
+ TypeGPOS uint16 = 27
+ TypeAAAA uint16 = 28
+ TypeLOC uint16 = 29
+ TypeNXT uint16 = 30
+ TypeEID uint16 = 31
+ TypeNIMLOC uint16 = 32
+ TypeSRV uint16 = 33
+ TypeATMA uint16 = 34
+ TypeNAPTR uint16 = 35
+ TypeKX uint16 = 36
+ TypeCERT uint16 = 37
+ TypeDNAME uint16 = 39
+ TypeOPT uint16 = 41 // EDNS
+ TypeDS uint16 = 43
+ TypeSSHFP uint16 = 44
+ TypeRRSIG uint16 = 46
+ TypeNSEC uint16 = 47
+ TypeDNSKEY uint16 = 48
+ TypeDHCID uint16 = 49
+ TypeNSEC3 uint16 = 50
+ TypeNSEC3PARAM uint16 = 51
+ TypeTLSA uint16 = 52
+ TypeHIP uint16 = 55
+ TypeNINFO uint16 = 56
+ TypeRKEY uint16 = 57
+ TypeTALINK uint16 = 58
+ TypeCDS uint16 = 59
+ TypeCDNSKEY uint16 = 60
+ TypeOPENPGPKEY uint16 = 61
+ TypeSPF uint16 = 99
+ TypeUINFO uint16 = 100
+ TypeUID uint16 = 101
+ TypeGID uint16 = 102
+ TypeUNSPEC uint16 = 103
+ TypeNID uint16 = 104
+ TypeL32 uint16 = 105
+ TypeL64 uint16 = 106
+ TypeLP uint16 = 107
+ TypeEUI48 uint16 = 108
+ TypeEUI64 uint16 = 109
+ TypeURI uint16 = 256
+ TypeCAA uint16 = 257
+
+ TypeTKEY uint16 = 249
+ TypeTSIG uint16 = 250
+
+ // valid Question.Qtype only
+ TypeIXFR uint16 = 251
+ TypeAXFR uint16 = 252
+ TypeMAILB uint16 = 253
+ TypeMAILA uint16 = 254
+ TypeANY uint16 = 255
+
+ TypeTA uint16 = 32768
+ TypeDLV uint16 = 32769
+ TypeReserved uint16 = 65535
+
+ // valid Question.Qclass
+ ClassINET = 1
+ ClassCSNET = 2
+ ClassCHAOS = 3
+ ClassHESIOD = 4
+ ClassNONE = 254
+ ClassANY = 255
+
+ // Message Response Codes.
+ RcodeSuccess = 0
+ RcodeFormatError = 1
+ RcodeServerFailure = 2
+ RcodeNameError = 3
+ RcodeNotImplemented = 4
+ RcodeRefused = 5
+ RcodeYXDomain = 6
+ RcodeYXRrset = 7
+ RcodeNXRrset = 8
+ RcodeNotAuth = 9
+ RcodeNotZone = 10
+ RcodeBadSig = 16 // TSIG
+ RcodeBadVers = 16 // EDNS0
+ RcodeBadKey = 17
+ RcodeBadTime = 18
+ RcodeBadMode = 19 // TKEY
+ RcodeBadName = 20
+ RcodeBadAlg = 21
+ RcodeBadTrunc = 22 // TSIG
+ RcodeBadCookie = 23 // DNS Cookies
+
+ // Message Opcodes. There is no 3.
+ OpcodeQuery = 0
+ OpcodeIQuery = 1
+ OpcodeStatus = 2
+ OpcodeNotify = 4
+ OpcodeUpdate = 5
+)
+
+// Header is the wire format for the DNS packet header.
+type Header struct {
+ Id uint16
+ Bits uint16
+ Qdcount, Ancount, Nscount, Arcount uint16
+}
+
+const (
+ headerSize = 12
+
+ // Header.Bits
+ _QR = 1 << 15 // query/response (response=1)
+ _AA = 1 << 10 // authoritative
+ _TC = 1 << 9 // truncated
+ _RD = 1 << 8 // recursion desired
+ _RA = 1 << 7 // recursion available
+ _Z = 1 << 6 // Z
+ _AD = 1 << 5 // authticated data
+ _CD = 1 << 4 // checking disabled
+
+ LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
+ LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
+
+ LOC_HOURS = 60 * 1000
+ LOC_DEGREES = 60 * LOC_HOURS
+
+ LOC_ALTITUDEBASE = 100000
+)
+
+// Different Certificate Types, see RFC 4398, Section 2.1
+const (
+ CertPKIX = 1 + iota
+ CertSPKI
+ CertPGP
+ CertIPIX
+ CertISPKI
+ CertIPGP
+ CertACPKIX
+ CertIACPKIX
+ CertURI = 253
+ CertOID = 254
+)
+
+// CertTypeToString converts the Cert Type to its string representation.
+// See RFC 4398 and RFC 6944.
+var CertTypeToString = map[uint16]string{
+ CertPKIX: "PKIX",
+ CertSPKI: "SPKI",
+ CertPGP: "PGP",
+ CertIPIX: "IPIX",
+ CertISPKI: "ISPKI",
+ CertIPGP: "IPGP",
+ CertACPKIX: "ACPKIX",
+ CertIACPKIX: "IACPKIX",
+ CertURI: "URI",
+ CertOID: "OID",
+}
+
+// StringToCertType is the reverse of CertTypeToString.
+var StringToCertType = reverseInt16(CertTypeToString)
+
+//go:generate go run types_generate.go
+
+// Question holds a DNS question. There can be multiple questions in the
+// question section of a message. Usually there is just one.
+type Question struct {
+ Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
+ Qtype uint16
+ Qclass uint16
+}
+
+func (q *Question) len() int {
+ return len(q.Name) + 1 + 2 + 2
+}
+
+func (q *Question) String() (s string) {
+ // prefix with ; (as in dig)
+ s = ";" + sprintName(q.Name) + "\t"
+ s += Class(q.Qclass).String() + "\t"
+ s += " " + Type(q.Qtype).String()
+ return s
+}
+
+// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY
+// is named "*" there.
+type ANY struct {
+ Hdr RR_Header
+ // Does not have any rdata
+}
+
+func (rr *ANY) String() string { return rr.Hdr.String() }
+
+type CNAME struct {
+ Hdr RR_Header
+ Target string `dns:"cdomain-name"`
+}
+
+func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) }
+
+type HINFO struct {
+ Hdr RR_Header
+ Cpu string
+ Os string
+}
+
+func (rr *HINFO) String() string {
+ return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
+}
+
+type MB struct {
+ Hdr RR_Header
+ Mb string `dns:"cdomain-name"`
+}
+
+func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) }
+
+type MG struct {
+ Hdr RR_Header
+ Mg string `dns:"cdomain-name"`
+}
+
+func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) }
+
+type MINFO struct {
+ Hdr RR_Header
+ Rmail string `dns:"cdomain-name"`
+ Email string `dns:"cdomain-name"`
+}
+
+func (rr *MINFO) String() string {
+ return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
+}
+
+type MR struct {
+ Hdr RR_Header
+ Mr string `dns:"cdomain-name"`
+}
+
+func (rr *MR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mr)
+}
+
+type MF struct {
+ Hdr RR_Header
+ Mf string `dns:"cdomain-name"`
+}
+
+func (rr *MF) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mf)
+}
+
+type MD struct {
+ Hdr RR_Header
+ Md string `dns:"cdomain-name"`
+}
+
+func (rr *MD) String() string {
+ return rr.Hdr.String() + sprintName(rr.Md)
+}
+
+type MX struct {
+ Hdr RR_Header
+ Preference uint16
+ Mx string `dns:"cdomain-name"`
+}
+
+func (rr *MX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
+}
+
+type AFSDB struct {
+ Hdr RR_Header
+ Subtype uint16
+ Hostname string `dns:"cdomain-name"`
+}
+
+func (rr *AFSDB) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
+}
+
+type X25 struct {
+ Hdr RR_Header
+ PSDNAddress string
+}
+
+func (rr *X25) String() string {
+ return rr.Hdr.String() + rr.PSDNAddress
+}
+
+type RT struct {
+ Hdr RR_Header
+ Preference uint16
+ Host string `dns:"cdomain-name"`
+}
+
+func (rr *RT) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
+}
+
+type NS struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+}
+
+func (rr *NS) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns)
+}
+
+type PTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"cdomain-name"`
+}
+
+func (rr *PTR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ptr)
+}
+
+type RP struct {
+ Hdr RR_Header
+ Mbox string `dns:"domain-name"`
+ Txt string `dns:"domain-name"`
+}
+
+func (rr *RP) String() string {
+ return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt})
+}
+
+type SOA struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+ Mbox string `dns:"cdomain-name"`
+ Serial uint32
+ Refresh uint32
+ Retry uint32
+ Expire uint32
+ Minttl uint32
+}
+
+func (rr *SOA) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) +
+ " " + strconv.FormatInt(int64(rr.Serial), 10) +
+ " " + strconv.FormatInt(int64(rr.Refresh), 10) +
+ " " + strconv.FormatInt(int64(rr.Retry), 10) +
+ " " + strconv.FormatInt(int64(rr.Expire), 10) +
+ " " + strconv.FormatInt(int64(rr.Minttl), 10)
+}
+
+type TXT struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+func sprintName(s string) string {
+ src := []byte(s)
+ dst := make([]byte, 0, len(src))
+ for i := 0; i < len(src); {
+ if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
+ dst = append(dst, src[i:i+2]...)
+ i += 2
+ } else {
+ b, n := nextByte(src, i)
+ if n == 0 {
+ i++ // dangling back slash
+ } else if b == '.' {
+ dst = append(dst, b)
+ } else {
+ dst = appendDomainNameByte(dst, b)
+ }
+ i += n
+ }
+ }
+ return string(dst)
+}
+
+func sprintTxtOctet(s string) string {
+ src := []byte(s)
+ dst := make([]byte, 0, len(src))
+ dst = append(dst, '"')
+ for i := 0; i < len(src); {
+ if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
+ dst = append(dst, src[i:i+2]...)
+ i += 2
+ } else {
+ b, n := nextByte(src, i)
+ if n == 0 {
+ i++ // dangling backslash
+ } else if b == '.' {
+ dst = append(dst, b)
+ } else {
+ if b < ' ' || b > '~' {
+ dst = appendByte(dst, b)
+ } else {
+ dst = append(dst, b)
+ }
+ }
+ i += n
+ }
+ }
+ dst = append(dst, '"')
+ return string(dst)
+}
+
+func sprintTxt(txt []string) string {
+ var out []byte
+ for i, s := range txt {
+ if i > 0 {
+ out = append(out, ` "`...)
+ } else {
+ out = append(out, '"')
+ }
+ bs := []byte(s)
+ for j := 0; j < len(bs); {
+ b, n := nextByte(bs, j)
+ if n == 0 {
+ break
+ }
+ out = appendTXTStringByte(out, b)
+ j += n
+ }
+ out = append(out, '"')
+ }
+ return string(out)
+}
+
+func appendDomainNameByte(s []byte, b byte) []byte {
+ switch b {
+ case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
+ return append(s, '\\', b)
+ }
+ return appendTXTStringByte(s, b)
+}
+
+func appendTXTStringByte(s []byte, b byte) []byte {
+ switch b {
+ case '\t':
+ return append(s, '\\', 't')
+ case '\r':
+ return append(s, '\\', 'r')
+ case '\n':
+ return append(s, '\\', 'n')
+ case '"', '\\':
+ return append(s, '\\', b)
+ }
+ if b < ' ' || b > '~' {
+ return appendByte(s, b)
+ }
+ return append(s, b)
+}
+
+func appendByte(s []byte, b byte) []byte {
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ return s
+}
+
+func nextByte(b []byte, offset int) (byte, int) {
+ if offset >= len(b) {
+ return 0, 0
+ }
+ if b[offset] != '\\' {
+ // not an escape sequence
+ return b[offset], 1
+ }
+ switch len(b) - offset {
+ case 1: // dangling escape
+ return 0, 0
+ case 2, 3: // too short to be \ddd
+ default: // maybe \ddd
+ if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) {
+ return dddToByte(b[offset+1:]), 4
+ }
+ }
+ // not \ddd, maybe a control char
+ switch b[offset+1] {
+ case 't':
+ return '\t', 2
+ case 'r':
+ return '\r', 2
+ case 'n':
+ return '\n', 2
+ default:
+ return b[offset+1], 2
+ }
+}
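+
+// Editor's note: a minimal illustration (not part of the upstream source) of
+// how the escaping helpers above behave in presentation format:
+//
+//    sprintTxt([]string{"a\tb"})      // `"a\tb"`  - the tab is escaped inside the quotes
+//    sprintTxt([]string{`say "hi"`})  // `"say \"hi\""`  - quotes and backslashes are escaped
+//    sprintName("a b.example.org.")   // `a\ b.example.org.`  - spaces inside labels are escaped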
+
+type SPF struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+type SRV struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Port uint16
+ Target string `dns:"domain-name"`
+}
+
+func (rr *SRV) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Priority)) + " " +
+ strconv.Itoa(int(rr.Weight)) + " " +
+ strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
+}
+
+type NAPTR struct {
+ Hdr RR_Header
+ Order uint16
+ Preference uint16
+ Flags string
+ Service string
+ Regexp string
+ Replacement string `dns:"domain-name"`
+}
+
+func (rr *NAPTR) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Order)) + " " +
+ strconv.Itoa(int(rr.Preference)) + " " +
+ "\"" + rr.Flags + "\" " +
+ "\"" + rr.Service + "\" " +
+ "\"" + rr.Regexp + "\" " +
+ rr.Replacement
+}
+
+// The CERT resource record, see RFC 4398.
+type CERT struct {
+ Hdr RR_Header
+ Type uint16
+ KeyTag uint16
+ Algorithm uint8
+ Certificate string `dns:"base64"`
+}
+
+func (rr *CERT) String() string {
+ var (
+ ok bool
+ certtype, algorithm string
+ )
+ if certtype, ok = CertTypeToString[rr.Type]; !ok {
+ certtype = strconv.Itoa(int(rr.Type))
+ }
+ if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok {
+ algorithm = strconv.Itoa(int(rr.Algorithm))
+ }
+ return rr.Hdr.String() + certtype +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + algorithm +
+ " " + rr.Certificate
+}
+
+// The DNAME resource record, see RFC 2672.
+type DNAME struct {
+ Hdr RR_Header
+ Target string `dns:"domain-name"`
+}
+
+func (rr *DNAME) String() string {
+ return rr.Hdr.String() + sprintName(rr.Target)
+}
+
+type A struct {
+ Hdr RR_Header
+ A net.IP `dns:"a"`
+}
+
+func (rr *A) String() string {
+ if rr.A == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.A.String()
+}
+
+type AAAA struct {
+ Hdr RR_Header
+ AAAA net.IP `dns:"aaaa"`
+}
+
+func (rr *AAAA) String() string {
+ if rr.AAAA == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.AAAA.String()
+}
+
+type PX struct {
+ Hdr RR_Header
+ Preference uint16
+ Map822 string `dns:"domain-name"`
+ Mapx400 string `dns:"domain-name"`
+}
+
+func (rr *PX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
+}
+
+type GPOS struct {
+ Hdr RR_Header
+ Longitude string
+ Latitude string
+ Altitude string
+}
+
+func (rr *GPOS) String() string {
+ return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
+}
+
+type LOC struct {
+ Hdr RR_Header
+ Version uint8
+ Size uint8
+ HorizPre uint8
+ VertPre uint8
+ Latitude uint32
+ Longitude uint32
+ Altitude uint32
+}
+
+// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
+// format and returns a string in meters (with two decimals for the cm part).
+func cmToM(m, e uint8) string {
+ if e < 2 {
+ if e == 1 {
+ m *= 10
+ }
+
+ return fmt.Sprintf("0.%02d", m)
+ }
+
+ s := fmt.Sprintf("%d", m)
+ for e > 2 {
+ s += "0"
+ e--
+ }
+ return s
+}
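+
+// For illustration (values taken from the accompanying tests): cmToM(0, 0)
+// returns "0.00", cmToM(3, 1) returns "0.30" and cmToM(5, 3) returns "50",
+// i.e. the mantissa m scaled by 10^e centimeters, rendered in meters.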
+
+func (rr *LOC) String() string {
+ s := rr.Hdr.String()
+
+ lat := rr.Latitude
+ ns := "N"
+ if lat > LOC_EQUATOR {
+ lat = lat - LOC_EQUATOR
+ } else {
+ ns = "S"
+ lat = LOC_EQUATOR - lat
+ }
+ h := lat / LOC_DEGREES
+ lat = lat % LOC_DEGREES
+ m := lat / LOC_HOURS
+ lat = lat % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns)
+
+ lon := rr.Longitude
+ ew := "E"
+ if lon > LOC_PRIMEMERIDIAN {
+ lon = lon - LOC_PRIMEMERIDIAN
+ } else {
+ ew = "W"
+ lon = LOC_PRIMEMERIDIAN - lon
+ }
+ h = lon / LOC_DEGREES
+ lon = lon % LOC_DEGREES
+ m = lon / LOC_HOURS
+ lon = lon % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew)
+
+ var alt = float64(rr.Altitude) / 100
+ alt -= LOC_ALTITUDEBASE
+ if rr.Altitude%100 != 0 {
+ s += fmt.Sprintf("%.2fm ", alt)
+ } else {
+ s += fmt.Sprintf("%.0fm ", alt)
+ }
+
+ s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m "
+ s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m "
+ s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m"
+
+ return s
+}
+
+// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931.
+type SIG struct {
+ RRSIG
+}
+
+type RRSIG struct {
+ Hdr RR_Header
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ Signature string `dns:"base64"`
+}
+
+func (rr *RRSIG) String() string {
+ s := rr.Hdr.String()
+ s += Type(rr.TypeCovered).String()
+ s += " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Labels)) +
+ " " + strconv.FormatInt(int64(rr.OrigTtl), 10) +
+ " " + TimeToString(rr.Expiration) +
+ " " + TimeToString(rr.Inception) +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + sprintName(rr.SignerName) +
+ " " + rr.Signature
+ return s
+}
+
+type NSEC struct {
+ Hdr RR_Header
+ NextDomain string `dns:"domain-name"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC) String() string {
+ s := rr.Hdr.String() + sprintName(rr.NextDomain)
+ for i := 0; i < len(rr.TypeBitMap); i++ {
+ s += " " + Type(rr.TypeBitMap[i]).String()
+ }
+ return s
+}
+
+func (rr *NSEC) len() int {
+ l := rr.Hdr.len() + len(rr.NextDomain) + 1
+ lastwindow := uint32(2 ^ 32 + 1)
+ for _, t := range rr.TypeBitMap {
+ window := t / 256
+ if uint32(window) != lastwindow {
+ l += 1 + 32
+ }
+ lastwindow = uint32(window)
+ }
+ return l
+}
+
+type DLV struct {
+ DS
+}
+
+type CDS struct {
+ DS
+}
+
+type DS struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *DS) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+type KX struct {
+ Hdr RR_Header
+ Preference uint16
+ Exchanger string `dns:"domain-name"`
+}
+
+func (rr *KX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + sprintName(rr.Exchanger)
+}
+
+type TA struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *TA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+type TALINK struct {
+ Hdr RR_Header
+ PreviousName string `dns:"domain-name"`
+ NextName string `dns:"domain-name"`
+}
+
+func (rr *TALINK) String() string {
+ return rr.Hdr.String() +
+ sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
+}
+
+type SSHFP struct {
+ Hdr RR_Header
+ Algorithm uint8
+ Type uint8
+ FingerPrint string `dns:"hex"`
+}
+
+func (rr *SSHFP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Type)) +
+ " " + strings.ToUpper(rr.FingerPrint)
+}
+
+type KEY struct {
+ DNSKEY
+}
+
+type CDNSKEY struct {
+ DNSKEY
+}
+
+type DNSKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *DNSKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+type RKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *RKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+type NSAPPTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"domain-name"`
+}
+
+func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) }
+
+type NSEC3 struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"size-hex:SaltLength"`
+ HashLength uint8
+ NextDomain string `dns:"size-base32:HashLength"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC3) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt) +
+ " " + rr.NextDomain
+ for i := 0; i < len(rr.TypeBitMap); i++ {
+ s += " " + Type(rr.TypeBitMap[i]).String()
+ }
+ return s
+}
+
+func (rr *NSEC3) len() int {
+ l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
+ lastwindow := uint32(2 ^ 32 + 1)
+ for _, t := range rr.TypeBitMap {
+ window := t / 256
+ if uint32(window) != lastwindow {
+ l += 1 + 32
+ }
+ lastwindow = uint32(window)
+ }
+ return l
+}
+
+type NSEC3PARAM struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"size-hex:SaltLength"`
+}
+
+func (rr *NSEC3PARAM) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt)
+ return s
+}
+
+type TKEY struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ Inception uint32
+ Expiration uint32
+ Mode uint16
+ Error uint16
+ KeySize uint16
+ Key string
+ OtherLen uint16
+ OtherData string
+}
+
+func (rr *TKEY) String() string {
+ // It has no presentation format
+ return ""
+}
+
+// RFC3597 represents an unknown/generic RR.
+type RFC3597 struct {
+ Hdr RR_Header
+ Rdata string `dns:"hex"`
+}
+
+func (rr *RFC3597) String() string {
+ // Let's call it a hack
+ s := rfc3597Header(rr.Hdr)
+
+ s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata
+ return s
+}
+
+func rfc3597Header(h RR_Header) string {
+ var s string
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
+ s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
+ return s
+}
+
+type URI struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Target string `dns:"octet"`
+}
+
+func (rr *URI) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
+ " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
+}
+
+type DHCID struct {
+ Hdr RR_Header
+ Digest string `dns:"base64"`
+}
+
+func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest }
+
+type TLSA struct {
+ Hdr RR_Header
+ Usage uint8
+ Selector uint8
+ MatchingType uint8
+ Certificate string `dns:"hex"`
+}
+
+func (rr *TLSA) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Usage)) +
+ " " + strconv.Itoa(int(rr.Selector)) +
+ " " + strconv.Itoa(int(rr.MatchingType)) +
+ " " + rr.Certificate
+}
+
+type HIP struct {
+ Hdr RR_Header
+ HitLength uint8
+ PublicKeyAlgorithm uint8
+ PublicKeyLength uint16
+ Hit string `dns:"size-hex:HitLength"`
+ PublicKey string `dns:"size-base64:PublicKeyLength"`
+ RendezvousServers []string `dns:"domain-name"`
+}
+
+func (rr *HIP) String() string {
+ s := rr.Hdr.String() +
+ strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
+ " " + rr.Hit +
+ " " + rr.PublicKey
+ for _, d := range rr.RendezvousServers {
+ s += " " + sprintName(d)
+ }
+ return s
+}
+
+type NINFO struct {
+ Hdr RR_Header
+ ZSData []string `dns:"txt"`
+}
+
+func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
+
+type NID struct {
+ Hdr RR_Header
+ Preference uint16
+ NodeID uint64
+}
+
+func (rr *NID) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16x", rr.NodeID)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+type L32 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator32 net.IP `dns:"a"`
+}
+
+func (rr *L32) String() string {
+ if rr.Locator32 == nil {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ }
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + rr.Locator32.String()
+}
+
+type L64 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator64 uint64
+}
+
+func (rr *L64) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16X", rr.Locator64)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+type LP struct {
+ Hdr RR_Header
+ Preference uint16
+ Fqdn string `dns:"domain-name"`
+}
+
+func (rr *LP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
+}
+
+type EUI48 struct {
+ Hdr RR_Header
+ Address uint64 `dns:"uint48"`
+}
+
+func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) }
+
+type EUI64 struct {
+ Hdr RR_Header
+ Address uint64
+}
+
+func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }
+
+type CAA struct {
+ Hdr RR_Header
+ Flag uint8
+ Tag string
+ Value string `dns:"octet"`
+}
+
+func (rr *CAA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
+}
+
+type UID struct {
+ Hdr RR_Header
+ Uid uint32
+}
+
+func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }
+
+type GID struct {
+ Hdr RR_Header
+ Gid uint32
+}
+
+func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }
+
+type UINFO struct {
+ Hdr RR_Header
+ Uinfo string
+}
+
+func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }
+
+type EID struct {
+ Hdr RR_Header
+ Endpoint string `dns:"hex"`
+}
+
+func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }
+
+type NIMLOC struct {
+ Hdr RR_Header
+ Locator string `dns:"hex"`
+}
+
+func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }
+
+type OPENPGPKEY struct {
+ Hdr RR_Header
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey }
+
+// TimeToString translates the RRSIG's inception and expiration times to the
+// string representation used when printing the record.
+// It takes serial arithmetic (RFC 1982) into account.
+func TimeToString(t uint32) string {
+ mod := ((int64(t) - time.Now().Unix()) / year68) - 1
+ if mod < 0 {
+ mod = 0
+ }
+ ti := time.Unix(int64(t)-(mod*year68), 0).UTC()
+ return ti.Format("20060102150405")
+}
+
+// StringToTime translates the RRSIG's inception and expiration times from
+// string values like "20110403154150" to a 32-bit integer.
+// It takes serial arithmetic (RFC 1982) into account.
+func StringToTime(s string) (uint32, error) {
+ t, err := time.Parse("20060102150405", s)
+ if err != nil {
+ return 0, err
+ }
+ mod := (t.Unix() / year68) - 1
+ if mod < 0 {
+ mod = 0
+ }
+ return uint32(t.Unix() - (mod * year68)), nil
+}
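+
+// Editor's note, an illustrative sketch (not upstream code): for timestamps in
+// the current 68-year window the two functions round-trip, e.g.
+//
+//    s := TimeToString(rr.Expiration) // "YYYYMMDDHHMMSS" in UTC
+//    t, err := StringToTime(s)        // t equals rr.Expiration again, err == nil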
+
+// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.
+func saltToString(s string) string {
+ if len(s) == 0 {
+ return "-"
+ }
+ return strings.ToUpper(s)
+}
+
+func euiToString(eui uint64, bits int) (hex string) {
+ switch bits {
+ case 64:
+ hex = fmt.Sprintf("%16.16x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
+ case 48:
+ hex = fmt.Sprintf("%12.12x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12]
+ }
+ return
+}
+
+// copyIP returns a copy of ip.
+func copyIP(ip net.IP) net.IP {
+ p := make(net.IP, len(ip))
+ copy(p, ip)
+ return p
+}
diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go
new file mode 100644
index 000000000..bf80da329
--- /dev/null
+++ b/vendor/github.com/miekg/dns/types_generate.go
@@ -0,0 +1,271 @@
+//+build ignore
+
+// types_generate.go is meant to run with go generate. It will use
+// go/{importer,types} to track down all the RR struct types. Then for each type
+// it will generate conversion tables (TypeToRR and TypeToString) and banal
+// methods (len, Header, copy) based on the struct tags. The generated source is
+// written to ztypes.go, and is meant to be checked into git.
+package main
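+
+// Editor's note: the ignore build tag above keeps this file out of normal
+// builds; it is assumed to be hooked up through a //go:generate directive in
+// the package source (e.g. `//go:generate go run types_generate.go`), so that
+// running `go generate` regenerates ztypes.go.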
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/importer"
+ "go/types"
+ "log"
+ "os"
+ "strings"
+ "text/template"
+)
+
+var skipLen = map[string]struct{}{
+ "NSEC": {},
+ "NSEC3": {},
+ "OPT": {},
+}
+
+var packageHdr = `
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from types_generate.go
+
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+)
+
+`
+
+var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
+// TypeToRR is a map of constructors for each RR type.
+var TypeToRR = map[uint16]func() RR{
+{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
+{{end}}{{end}} }
+
+`))
+
+var typeToString = template.Must(template.New("typeToString").Parse(`
+// TypeToString is a map of strings for each RR type.
+var TypeToString = map[uint16]string{
+{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
+{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
+}
+
+`))
+
+var headerFunc = template.Must(template.New("headerFunc").Parse(`
+// Header() functions
+{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
+{{end}}
+
+`))
+
+// getTypeStruct will take a type and the package scope, and return the
+// (innermost) struct if the type is considered an RR type (currently defined as
+// those structs beginning with an RR_Header; this could be redefined as implementing
+// the RR interface). The bool return value indicates if embedded structs were
+// resolved.
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
+ st, ok := t.Underlying().(*types.Struct)
+ if !ok {
+ return nil, false
+ }
+ if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
+ return st, false
+ }
+ if st.Field(0).Anonymous() {
+ st, _ := getTypeStruct(st.Field(0).Type(), scope)
+ return st, true
+ }
+ return nil, false
+}
+
+func main() {
+ // Import and type-check the package
+ pkg, err := importer.Default().Import("github.com/miekg/dns")
+ fatalIfErr(err)
+ scope := pkg.Scope()
+
+ // Collect constants like TypeX
+ var numberedTypes []string
+ for _, name := range scope.Names() {
+ o := scope.Lookup(name)
+ if o == nil || !o.Exported() {
+ continue
+ }
+ b, ok := o.Type().(*types.Basic)
+ if !ok || b.Kind() != types.Uint16 {
+ continue
+ }
+ if !strings.HasPrefix(o.Name(), "Type") {
+ continue
+ }
+ name := strings.TrimPrefix(o.Name(), "Type")
+ if name == "PrivateRR" {
+ continue
+ }
+ numberedTypes = append(numberedTypes, name)
+ }
+
+ // Collect actual types (*X)
+ var namedTypes []string
+ for _, name := range scope.Names() {
+ o := scope.Lookup(name)
+ if o == nil || !o.Exported() {
+ continue
+ }
+ if st, _ := getTypeStruct(o.Type(), scope); st == nil {
+ continue
+ }
+ if name == "PrivateRR" {
+ continue
+ }
+
+ // Check if corresponding TypeX exists
+ if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
+ log.Fatalf("Constant Type%s does not exist.", o.Name())
+ }
+
+ namedTypes = append(namedTypes, o.Name())
+ }
+
+ b := &bytes.Buffer{}
+ b.WriteString(packageHdr)
+
+ // Generate TypeToRR
+ fatalIfErr(TypeToRR.Execute(b, namedTypes))
+
+ // Generate typeToString
+ fatalIfErr(typeToString.Execute(b, numberedTypes))
+
+ // Generate headerFunc
+ fatalIfErr(headerFunc.Execute(b, namedTypes))
+
+ // Generate len()
+ fmt.Fprint(b, "// len() functions\n")
+ for _, name := range namedTypes {
+ if _, ok := skipLen[name]; ok {
+ continue
+ }
+ o := scope.Lookup(name)
+ st, isEmbedded := getTypeStruct(o.Type(), scope)
+ if isEmbedded {
+ continue
+ }
+ fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
+ fmt.Fprintf(b, "l := rr.Hdr.len()\n")
+ for i := 1; i < st.NumFields(); i++ {
+ o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
+
+ if _, ok := st.Field(i).Type().(*types.Slice); ok {
+ switch st.Tag(i) {
+ case `dns:"-"`:
+ // ignored
+ case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
+ o("for _, x := range rr.%s { l += len(x) + 1 }\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ continue
+ }
+
+ switch {
+ case st.Tag(i) == `dns:"-"`:
+ // ignored
+ case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
+ o("l += len(rr.%s) + 1\n")
+ case st.Tag(i) == `dns:"octet"`:
+ o("l += len(rr.%s)\n")
+ case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
+ fallthrough
+ case st.Tag(i) == `dns:"base64"`:
+ o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
+ case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
+ fallthrough
+ case st.Tag(i) == `dns:"hex"`:
+ o("l += len(rr.%s)/2 + 1\n")
+ case st.Tag(i) == `dns:"a"`:
+ o("l += net.IPv4len // %s\n")
+ case st.Tag(i) == `dns:"aaaa"`:
+ o("l += net.IPv6len // %s\n")
+ case st.Tag(i) == `dns:"txt"`:
+ o("for _, t := range rr.%s { l += len(t) + 1 }\n")
+ case st.Tag(i) == `dns:"uint48"`:
+ o("l += 6 // %s\n")
+ case st.Tag(i) == "":
+ switch st.Field(i).Type().(*types.Basic).Kind() {
+ case types.Uint8:
+ o("l += 1 // %s\n")
+ case types.Uint16:
+ o("l += 2 // %s\n")
+ case types.Uint32:
+ o("l += 4 // %s\n")
+ case types.Uint64:
+ o("l += 8 // %s\n")
+ case types.String:
+ o("l += len(rr.%s) + 1\n")
+ default:
+ log.Fatalln(name, st.Field(i).Name())
+ }
+ default:
+ log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
+ }
+ }
+ fmt.Fprintf(b, "return l }\n")
+ }
+
+ // Generate copy()
+ fmt.Fprint(b, "// copy() functions\n")
+ for _, name := range namedTypes {
+ o := scope.Lookup(name)
+ st, isEmbedded := getTypeStruct(o.Type(), scope)
+ if isEmbedded {
+ continue
+ }
+ fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
+ fields := []string{"*rr.Hdr.copyHeader()"}
+ for i := 1; i < st.NumFields(); i++ {
+ f := st.Field(i).Name()
+ if sl, ok := st.Field(i).Type().(*types.Slice); ok {
+ t := sl.Underlying().String()
+ t = strings.TrimPrefix(t, "[]")
+ if strings.Contains(t, ".") {
+ splits := strings.Split(t, ".")
+ t = splits[len(splits)-1]
+ }
+ fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
+ f, t, f, f, f)
+ fields = append(fields, f)
+ continue
+ }
+ if st.Field(i).Type().String() == "net.IP" {
+ fields = append(fields, "copyIP(rr."+f+")")
+ continue
+ }
+ fields = append(fields, "rr."+f)
+ }
+ fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
+ fmt.Fprintf(b, "}\n")
+ }
+
+ // gofmt
+ res, err := format.Source(b.Bytes())
+ if err != nil {
+ b.WriteTo(os.Stderr)
+ log.Fatal(err)
+ }
+
+ // write result
+ f, err := os.Create("ztypes.go")
+ fatalIfErr(err)
+ defer f.Close()
+ f.Write(res)
+}
+
+func fatalIfErr(err error) {
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/miekg/dns/types_test.go
new file mode 100644
index 000000000..118612946
--- /dev/null
+++ b/vendor/github.com/miekg/dns/types_test.go
@@ -0,0 +1,42 @@
+package dns
+
+import (
+ "testing"
+)
+
+func TestCmToM(t *testing.T) {
+ s := cmToM(0, 0)
+ if s != "0.00" {
+ t.Error("0, 0")
+ }
+
+ s = cmToM(1, 0)
+ if s != "0.01" {
+ t.Error("1, 0")
+ }
+
+ s = cmToM(3, 1)
+ if s != "0.30" {
+ t.Error("3, 1")
+ }
+
+ s = cmToM(4, 2)
+ if s != "4" {
+ t.Error("4, 2")
+ }
+
+ s = cmToM(5, 3)
+ if s != "50" {
+ t.Error("5, 3")
+ }
+
+ s = cmToM(7, 5)
+ if s != "7000" {
+ t.Error("7, 5")
+ }
+
+ s = cmToM(9, 9)
+ if s != "90000000" {
+ t.Error("9, 9")
+ }
+}
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
new file mode 100644
index 000000000..c79c6c883
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -0,0 +1,58 @@
+// +build !windows,!plan9
+
+package dns
+
+import (
+ "net"
+ "syscall"
+)
+
+// SessionUDP holds the remote address and the associated
+// out-of-band data.
+type SessionUDP struct {
+ raddr *net.UDPAddr
+ context []byte
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per-platform basis. See udp_*.go for more details.
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ sa, err := getUDPSocketName(conn)
+ if err != nil {
+ return err
+ }
+ switch sa.(type) {
+ case *syscall.SockaddrInet6:
+ v6only, err := getUDPSocketOptions6Only(conn)
+ if err != nil {
+ return err
+ }
+ setUDPSocketOptions6(conn)
+ if !v6only {
+ setUDPSocketOptions4(conn)
+ }
+ case *syscall.SockaddrInet4:
+ setUDPSocketOptions4(conn)
+ }
+ return nil
+}
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ oob := make([]byte, 40)
+ n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
+ if err != nil {
+ return n, nil, err
+ }
+ return n, &SessionUDP{raddr, oob[:oobn]}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
+ return n, err
+}
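+
+// Editor's note: an illustrative sketch (not part of the upstream source) of how
+// the two session helpers are meant to be paired - read a datagram, keep its
+// SessionUDP, and write the reply through the same session so the out-of-band
+// data (and therefore the source address) is preserved:
+//
+//    buf := make([]byte, 512)
+//    n, session, err := ReadFromSessionUDP(conn, buf)
+//    if err == nil {
+//        _, err = WriteToSessionUDP(conn, buf[:n], session) // echo back to the sender
+//    }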
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
new file mode 100644
index 000000000..c62d21881
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_linux.go
@@ -0,0 +1,73 @@
+// +build linux
+
+package dns
+
+// See:
+// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
+// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
+//
+// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides which
+// interface is used for the outgoing packet, and this is not always the correct one.
+// This code makes sure the egress packet's interface matches the ingress packet's.
+
+import (
+ "net"
+ "syscall"
+)
+
+// setUDPSocketOptions4 prepares the v4 socket for sessions.
+func setUDPSocketOptions4(conn *net.UDPConn) error {
+ file, err := conn.File()
+ if err != nil {
+ return err
+ }
+ if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
+ return err
+ }
+ // Calling File() above results in the connection becoming blocking; we must fix that.
+ // See https://github.com/miekg/dns/issues/279
+ err = syscall.SetNonblock(int(file.Fd()), true)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// setUDPSocketOptions6 prepares the v6 socket for sessions.
+func setUDPSocketOptions6(conn *net.UDPConn) error {
+ file, err := conn.File()
+ if err != nil {
+ return err
+ }
+ if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
+ return err
+ }
+ err = syscall.SetNonblock(int(file.Fd()), true)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is v4/v6 combined
+// (dualstack).
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
+ file, err := conn.File()
+ if err != nil {
+ return false, err
+ }
+ // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
+ v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
+ if err != nil {
+ return false, err
+ }
+ return v6only == 1, nil
+}
+
+func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
+ file, err := conn.File()
+ if err != nil {
+ return nil, err
+ }
+ return syscall.Getsockname(int(file.Fd()))
+}
diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go
new file mode 100644
index 000000000..d40732441
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_other.go
@@ -0,0 +1,17 @@
+// +build !linux,!plan9
+
+package dns
+
+import (
+ "net"
+ "syscall"
+)
+
+// These do nothing. See udp_linux.go for an example of how to implement this.
+
+// We tried to adhere to some kind of naming scheme.
+
+func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
+func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
+func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go
new file mode 100644
index 000000000..b794deeba
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_plan9.go
@@ -0,0 +1,34 @@
+package dns
+
+import (
+ "net"
+)
+
+func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
+
+// SessionUDP holds the remote address and the associated
+// out-of-band data.
+type SessionUDP struct {
+ raddr *net.UDPAddr
+ context []byte
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ oob := make([]byte, 40)
+ n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
+ if err != nil {
+ return n, nil, err
+ }
+ return n, &SessionUDP{raddr, oob[:oobn]}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
+ return n, err
+}
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
new file mode 100644
index 000000000..2ce4b3300
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -0,0 +1,34 @@
+// +build windows
+
+package dns
+
+import "net"
+
+type SessionUDP struct {
+ raddr *net.UDPAddr
+}
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ n, raddr, err := conn.ReadFrom(b)
+ if err != nil {
+ return n, nil, err
+ }
+ session := &SessionUDP{raddr.(*net.UDPAddr)}
+ return n, session, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, err := conn.WriteTo(b, session.raddr)
+ return n, err
+}
+
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per-platform basis. See udp_*.go for more details.
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
new file mode 100644
index 000000000..e90c5c968
--- /dev/null
+++ b/vendor/github.com/miekg/dns/update.go
@@ -0,0 +1,106 @@
+package dns
+
+// NameUsed sets the RRs in the prereq section to
+// "Name is in use" RRs. RFC 2136 section 2.4.4.
+func (u *Msg) NameUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+ }
+}
+
+// NameNotUsed sets the RRs in the prereq section to
+// "Name is in not use" RRs. RFC 2136 section 2.4.5.
+func (u *Msg) NameNotUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
+ }
+}
+
+// Used sets the RRs in the prereq section to
+// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
+func (u *Msg) Used(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ r.Header().Class = u.Question[0].Qclass
+ u.Answer = append(u.Answer, r)
+ }
+}
+
+// RRsetUsed sets the RRs in the prereq section to
+// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
+func (u *Msg) RRsetUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
+ }
+}
+
+// RRsetNotUsed sets the RRs in the prereq section to
+// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
+func (u *Msg) RRsetNotUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
+ }
+}
+
+// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
+func (u *Msg) Insert(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ r.Header().Class = u.Question[0].Qclass
+ u.Ns = append(u.Ns, r)
+ }
+}
+
+// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
+func (u *Msg) RemoveRRset(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
+ }
+}
+
+// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
+func (u *Msg) RemoveName(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+ }
+}
+
+// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
+func (u *Msg) Remove(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ r.Header().Class = ClassNONE
+ r.Header().Ttl = 0
+ u.Ns = append(u.Ns, r)
+ }
+}
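+
+// Editor's note: a minimal sketch (not upstream code) of composing a dynamic
+// update with the helpers above, mirroring how the accompanying tests use them:
+//
+//    m := new(Msg)
+//    m.SetUpdate("example.org.")
+//    rr, _ := NewRR("www.example.org. 300 IN A 192.0.2.1")
+//    m.RemoveRRset([]RR{rr}) // first delete any existing A RRset for the name
+//    m.Insert([]RR{rr})      // then add the new record
+//    // m can now be sent to the zone's primary server, e.g. with a Client.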
diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go
new file mode 100644
index 000000000..56602dfe9
--- /dev/null
+++ b/vendor/github.com/miekg/dns/update_test.go
@@ -0,0 +1,145 @@
+package dns
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestDynamicUpdateParsing(t *testing.T) {
+ prefix := "example.com. IN "
+ for _, typ := range TypeToString {
+ if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" ||
+ typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" ||
+ typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" {
+ continue
+ }
+ r, err := NewRR(prefix + typ)
+ if err != nil {
+ t.Errorf("failure to parse: %s %s: %v", prefix, typ, err)
+ } else {
+ t.Logf("parsed: %s", r.String())
+ }
+ }
+}
+
+func TestDynamicUpdateUnpack(t *testing.T) {
+ // From https://github.com/miekg/dns/issues/150#issuecomment-62296803
+ // It should be an update message for the zone "example.",
+ // deleting the A RRset "example." and then adding an A record at "example.".
+ // class ANY, TYPE A
+ buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1}
+ msg := new(Msg)
+ err := msg.Unpack(buf)
+ if err != nil {
+ t.Errorf("failed to unpack: %v\n%s", err, msg.String())
+ }
+}
+
+func TestDynamicUpdateZeroRdataUnpack(t *testing.T) {
+ m := new(Msg)
+ rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0}
+ m.Answer = []RR{rr, rr, rr, rr, rr}
+ m.Ns = m.Answer
+ for n, s := range TypeToString {
+ rr.Rrtype = n
+ bytes, err := m.Pack()
+ if err != nil {
+ t.Errorf("failed to pack %s: %v", s, err)
+ continue
+ }
+ if err := new(Msg).Unpack(bytes); err != nil {
+ t.Errorf("failed to unpack %s: %v", s, err)
+ }
+ }
+}
+
+func TestRemoveRRset(t *testing.T) {
+ // Should add a zero data RR in Class ANY with a TTL of 0
+ // for each set mentioned in the RRs provided to it.
+ rr, err := NewRR(". 100 IN A 127.0.0.1")
+ if err != nil {
+ t.Fatalf("error constructing RR: %v", err)
+ }
+ m := new(Msg)
+ m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}}
+ expectstr := m.String()
+ expect, err := m.Pack()
+ if err != nil {
+ t.Fatalf("error packing expected msg: %v", err)
+ }
+
+ m.Ns = nil
+ m.RemoveRRset([]RR{rr})
+ actual, err := m.Pack()
+ if err != nil {
+ t.Fatalf("error packing actual msg: %v", err)
+ }
+ if !bytes.Equal(actual, expect) {
+ tmp := new(Msg)
+ if err := tmp.Unpack(actual); err != nil {
+ t.Fatalf("error unpacking actual msg: %v\nexpected: %v\ngot: %v\n", err, expect, actual)
+ }
+ t.Errorf("expected msg:\n%s", expectstr)
+ t.Errorf("actual msg:\n%v", tmp)
+ }
+}
+
+func TestPreReqAndRemovals(t *testing.T) {
+ // Build a list of multiple prereqs and then some removes followed by an insert.
+ // We should be able to add multiple prereqs and updates.
+ m := new(Msg)
+ m.SetUpdate("example.org.")
+ m.Id = 1234
+
+ // Use a full set of RRs each time, so we are sure the rdata is stripped.
+ rr_name1, _ := NewRR("name_used. 3600 IN A 127.0.0.1")
+ rr_name2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1")
+ rr_remove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1")
+ rr_remove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1")
+ rr_remove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1")
+ rr_insert, _ := NewRR("insert. 3600 IN A 127.0.0.1")
+ rr_rrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1")
+ rr_rrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1")
+ rr_rrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1")
+
+ // Handle the prereqs.
+ m.NameUsed([]RR{rr_name1})
+ m.NameNotUsed([]RR{rr_name2})
+ m.RRsetUsed([]RR{rr_rrset1})
+ m.Used([]RR{rr_rrset2})
+ m.RRsetNotUsed([]RR{rr_rrset3})
+
+ // and now the updates.
+ m.RemoveName([]RR{rr_remove1})
+ m.RemoveRRset([]RR{rr_remove2})
+ m.Remove([]RR{rr_remove3})
+ m.Insert([]RR{rr_insert})
+
+ // This test function isn't an Example function because we print these RRs with tabs at the
+ // end and the Example function trims these, thus they never match.
+ // TODO(miek): don't print these tabs and make this into an Example function.
+ expect := `;; opcode: UPDATE, status: NOERROR, id: 1234
+;; flags:; QUERY: 1, ANSWER: 5, AUTHORITY: 4, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;example.org. IN SOA
+
+;; ANSWER SECTION:
+name_used. 0 ANY ANY
+name_not_used. 0 NONE ANY
+rrset_used1. 0 ANY A
+rrset_used2. 3600 IN A 127.0.0.1
+rrset_not_used. 0 NONE A
+
+;; AUTHORITY SECTION:
+remove1. 0 ANY ANY
+remove2. 0 ANY A
+remove3. 0 NONE A 127.0.0.1
+insert. 3600 IN A 127.0.0.1
+`
+
+ if m.String() != expect {
+ t.Errorf("expected msg:\n%s", expect)
+ t.Errorf("actual msg:\n%v", m.String())
+ }
+}
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
new file mode 100644
index 000000000..7346deffb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -0,0 +1,244 @@
+package dns
+
+import (
+ "time"
+)
+
+// Envelope is used when doing a zone transfer with a remote server.
+type Envelope struct {
+ RR []RR // The set of RRs in the answer section of the xfr reply message.
+ Error error // If something went wrong, this contains the error.
+}
+
+// A Transfer defines parameters that are used during a zone transfer.
+type Transfer struct {
+ *Conn
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
+ ReadTimeout time.Duration // net.Conn.SetReadDeadline value for connections, defaults to 2 seconds
+ WriteTimeout time.Duration // net.Conn.SetWriteDeadline value for connections, defaults to 2 seconds
+ TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
+ tsigTimersOnly bool
+}
+
+// Think we need a way to stop the transfer.
+
+// In performs an incoming transfer with the server in a.
+// If you would like to set the source IP, or some other attribute
+// of a Dialer for a Transfer, you can do so by specifying the attributes
+// in the Transfer.Conn:
+//
+// d := net.Dialer{LocalAddr: transfer_source}
+// con, err := d.Dial("tcp", master)
+// dnscon := &dns.Conn{Conn:con}
+// transfer = &dns.Transfer{Conn: dnscon}
+// channel, err := transfer.In(message, master)
+//
+func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
+ timeout := dnsTimeout
+ if t.DialTimeout != 0 {
+ timeout = t.DialTimeout
+ }
+ if t.Conn == nil {
+ t.Conn, err = DialTimeout("tcp", a, timeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := t.WriteMsg(q); err != nil {
+ return nil, err
+ }
+ env = make(chan *Envelope)
+ go func() {
+ if q.Question[0].Qtype == TypeAXFR {
+ go t.inAxfr(q.Id, env)
+ return
+ }
+ if q.Question[0].Qtype == TypeIXFR {
+ go t.inIxfr(q.Id, env)
+ return
+ }
+ }()
+ return env, nil
+}
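+
+// Editor's note: an illustrative sketch (not upstream code) of consuming the
+// channel returned by In for an AXFR, following the pattern used in the tests:
+//
+//    m := new(Msg)
+//    m.SetAxfr("example.org.")
+//    tr := new(Transfer)
+//    ch, err := tr.In(m, "ns1.example.org:53")
+//    if err == nil {
+//        for e := range ch {
+//            if e.Error != nil {
+//                break // the transfer failed mid-stream
+//            }
+//            // e.RR holds the next batch of records
+//        }
+//    }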
+
+func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
+ first := true
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.Conn.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if first {
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ first = !first
+ // only one answer that is SOA, receive more
+ if len(in.Answer) == 1 {
+ t.tsigTimersOnly = true
+ c <- &Envelope{in.Answer, nil}
+ continue
+ }
+ }
+
+ if !first {
+ t.tsigTimersOnly = true // Subsequent envelopes use this.
+ if isSOALast(in) {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+ }
+}
+
+func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
+ serial := uint32(0) // The first serial seen is the current server serial
+ first := true
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if first {
+ // A single SOA RR signals "no changes"
+ if len(in.Answer) == 1 && isSOAFirst(in) {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+
+ // Check if the returned answer is ok
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ // This serial is important
+ serial = in.Answer[0].(*SOA).Serial
+ first = !first
+ }
+
+ // Now we need to check each message for SOA records, to see what we need to do
+ if !first {
+ t.tsigTimersOnly = true
+ // If the last record in the IXFR contains the server's SOA, we should quit
+ if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok {
+ if v.Serial == serial {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+ }
+}
+
+// Out performs an outgoing transfer with the client connecting in w.
+// Basic use pattern:
+//
+// ch := make(chan *dns.Envelope)
+// tr := new(dns.Transfer)
+// go tr.Out(w, r, ch)
+// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
+// close(ch)
+// w.Hijack()
+// // w.Close() // Client closes connection
+//
+// The server is responsible for sending the correct sequence of RRs through the
+// channel ch.
+func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
+ for x := range ch {
+ r := new(Msg)
+ // Compress?
+ r.SetReply(q)
+ r.Authoritative = true
+ // assume it fits TODO(miek): fix
+ r.Answer = append(r.Answer, x.RR...)
+ if err := w.WriteMsg(r); err != nil {
+ return err
+ }
+ }
+ w.TsigTimersOnly(true)
+ return nil
+}
+
+// ReadMsg reads a message from the transfer connection t.
+func (t *Transfer) ReadMsg() (*Msg, error) {
+ m := new(Msg)
+ p := make([]byte, MaxMsgSize)
+ n, err := t.Read(p)
+ if err != nil && n == 0 {
+ return nil, err
+ }
+ p = p[:n]
+ if err := m.Unpack(p); err != nil {
+ return nil, err
+ }
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ t.tsigRequestMAC = ts.MAC
+ }
+ return m, err
+}
+
+// WriteMsg writes a message through the transfer connection t.
+func (t *Transfer) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ if _, err = t.Write(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+func isSOAFirst(in *Msg) bool {
+ if len(in.Answer) > 0 {
+ return in.Answer[0].Header().Rrtype == TypeSOA
+ }
+ return false
+}
+
+func isSOALast(in *Msg) bool {
+ if len(in.Answer) > 0 {
+ return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
+ }
+ return false
+}
diff --git a/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/miekg/dns/xfr_test.go
new file mode 100644
index 000000000..1337eec65
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr_test.go
@@ -0,0 +1,161 @@
+// +build net
+
+package dns
+
+import (
+ "net"
+ "testing"
+ "time"
+)
+
+func getIP(s string) string {
+ a, err := net.LookupAddr(s)
+ if err != nil {
+ return ""
+ }
+ return a[0]
+}
+
+// Flaky; we need to set up a local server and test against that.
+func TestAXFR_Miek(t *testing.T) {
+ // This test runs against a server maintained by Miek
+ if testing.Short() {
+ return
+ }
+ m := new(Msg)
+ m.SetAxfr("miek.nl.")
+
+ server := getIP("linode.atoom.net")
+
+ tr := new(Transfer)
+
+ if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
+ t.Fatal("failed to setup axfr: ", err)
+ } else {
+ for ex := range a {
+ if ex.Error != nil {
+ t.Errorf("error %v", ex.Error)
+ break
+ }
+ for _, rr := range ex.RR {
+ t.Log(rr.String())
+ }
+ }
+ }
+}
+
+// fails.
+func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) {
+ // This test runs against a server maintained by NLnet Labs
+ if testing.Short() {
+ return
+ }
+ m := new(Msg)
+ m.SetAxfr("nlnetlabs.nl.")
+
+ server := getIP("open.nlnetlabs.nl.")
+
+ tr := new(Transfer)
+ if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
+ t.Fatalf("failed to setup axfr %v for server: %v", err, server)
+ } else {
+ for ex := range a {
+ if ex.Error != nil {
+ t.Errorf("error %v", ex.Error)
+ break
+ }
+ }
+ }
+}
+
+func TestAXFR_Miek_Tsig(t *testing.T) {
+ // This test runs against a server maintained by Miek
+ if testing.Short() {
+ return
+ }
+ m := new(Msg)
+ m.SetAxfr("example.nl.")
+ m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
+
+ tr := new(Transfer)
+ tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+
+ if a, err := tr.In(m, "176.58.119.54:53"); err != nil {
+ t.Fatal("failed to setup axfr: ", err)
+ } else {
+ for ex := range a {
+ if ex.Error != nil {
+ t.Errorf("error %v", ex.Error)
+ break
+ }
+ for _, rr := range ex.RR {
+ t.Log(rr.String())
+ }
+ }
+ }
+}
+
+func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") }
+func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) }
+func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) }
+func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) }
+
+func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") }
+func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) }
+func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) }
+func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) }
+
+func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") }
+func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) }
+func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) }
+func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) }
+
+func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") }
+func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) }
+func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) }
+func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) }
+
+func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") }
+func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) }
+func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) }
+func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) }
+
+func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") }
+func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) }
+func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) }
+func TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) }
+
+func testAXFRSIDN(t *testing.T, host, alg string) {
+ // These tests run against a server maintained by SIDN Labs, see:
+ // https://workbench.sidnlabs.nl/
+ if testing.Short() {
+ return
+ }
+ x := new(Transfer)
+ x.TsigSecret = map[string]string{
+ "wb_md5.": "Wu/utSasZUkoeCNku152Zw==",
+ "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==",
+ "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=",
+ }
+ keyname := map[string]string{
+ HmacMD5: "wb_md5.",
+ HmacSHA1: "wb_sha1_longkey.",
+ HmacSHA256: "wb_sha256.",
+ }[alg]
+
+ m := new(Msg)
+ m.SetAxfr("types.wb.sidnlabs.nl.")
+ if keyname != "" {
+ m.SetTsig(keyname, alg, 300, time.Now().Unix())
+ }
+ c, err := x.In(m, host+".sidnlabs.nl:53")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for e := range c {
+ if e.Error != nil {
+ t.Fatal(e.Error)
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
new file mode 100644
index 000000000..346d3102d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -0,0 +1,3464 @@
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from msg_generate.go
+
+package dns
+
+// pack*() functions
+
+func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packDataA(rr.A, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packDataAAAA(rr.AAAA, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Subtype, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Hostname, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Flag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Tag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringOctet(rr.Value, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Type, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Certificate, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Target, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringBase64(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Target, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringHex(rr.Endpoint, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint48(rr.Address, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint64(rr.Address, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint32(rr.Gid, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packString(rr.Longitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Latitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Altitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packString(rr.Cpu, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Os, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.HitLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.PublicKeyAlgorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.PublicKeyLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Hit, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataA(rr.Locator32, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint64(rr.Locator64, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Version, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Size, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.HorizPre, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.VertPre, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Latitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Longitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Altitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Mb, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Md, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Mf, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Mg, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Rmail, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Email, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Mr, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Mx, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Order, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Service, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Regexp, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Replacement, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint64(rr.NodeID, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringHex(rr.Locator, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringTxt(rr.ZSData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Ns, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataNsec(rr.TypeBitMap, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Hash, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Iterations, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.SaltLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ // Only pack salt if value is not "-", i.e. empty salt.
+ if rr.Salt != "-" {
+ off, err = packStringHex(rr.Salt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ }
+ off, err = packUint8(rr.HashLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase32(rr.NextDomain, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataNsec(rr.TypeBitMap, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Hash, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Iterations, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.SaltLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ // Only pack salt if value is not "-", i.e. empty salt.
+ if rr.Salt != "-" {
+ off, err = packStringHex(rr.Salt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packDataOpt(rr.Option, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Map822, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringHex(rr.Rdata, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Mbox, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Txt, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.TypeCovered, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Signature, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Host, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.TypeCovered, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Signature, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Ns, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Mbox, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Serial, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Refresh, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Retry, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expire, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Minttl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Priority, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Weight, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Port, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.Target, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Type, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.FingerPrint, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(rr.NextName, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Mode, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeySize, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Key, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint8(rr.Usage, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Selector, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.MatchingType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Certificate, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint48(rr.TimeSigned, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.MACSize, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.MAC, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OrigId, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint32(rr.Uid, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packString(rr.Uinfo, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packUint16(rr.Priority, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Weight, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringOctet(rr.Target, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+ off, err := rr.Hdr.pack(msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ headerEnd := off
+ off, err = packString(rr.PSDNAddress, msg, off)
+ if err != nil {
+ return off, err
+ }
+ rr.Header().Rdlength = uint16(off - headerEnd)
+ return off, nil
+}
+
+// unpack*() functions
+
+func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(A)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.A, off, err = unpackDataA(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(AAAA)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.AAAA, off, err = unpackDataAAAA(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(AFSDB)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Subtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Hostname, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(ANY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ return rr, off, err
+}
+
+func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(CAA)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Flag, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Tag, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Value, off, err = unpackStringOctet(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(CDNSKEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(CDS)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(CERT)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Type, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(CNAME)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(DHCID)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(DLV)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(DNAME)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(DNSKEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(DS)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(EID)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(EUI48)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Address, off, err = unpackUint48(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(EUI64)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Address, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(GID)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Gid, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(GPOS)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Longitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Latitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Altitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(HINFO)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Cpu, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Os, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(HIP)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.HitLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKeyLength, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength))
+ if err != nil {
+ return rr, off, err
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength))
+ if err != nil {
+ return rr, off, err
+ }
+ rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(KEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(KX)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Exchanger, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(L32)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Locator32, off, err = unpackDataA(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(L64)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Locator64, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(LOC)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Version, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Size, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.HorizPre, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.VertPre, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Latitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Longitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Altitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(LP)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Fqdn, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MB)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Mb, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MD)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Md, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MF)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Mf, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MG)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Mg, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MINFO)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Rmail, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Email, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MR)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Mr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(MX)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Mx, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NAPTR)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Order, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Flags, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Service, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Regexp, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Replacement, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NID)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.NodeID, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NIMLOC)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NINFO)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.ZSData, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NS)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Ns, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NSAPPTR)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Ptr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NSEC)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.NextDomain, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NSEC3)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Hash, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Flags, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Iterations, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.SaltLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
+ if err != nil {
+ return rr, off, err
+ }
+ rr.HashLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength))
+ if err != nil {
+ return rr, off, err
+ }
+ rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(NSEC3PARAM)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Hash, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Flags, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Iterations, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.SaltLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(OPENPGPKEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(OPT)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Option, off, err = unpackDataOpt(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(PTR)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Ptr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(PX)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Map822, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Mapx400, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(RFC3597)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(RKEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(RP)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Mbox, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Txt, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(RRSIG)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.TypeCovered, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Labels, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OrigTtl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.SignerName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(RT)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Host, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(SIG)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.TypeCovered, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Labels, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OrigTtl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.SignerName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(SOA)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Ns, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Mbox, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Serial, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Refresh, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Retry, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Expire, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Minttl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(SPF)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(SRV)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Priority, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Weight, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Port, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(SSHFP)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Type, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TA)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TALINK)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.PreviousName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.NextName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TKEY)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Mode, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Error, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.KeySize, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Key, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OtherLen, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OtherData, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TLSA)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Usage, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Selector, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.MatchingType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TSIG)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.TimeSigned, off, err = unpackUint48(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Fudge, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.MACSize, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize))
+ if err != nil {
+ return rr, off, err
+ }
+ rr.OrigId, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Error, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OtherLen, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(TXT)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(UID)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Uid, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(UINFO)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Uinfo, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(URI)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.Priority, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Weight, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ if off == len(msg) {
+ return rr, off, nil
+ }
+ rr.Target, off, err = unpackStringOctet(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) {
+ rr := new(X25)
+ rr.Hdr = h
+ if noRdata(h) {
+ return rr, off, nil
+ }
+ var err error
+ rdStart := off
+ _ = rdStart
+
+ rr.PSDNAddress, off, err = unpackString(msg, off)
+ if err != nil {
+ return rr, off, err
+ }
+ return rr, off, err
+}
+
+var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){
+ TypeA: unpackA,
+ TypeAAAA: unpackAAAA,
+ TypeAFSDB: unpackAFSDB,
+ TypeANY: unpackANY,
+ TypeCAA: unpackCAA,
+ TypeCDNSKEY: unpackCDNSKEY,
+ TypeCDS: unpackCDS,
+ TypeCERT: unpackCERT,
+ TypeCNAME: unpackCNAME,
+ TypeDHCID: unpackDHCID,
+ TypeDLV: unpackDLV,
+ TypeDNAME: unpackDNAME,
+ TypeDNSKEY: unpackDNSKEY,
+ TypeDS: unpackDS,
+ TypeEID: unpackEID,
+ TypeEUI48: unpackEUI48,
+ TypeEUI64: unpackEUI64,
+ TypeGID: unpackGID,
+ TypeGPOS: unpackGPOS,
+ TypeHINFO: unpackHINFO,
+ TypeHIP: unpackHIP,
+ TypeKEY: unpackKEY,
+ TypeKX: unpackKX,
+ TypeL32: unpackL32,
+ TypeL64: unpackL64,
+ TypeLOC: unpackLOC,
+ TypeLP: unpackLP,
+ TypeMB: unpackMB,
+ TypeMD: unpackMD,
+ TypeMF: unpackMF,
+ TypeMG: unpackMG,
+ TypeMINFO: unpackMINFO,
+ TypeMR: unpackMR,
+ TypeMX: unpackMX,
+ TypeNAPTR: unpackNAPTR,
+ TypeNID: unpackNID,
+ TypeNIMLOC: unpackNIMLOC,
+ TypeNINFO: unpackNINFO,
+ TypeNS: unpackNS,
+ TypeNSAPPTR: unpackNSAPPTR,
+ TypeNSEC: unpackNSEC,
+ TypeNSEC3: unpackNSEC3,
+ TypeNSEC3PARAM: unpackNSEC3PARAM,
+ TypeOPENPGPKEY: unpackOPENPGPKEY,
+ TypeOPT: unpackOPT,
+ TypePTR: unpackPTR,
+ TypePX: unpackPX,
+ TypeRKEY: unpackRKEY,
+ TypeRP: unpackRP,
+ TypeRRSIG: unpackRRSIG,
+ TypeRT: unpackRT,
+ TypeSIG: unpackSIG,
+ TypeSOA: unpackSOA,
+ TypeSPF: unpackSPF,
+ TypeSRV: unpackSRV,
+ TypeSSHFP: unpackSSHFP,
+ TypeTA: unpackTA,
+ TypeTALINK: unpackTALINK,
+ TypeTKEY: unpackTKEY,
+ TypeTLSA: unpackTLSA,
+ TypeTSIG: unpackTSIG,
+ TypeTXT: unpackTXT,
+ TypeUID: unpackUID,
+ TypeUINFO: unpackUINFO,
+ TypeURI: unpackURI,
+ TypeX25: unpackX25,
+}
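Editor's note: the typeToUnpack table above is the package-internal dispatch point used while decoding wire-format messages; it is not exported. A minimal sketch of how that path is exercised from outside the package, assuming the vendored github.com/miekg/dns import path, packs a message containing one MX answer and unpacks it again, which drives unpackMX through this table:

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
        if err != nil {
            log.Fatal(err)
        }

        out := new(dns.Msg)
        out.SetQuestion("example.org.", dns.TypeMX)
        out.Answer = append(out.Answer, rr)

        wire, err := out.Pack() // serialize to wire format
        if err != nil {
            log.Fatal(err)
        }

        in := new(dns.Msg)
        if err := in.Unpack(wire); err != nil { // per-RR decoding dispatches by type
            log.Fatal(err)
        }
        fmt.Println(in.Answer[0].String())
    }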
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
new file mode 100644
index 000000000..a4ecbb0cc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -0,0 +1,828 @@
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from type_generate.go
+
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+)
+
+// TypeToRR is a map of constructors for each RR type.
+var TypeToRR = map[uint16]func() RR{
+ TypeA: func() RR { return new(A) },
+ TypeAAAA: func() RR { return new(AAAA) },
+ TypeAFSDB: func() RR { return new(AFSDB) },
+ TypeANY: func() RR { return new(ANY) },
+ TypeCAA: func() RR { return new(CAA) },
+ TypeCDNSKEY: func() RR { return new(CDNSKEY) },
+ TypeCDS: func() RR { return new(CDS) },
+ TypeCERT: func() RR { return new(CERT) },
+ TypeCNAME: func() RR { return new(CNAME) },
+ TypeDHCID: func() RR { return new(DHCID) },
+ TypeDLV: func() RR { return new(DLV) },
+ TypeDNAME: func() RR { return new(DNAME) },
+ TypeDNSKEY: func() RR { return new(DNSKEY) },
+ TypeDS: func() RR { return new(DS) },
+ TypeEID: func() RR { return new(EID) },
+ TypeEUI48: func() RR { return new(EUI48) },
+ TypeEUI64: func() RR { return new(EUI64) },
+ TypeGID: func() RR { return new(GID) },
+ TypeGPOS: func() RR { return new(GPOS) },
+ TypeHINFO: func() RR { return new(HINFO) },
+ TypeHIP: func() RR { return new(HIP) },
+ TypeKEY: func() RR { return new(KEY) },
+ TypeKX: func() RR { return new(KX) },
+ TypeL32: func() RR { return new(L32) },
+ TypeL64: func() RR { return new(L64) },
+ TypeLOC: func() RR { return new(LOC) },
+ TypeLP: func() RR { return new(LP) },
+ TypeMB: func() RR { return new(MB) },
+ TypeMD: func() RR { return new(MD) },
+ TypeMF: func() RR { return new(MF) },
+ TypeMG: func() RR { return new(MG) },
+ TypeMINFO: func() RR { return new(MINFO) },
+ TypeMR: func() RR { return new(MR) },
+ TypeMX: func() RR { return new(MX) },
+ TypeNAPTR: func() RR { return new(NAPTR) },
+ TypeNID: func() RR { return new(NID) },
+ TypeNIMLOC: func() RR { return new(NIMLOC) },
+ TypeNINFO: func() RR { return new(NINFO) },
+ TypeNS: func() RR { return new(NS) },
+ TypeNSAPPTR: func() RR { return new(NSAPPTR) },
+ TypeNSEC: func() RR { return new(NSEC) },
+ TypeNSEC3: func() RR { return new(NSEC3) },
+ TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
+ TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
+ TypeOPT: func() RR { return new(OPT) },
+ TypePTR: func() RR { return new(PTR) },
+ TypePX: func() RR { return new(PX) },
+ TypeRKEY: func() RR { return new(RKEY) },
+ TypeRP: func() RR { return new(RP) },
+ TypeRRSIG: func() RR { return new(RRSIG) },
+ TypeRT: func() RR { return new(RT) },
+ TypeSIG: func() RR { return new(SIG) },
+ TypeSOA: func() RR { return new(SOA) },
+ TypeSPF: func() RR { return new(SPF) },
+ TypeSRV: func() RR { return new(SRV) },
+ TypeSSHFP: func() RR { return new(SSHFP) },
+ TypeTA: func() RR { return new(TA) },
+ TypeTALINK: func() RR { return new(TALINK) },
+ TypeTKEY: func() RR { return new(TKEY) },
+ TypeTLSA: func() RR { return new(TLSA) },
+ TypeTSIG: func() RR { return new(TSIG) },
+ TypeTXT: func() RR { return new(TXT) },
+ TypeUID: func() RR { return new(UID) },
+ TypeUINFO: func() RR { return new(UINFO) },
+ TypeURI: func() RR { return new(URI) },
+ TypeX25: func() RR { return new(X25) },
+}
+
+// TypeToString is a map of strings for each RR type.
+var TypeToString = map[uint16]string{
+ TypeA: "A",
+ TypeAAAA: "AAAA",
+ TypeAFSDB: "AFSDB",
+ TypeANY: "ANY",
+ TypeATMA: "ATMA",
+ TypeAXFR: "AXFR",
+ TypeCAA: "CAA",
+ TypeCDNSKEY: "CDNSKEY",
+ TypeCDS: "CDS",
+ TypeCERT: "CERT",
+ TypeCNAME: "CNAME",
+ TypeDHCID: "DHCID",
+ TypeDLV: "DLV",
+ TypeDNAME: "DNAME",
+ TypeDNSKEY: "DNSKEY",
+ TypeDS: "DS",
+ TypeEID: "EID",
+ TypeEUI48: "EUI48",
+ TypeEUI64: "EUI64",
+ TypeGID: "GID",
+ TypeGPOS: "GPOS",
+ TypeHINFO: "HINFO",
+ TypeHIP: "HIP",
+ TypeISDN: "ISDN",
+ TypeIXFR: "IXFR",
+ TypeKEY: "KEY",
+ TypeKX: "KX",
+ TypeL32: "L32",
+ TypeL64: "L64",
+ TypeLOC: "LOC",
+ TypeLP: "LP",
+ TypeMAILA: "MAILA",
+ TypeMAILB: "MAILB",
+ TypeMB: "MB",
+ TypeMD: "MD",
+ TypeMF: "MF",
+ TypeMG: "MG",
+ TypeMINFO: "MINFO",
+ TypeMR: "MR",
+ TypeMX: "MX",
+ TypeNAPTR: "NAPTR",
+ TypeNID: "NID",
+ TypeNIMLOC: "NIMLOC",
+ TypeNINFO: "NINFO",
+ TypeNS: "NS",
+ TypeNSEC: "NSEC",
+ TypeNSEC3: "NSEC3",
+ TypeNSEC3PARAM: "NSEC3PARAM",
+ TypeNULL: "NULL",
+ TypeNXT: "NXT",
+ TypeNone: "None",
+ TypeOPENPGPKEY: "OPENPGPKEY",
+ TypeOPT: "OPT",
+ TypePTR: "PTR",
+ TypePX: "PX",
+ TypeRKEY: "RKEY",
+ TypeRP: "RP",
+ TypeRRSIG: "RRSIG",
+ TypeRT: "RT",
+ TypeReserved: "Reserved",
+ TypeSIG: "SIG",
+ TypeSOA: "SOA",
+ TypeSPF: "SPF",
+ TypeSRV: "SRV",
+ TypeSSHFP: "SSHFP",
+ TypeTA: "TA",
+ TypeTALINK: "TALINK",
+ TypeTKEY: "TKEY",
+ TypeTLSA: "TLSA",
+ TypeTSIG: "TSIG",
+ TypeTXT: "TXT",
+ TypeUID: "UID",
+ TypeUINFO: "UINFO",
+ TypeUNSPEC: "UNSPEC",
+ TypeURI: "URI",
+ TypeX25: "X25",
+ TypeNSAPPTR: "NSAP-PTR",
+}
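Editor's note: a short sketch of how the two generated maps above can be used together from client code: look up the constructor for a type code, build an empty record, and print the type's mnemonic. It assumes the vendored github.com/miekg/dns import path.

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        const qtype = dns.TypeMX

        newRR, ok := dns.TypeToRR[qtype]
        if !ok {
            fmt.Println("unknown type", qtype)
            return
        }
        rr := newRR()                     // a fresh, zero-valued *dns.MX
        rr.Header().Rrtype = qtype        // fill in the header's type code
        rr.Header().Class = dns.ClassINET
        fmt.Println(dns.TypeToString[qtype], "->", fmt.Sprintf("%T", rr))
    }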
+
+// Header() functions
+func (rr *A) Header() *RR_Header { return &rr.Hdr }
+func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
+func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
+func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
+func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
+func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
+func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *DS) Header() *RR_Header { return &rr.Hdr }
+func (rr *EID) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
+func (rr *GID) Header() *RR_Header { return &rr.Hdr }
+func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
+func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
+func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *KX) Header() *RR_Header { return &rr.Hdr }
+func (rr *L32) Header() *RR_Header { return &rr.Hdr }
+func (rr *L64) Header() *RR_Header { return &rr.Hdr }
+func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *LP) Header() *RR_Header { return &rr.Hdr }
+func (rr *MB) Header() *RR_Header { return &rr.Hdr }
+func (rr *MD) Header() *RR_Header { return &rr.Hdr }
+func (rr *MF) Header() *RR_Header { return &rr.Hdr }
+func (rr *MG) Header() *RR_Header { return &rr.Hdr }
+func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *MR) Header() *RR_Header { return &rr.Hdr }
+func (rr *MX) Header() *RR_Header { return &rr.Hdr }
+func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NID) Header() *RR_Header { return &rr.Hdr }
+func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *NS) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
+func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
+func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *PX) Header() *RR_Header { return &rr.Hdr }
+func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
+func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *RP) Header() *RR_Header { return &rr.Hdr }
+func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *RT) Header() *RR_Header { return &rr.Hdr }
+func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
+func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
+func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
+func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
+func (rr *TA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
+func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
+func (rr *UID) Header() *RR_Header { return &rr.Hdr }
+func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *URI) Header() *RR_Header { return &rr.Hdr }
+func (rr *X25) Header() *RR_Header { return &rr.Hdr }
+
+// len() functions
+func (rr *A) len() int {
+ l := rr.Hdr.len()
+ l += net.IPv4len // A
+ return l
+}
+func (rr *AAAA) len() int {
+ l := rr.Hdr.len()
+ l += net.IPv6len // AAAA
+ return l
+}
+func (rr *AFSDB) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Subtype
+ l += len(rr.Hostname) + 1
+ return l
+}
+func (rr *ANY) len() int {
+ l := rr.Hdr.len()
+ return l
+}
+func (rr *CAA) len() int {
+ l := rr.Hdr.len()
+ l += 1 // Flag
+ l += len(rr.Tag) + 1
+ l += len(rr.Value)
+ return l
+}
+func (rr *CERT) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Type
+ l += 2 // KeyTag
+ l += 1 // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
+ return l
+}
+func (rr *CNAME) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Target) + 1
+ return l
+}
+func (rr *DHCID) len() int {
+ l := rr.Hdr.len()
+ l += base64.StdEncoding.DecodedLen(len(rr.Digest))
+ return l
+}
+func (rr *DNAME) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Target) + 1
+ return l
+}
+func (rr *DNSKEY) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Flags
+ l += 1 // Protocol
+ l += 1 // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *DS) len() int {
+ l := rr.Hdr.len()
+ l += 2 // KeyTag
+ l += 1 // Algorithm
+ l += 1 // DigestType
+ l += len(rr.Digest)/2 + 1
+ return l
+}
+func (rr *EID) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Endpoint)/2 + 1
+ return l
+}
+func (rr *EUI48) len() int {
+ l := rr.Hdr.len()
+ l += 6 // Address
+ return l
+}
+func (rr *EUI64) len() int {
+ l := rr.Hdr.len()
+ l += 8 // Address
+ return l
+}
+func (rr *GID) len() int {
+ l := rr.Hdr.len()
+ l += 4 // Gid
+ return l
+}
+func (rr *GPOS) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Longitude) + 1
+ l += len(rr.Latitude) + 1
+ l += len(rr.Altitude) + 1
+ return l
+}
+func (rr *HINFO) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Cpu) + 1
+ l += len(rr.Os) + 1
+ return l
+}
+func (rr *HIP) len() int {
+ l := rr.Hdr.len()
+ l += 1 // HitLength
+ l += 1 // PublicKeyAlgorithm
+ l += 2 // PublicKeyLength
+ l += len(rr.Hit)/2 + 1
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ for _, x := range rr.RendezvousServers {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *KX) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += len(rr.Exchanger) + 1
+ return l
+}
+func (rr *L32) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += net.IPv4len // Locator32
+ return l
+}
+func (rr *L64) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += 8 // Locator64
+ return l
+}
+func (rr *LOC) len() int {
+ l := rr.Hdr.len()
+ l += 1 // Version
+ l += 1 // Size
+ l += 1 // HorizPre
+ l += 1 // VertPre
+ l += 4 // Latitude
+ l += 4 // Longitude
+ l += 4 // Altitude
+ return l
+}
+func (rr *LP) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += len(rr.Fqdn) + 1
+ return l
+}
+func (rr *MB) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Mb) + 1
+ return l
+}
+func (rr *MD) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Md) + 1
+ return l
+}
+func (rr *MF) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Mf) + 1
+ return l
+}
+func (rr *MG) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Mg) + 1
+ return l
+}
+func (rr *MINFO) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Rmail) + 1
+ l += len(rr.Email) + 1
+ return l
+}
+func (rr *MR) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Mr) + 1
+ return l
+}
+func (rr *MX) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += len(rr.Mx) + 1
+ return l
+}
+func (rr *NAPTR) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Order
+ l += 2 // Preference
+ l += len(rr.Flags) + 1
+ l += len(rr.Service) + 1
+ l += len(rr.Regexp) + 1
+ l += len(rr.Replacement) + 1
+ return l
+}
+func (rr *NID) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += 8 // NodeID
+ return l
+}
+func (rr *NIMLOC) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Locator)/2 + 1
+ return l
+}
+func (rr *NINFO) len() int {
+ l := rr.Hdr.len()
+ for _, x := range rr.ZSData {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *NS) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Ns) + 1
+ return l
+}
+func (rr *NSAPPTR) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Ptr) + 1
+ return l
+}
+func (rr *NSEC3PARAM) len() int {
+ l := rr.Hdr.len()
+ l += 1 // Hash
+ l += 1 // Flags
+ l += 2 // Iterations
+ l += 1 // SaltLength
+ l += len(rr.Salt)/2 + 1
+ return l
+}
+func (rr *OPENPGPKEY) len() int {
+ l := rr.Hdr.len()
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *PTR) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Ptr) + 1
+ return l
+}
+func (rr *PX) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += len(rr.Map822) + 1
+ l += len(rr.Mapx400) + 1
+ return l
+}
+func (rr *RFC3597) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Rdata)/2 + 1
+ return l
+}
+func (rr *RKEY) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Flags
+ l += 1 // Protocol
+ l += 1 // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *RP) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Mbox) + 1
+ l += len(rr.Txt) + 1
+ return l
+}
+func (rr *RRSIG) len() int {
+ l := rr.Hdr.len()
+ l += 2 // TypeCovered
+ l += 1 // Algorithm
+ l += 1 // Labels
+ l += 4 // OrigTtl
+ l += 4 // Expiration
+ l += 4 // Inception
+ l += 2 // KeyTag
+ l += len(rr.SignerName) + 1
+ l += base64.StdEncoding.DecodedLen(len(rr.Signature))
+ return l
+}
+func (rr *RT) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Preference
+ l += len(rr.Host) + 1
+ return l
+}
+func (rr *SOA) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Ns) + 1
+ l += len(rr.Mbox) + 1
+ l += 4 // Serial
+ l += 4 // Refresh
+ l += 4 // Retry
+ l += 4 // Expire
+ l += 4 // Minttl
+ return l
+}
+func (rr *SPF) len() int {
+ l := rr.Hdr.len()
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *SRV) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Priority
+ l += 2 // Weight
+ l += 2 // Port
+ l += len(rr.Target) + 1
+ return l
+}
+func (rr *SSHFP) len() int {
+ l := rr.Hdr.len()
+ l += 1 // Algorithm
+ l += 1 // Type
+ l += len(rr.FingerPrint)/2 + 1
+ return l
+}
+func (rr *TA) len() int {
+ l := rr.Hdr.len()
+ l += 2 // KeyTag
+ l += 1 // Algorithm
+ l += 1 // DigestType
+ l += len(rr.Digest)/2 + 1
+ return l
+}
+func (rr *TALINK) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.PreviousName) + 1
+ l += len(rr.NextName) + 1
+ return l
+}
+func (rr *TKEY) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Algorithm) + 1
+ l += 4 // Inception
+ l += 4 // Expiration
+ l += 2 // Mode
+ l += 2 // Error
+ l += 2 // KeySize
+ l += len(rr.Key) + 1
+ l += 2 // OtherLen
+ l += len(rr.OtherData) + 1
+ return l
+}
+func (rr *TLSA) len() int {
+ l := rr.Hdr.len()
+ l += 1 // Usage
+ l += 1 // Selector
+ l += 1 // MatchingType
+ l += len(rr.Certificate)/2 + 1
+ return l
+}
+func (rr *TSIG) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Algorithm) + 1
+ l += 6 // TimeSigned
+ l += 2 // Fudge
+ l += 2 // MACSize
+ l += len(rr.MAC)/2 + 1
+ l += 2 // OrigId
+ l += 2 // Error
+ l += 2 // OtherLen
+ l += len(rr.OtherData)/2 + 1
+ return l
+}
+func (rr *TXT) len() int {
+ l := rr.Hdr.len()
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *UID) len() int {
+ l := rr.Hdr.len()
+ l += 4 // Uid
+ return l
+}
+func (rr *UINFO) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.Uinfo) + 1
+ return l
+}
+func (rr *URI) len() int {
+ l := rr.Hdr.len()
+ l += 2 // Priority
+ l += 2 // Weight
+ l += len(rr.Target)
+ return l
+}
+func (rr *X25) len() int {
+ l := rr.Hdr.len()
+ l += len(rr.PSDNAddress) + 1
+ return l
+}
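Editor's note: the per-type len() helpers above are unexported; from outside the package they are reached through Msg.Len(), which uses them to estimate the packed size of a message. A minimal sketch, assuming the vendored import path:

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeSOA)
        // Len walks the message, using the per-RR length helpers, to
        // estimate the wire-format size, e.g. for truncation decisions.
        fmt.Println(m.Len())
    }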
+
+// copy() functions
+func (rr *A) copy() RR {
+ return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)}
+}
+func (rr *AAAA) copy() RR {
+ return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)}
+}
+func (rr *AFSDB) copy() RR {
+ return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname}
+}
+func (rr *ANY) copy() RR {
+ return &ANY{*rr.Hdr.copyHeader()}
+}
+func (rr *CAA) copy() RR {
+ return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value}
+}
+func (rr *CERT) copy() RR {
+ return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
+}
+func (rr *CNAME) copy() RR {
+ return &CNAME{*rr.Hdr.copyHeader(), rr.Target}
+}
+func (rr *DHCID) copy() RR {
+ return &DHCID{*rr.Hdr.copyHeader(), rr.Digest}
+}
+func (rr *DNAME) copy() RR {
+ return &DNAME{*rr.Hdr.copyHeader(), rr.Target}
+}
+func (rr *DNSKEY) copy() RR {
+ return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+func (rr *DS) copy() RR {
+ return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+func (rr *EID) copy() RR {
+ return &EID{*rr.Hdr.copyHeader(), rr.Endpoint}
+}
+func (rr *EUI48) copy() RR {
+ return &EUI48{*rr.Hdr.copyHeader(), rr.Address}
+}
+func (rr *EUI64) copy() RR {
+ return &EUI64{*rr.Hdr.copyHeader(), rr.Address}
+}
+func (rr *GID) copy() RR {
+ return &GID{*rr.Hdr.copyHeader(), rr.Gid}
+}
+func (rr *GPOS) copy() RR {
+ return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude}
+}
+func (rr *HINFO) copy() RR {
+ return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os}
+}
+func (rr *HIP) copy() RR {
+ RendezvousServers := make([]string, len(rr.RendezvousServers))
+ copy(RendezvousServers, rr.RendezvousServers)
+ return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
+}
+func (rr *KX) copy() RR {
+ return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger}
+}
+func (rr *L32) copy() RR {
+ return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)}
+}
+func (rr *L64) copy() RR {
+ return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64}
+}
+func (rr *LOC) copy() RR {
+ return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
+}
+func (rr *LP) copy() RR {
+ return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn}
+}
+func (rr *MB) copy() RR {
+ return &MB{*rr.Hdr.copyHeader(), rr.Mb}
+}
+func (rr *MD) copy() RR {
+ return &MD{*rr.Hdr.copyHeader(), rr.Md}
+}
+func (rr *MF) copy() RR {
+ return &MF{*rr.Hdr.copyHeader(), rr.Mf}
+}
+func (rr *MG) copy() RR {
+ return &MG{*rr.Hdr.copyHeader(), rr.Mg}
+}
+func (rr *MINFO) copy() RR {
+ return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email}
+}
+func (rr *MR) copy() RR {
+ return &MR{*rr.Hdr.copyHeader(), rr.Mr}
+}
+func (rr *MX) copy() RR {
+ return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx}
+}
+func (rr *NAPTR) copy() RR {
+ return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
+}
+func (rr *NID) copy() RR {
+ return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID}
+}
+func (rr *NIMLOC) copy() RR {
+ return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator}
+}
+func (rr *NINFO) copy() RR {
+ ZSData := make([]string, len(rr.ZSData))
+ copy(ZSData, rr.ZSData)
+ return &NINFO{*rr.Hdr.copyHeader(), ZSData}
+}
+func (rr *NS) copy() RR {
+ return &NS{*rr.Hdr.copyHeader(), rr.Ns}
+}
+func (rr *NSAPPTR) copy() RR {
+ return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr}
+}
+func (rr *NSEC) copy() RR {
+ TypeBitMap := make([]uint16, len(rr.TypeBitMap))
+ copy(TypeBitMap, rr.TypeBitMap)
+ return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap}
+}
+func (rr *NSEC3) copy() RR {
+ TypeBitMap := make([]uint16, len(rr.TypeBitMap))
+ copy(TypeBitMap, rr.TypeBitMap)
+ return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
+}
+func (rr *NSEC3PARAM) copy() RR {
+ return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
+}
+func (rr *OPENPGPKEY) copy() RR {
+ return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey}
+}
+func (rr *OPT) copy() RR {
+ Option := make([]EDNS0, len(rr.Option))
+ copy(Option, rr.Option)
+ return &OPT{*rr.Hdr.copyHeader(), Option}
+}
+func (rr *PTR) copy() RR {
+ return &PTR{*rr.Hdr.copyHeader(), rr.Ptr}
+}
+func (rr *PX) copy() RR {
+ return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400}
+}
+func (rr *RFC3597) copy() RR {
+ return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata}
+}
+func (rr *RKEY) copy() RR {
+ return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+func (rr *RP) copy() RR {
+ return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt}
+}
+func (rr *RRSIG) copy() RR {
+ return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
+}
+func (rr *RT) copy() RR {
+ return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host}
+}
+func (rr *SOA) copy() RR {
+ return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
+}
+func (rr *SPF) copy() RR {
+ Txt := make([]string, len(rr.Txt))
+ copy(Txt, rr.Txt)
+ return &SPF{*rr.Hdr.copyHeader(), Txt}
+}
+func (rr *SRV) copy() RR {
+ return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
+}
+func (rr *SSHFP) copy() RR {
+ return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
+}
+func (rr *TA) copy() RR {
+ return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+func (rr *TALINK) copy() RR {
+ return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName}
+}
+func (rr *TKEY) copy() RR {
+ return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
+}
+func (rr *TLSA) copy() RR {
+ return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+}
+func (rr *TSIG) copy() RR {
+ return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
+}
+func (rr *TXT) copy() RR {
+ Txt := make([]string, len(rr.Txt))
+ copy(Txt, rr.Txt)
+ return &TXT{*rr.Hdr.copyHeader(), Txt}
+}
+func (rr *UID) copy() RR {
+ return &UID{*rr.Hdr.copyHeader(), rr.Uid}
+}
+func (rr *UINFO) copy() RR {
+ return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo}
+}
+func (rr *URI) copy() RR {
+ return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target}
+}
+func (rr *X25) copy() RR {
+ return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress}
+}
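Editor's note: likewise, the copy() methods above are unexported deep-copy helpers; they are exercised through Msg.Copy(), which clones a message including its resource records. A sketch, assuming the vendored import path:

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        rr, err := dns.NewRR(`example.org. 300 IN TXT "hello"`)
        if err != nil {
            log.Fatal(err)
        }
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeTXT)
        m.Answer = append(m.Answer, rr)

        clone := m.Copy()                  // deep copy via the per-RR copy helpers
        clone.Answer[0].Header().Ttl = 0   // mutating the clone leaves m untouched
        fmt.Println(m.Answer[0].Header().Ttl, clone.Answer[0].Header().Ttl) // 300 0
    }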
diff --git a/vendor/github.com/rsc/letsencrypt/LICENSE b/vendor/github.com/rsc/letsencrypt/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/rsc/letsencrypt/README b/vendor/github.com/rsc/letsencrypt/README
new file mode 100644
index 000000000..98a875f37
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/README
@@ -0,0 +1,152 @@
+package letsencrypt // import "rsc.io/letsencrypt"
+
+Package letsencrypt obtains TLS certificates from LetsEncrypt.org.
+
+LetsEncrypt.org is a service that issues free SSL/TLS certificates to
+servers that can prove control over the given domain's DNS records or the
+servers pointed at by those records.
+
+
+Quick Start
+
+A complete HTTP/HTTPS web server using TLS certificates from
+LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS
+certificates in a file letsencrypt.cache across server restarts.
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+ "net/http"
+ "rsc.io/letsencrypt"
+ )
+
+ func main() {
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, TLS!\n")
+ })
+ var m letsencrypt.Manager
+ if err := m.CacheFile("letsencrypt.cache"); err != nil {
+ log.Fatal(err)
+ }
+ log.Fatal(m.Serve())
+ }
+
+
+Overview
+
+The fundamental type in this package is the Manager, which manages obtaining
+and refreshing a collection of TLS certificates, typically for use by an
+HTTPS server. The example above shows the most basic use of a Manager. The
+use can be customized by calling additional methods of the Manager.
+
+
+Registration
+
+A Manager m registers anonymously with LetsEncrypt.org, including agreeing
+to the letsencrypt.org terms of service, the first time it needs to obtain a
+certificate. To register with a particular email address and with the option
+of a prompt for agreement with the terms of service, call m.Register.
+
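Editor's note: a hedged sketch of registering before serving, written as a fragment of the Quick Start program above. The email address is a placeholder, and the exact Register signature is the one described here (an address plus a prompt func); passing a nil prompt is taken as automatic agreement with the terms of service.

    var m letsencrypt.Manager
    if err := m.CacheFile("letsencrypt.cache"); err != nil {
        log.Fatal(err)
    }
    if !m.Registered() {
        // nil prompt: accept the terms of service without prompting.
        m.Register("admin@example.org", nil)
    }
    log.Fatal(m.Serve())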
+
+GetCertificate
+
+The Manager's GetCertificate method returns certificates from the Manager's
+cache, filling the cache by requesting certificates from LetsEncrypt.org. In
+this way, a server with a tls.Config.GetCertificate set to m.GetCertificate
+will demand load a certificate for any host name it serves. To force loading
+of certificates ahead of time, install m.GetCertificate as before but then
+call m.Cert for each host name.
+
+A Manager can only obtain a certificate for a given host name if it can
+prove control of that host name to LetsEncrypt.org. By default it proves
+control by answering an HTTPS-based challenge: when the LetsEncrypt.org
+servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake
+must use m.GetCertificate to obtain a per-host certificate. The most common
+way to satisfy this requirement is for the host name to resolve to the IP
+address of a (single) computer running m.ServeHTTPS, or at least running a
+Go TLS server with tls.Config.GetCertificate set to m.GetCertificate.
+However, other configurations are possible. For example, a group of machines
+could use an implementation of tls.Config.GetCertificate that cached
+certificates but handled cache misses by making RPCs to a Manager m on an
+elected leader machine.
+
+In typical usage, then, the setting of tls.Config.GetCertificate to
+m.GetCertificate serves two purposes: it provides certificates to the TLS
+server for ordinary serving, and it also answers challenges to prove
+ownership of the domains in order to obtain those certificates.
+
+To force the loading of a certificate for a given host into the Manager's
+cache, use m.Cert.
+
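Editor's note: a sketch of forcing certificates into the cache before serving, extending the Quick Start program. The host names are placeholders, the crypto/tls, net/http and log imports are assumed, and the Cert signature (a host name in, a certificate and error out) is an assumption based on the description above.

    srv := &http.Server{
        Addr:      ":https",
        TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
    }
    for _, host := range []string{"example.org", "www.example.org"} {
        if _, err := m.Cert(host); err != nil { // warm the cache ahead of time
            log.Printf("preloading certificate for %s: %v", host, err)
        }
    }
    log.Fatal(srv.ListenAndServeTLS("", ""))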
+
+Persistent Storage
+
+If a server always starts with a zero Manager m, the server effectively
+fetches a new certificate for each of its host names from LetsEncrypt.org on
+each restart. This is unfortunate both because the server cannot start if
+LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often
+it will issue a certificate for a given host name (at time of writing, the
+limit is 5 per week for a given host name). To save server state proactively
+to a cache file and to reload the server state from that same file when
+creating a new manager, call m.CacheFile with the name of the file to use.
+
+For alternate storage uses, m.Marshal returns the current state of the
+Manager as an opaque string, m.Unmarshal sets the state of the Manager using
+a string previously returned by m.Marshal (usually a different m), and
+m.Watch returns a channel that receives notifications about state changes.
+
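Editor's note: a sketch of the alternate-storage pattern, persisting the opaque state somewhere other than a file. loadState and saveState are hypothetical helpers standing in for whatever store the deployment provides; only Marshal, Unmarshal and Watch come from the package.

    var m letsencrypt.Manager
    if prev, ok := loadState(); ok {      // hypothetical: read previously saved state
        if err := m.Unmarshal(prev); err != nil {
            log.Fatal(err)
        }
    }
    go func() {
        for range m.Watch() {             // fires after every state change
            saveState(m.Marshal())        // hypothetical: persist the opaque string
        }
    }()
    log.Fatal(m.Serve())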
+
+Limits
+
+To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager
+limits all its interactions to at most one request every minute, with an
+initial allowed burst of 20 requests.
+
+By default, if GetCertificate is asked for a certificate it does not have,
+it will in turn ask LetsEncrypt.org for that certificate. This opens a
+potential attack where attackers connect to a server by IP address and
+pretend to be asking for an incorrect host name. Then GetCertificate will
+attempt to obtain a certificate for that host, incorrectly, eventually
+hitting LetsEncrypt.org's rate limit for certificate requests and making it
+impossible to obtain actual certificates. Because servers hold certificates
+for months at a time, however, an attack would need to be sustained over a
+time period of at least a month in order to cause real problems.
+
+To mitigate this kind of attack, a given Manager limits itself to an average
+of one certificate request for a new host every three hours, with an initial
+allowed burst of up to 20 requests. Long-running servers will therefore stay
+within the LetsEncrypt.org limit of 300 failed requests per month.
+Certificate refreshes are not subject to this limit.
+
+To eliminate the attack entirely, call m.SetHosts to enumerate the exact set
+of hosts that are allowed in certificate requests.
+
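Editor's note: a sketch of pinning the allowed host set as described; that SetHosts takes a slice of host names is an assumption based on the text above, and the names are placeholders.

    var m letsencrypt.Manager
    if err := m.CacheFile("letsencrypt.cache"); err != nil {
        log.Fatal(err)
    }
    // Only these names may trigger certificate requests to LetsEncrypt.org.
    m.SetHosts([]string{"example.org", "www.example.org"})
    log.Fatal(m.Serve())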
+
+Web Servers
+
+The basic requirement for use of a Manager is that there be an HTTPS server
+running on port 443 and calling m.GetCertificate to obtain TLS certificates.
+Using standard primitives, the way to do this is:
+
+ srv := &http.Server{
+ Addr: ":https",
+ TLSConfig: &tls.Config{
+ GetCertificate: m.GetCertificate,
+ },
+ }
+ srv.ListenAndServeTLS("", "")
+
+However, this pattern of serving HTTPS with demand-loaded TLS certificates
+comes up enough to wrap into a single method m.ServeHTTPS.
+
+Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS
+URLs. That functionality is provided by RedirectHTTP.
+
+The combination of serving HTTPS with demand-loaded TLS certificates and
+serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in
+the original example above.
+
+func RedirectHTTP(w http.ResponseWriter, r *http.Request)
+type Manager struct { ... }
diff --git a/vendor/github.com/rsc/letsencrypt/lets.go b/vendor/github.com/rsc/letsencrypt/lets.go
new file mode 100644
index 000000000..c0168b56a
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/lets.go
@@ -0,0 +1,757 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package letsencrypt obtains TLS certificates from LetsEncrypt.org.
+//
+// LetsEncrypt.org is a service that issues free SSL/TLS certificates to servers
+// that can prove control over the given domain's DNS records or
+// the servers pointed at by those records.
+//
+// Quick Start
+//
+// A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org,
+// redirecting all HTTP access to HTTPS, and maintaining TLS certificates in a file
+// letsencrypt.cache across server restarts.
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+// "net/http"
+// "rsc.io/letsencrypt"
+// )
+//
+// func main() {
+// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// fmt.Fprintf(w, "Hello, TLS!\n")
+// })
+// var m letsencrypt.Manager
+// if err := m.CacheFile("letsencrypt.cache"); err != nil {
+// log.Fatal(err)
+// }
+// log.Fatal(m.Serve())
+// }
+//
+// Overview
+//
+// The fundamental type in this package is the Manager, which
+// manages obtaining and refreshing a collection of TLS certificates,
+// typically for use by an HTTPS server.
+// The example above shows the most basic use of a Manager.
+// The use can be customized by calling additional methods of the Manager.
+//
+// Registration
+//
+// A Manager m registers anonymously with LetsEncrypt.org, including agreeing to
+// the letsencrypt.org terms of service, the first time it needs to obtain a certificate.
+// To register with a particular email address and with the option of a
+// prompt for agreement with the terms of service, call m.Register.
+//
+// GetCertificate
+//
+// The Manager's GetCertificate method returns certificates
+// from the Manager's cache, filling the cache by requesting certificates
+// from LetsEncrypt.org. In this way, a server with a tls.Config.GetCertificate
+// set to m.GetCertificate will demand load a certificate for any host name
+// it serves. To force loading of certificates ahead of time, install m.GetCertificate
+// as before but then call m.Cert for each host name.
+//
+// A Manager can only obtain a certificate for a given host name if it can prove
+// control of that host name to LetsEncrypt.org. By default it proves control by
+// answering an HTTPS-based challenge: when
+// the LetsEncrypt.org servers connect to the named host on port 443 (HTTPS),
+// the TLS SNI handshake must use m.GetCertificate to obtain a per-host certificate.
+// The most common way to satisfy this requirement is for the host name to
+// resolve to the IP address of a (single) computer running m.ServeHTTPS,
+// or at least running a Go TLS server with tls.Config.GetCertificate set to m.GetCertificate.
+// However, other configurations are possible. For example, a group of machines
+// could use an implementation of tls.Config.GetCertificate that cached
+// certificates but handled cache misses by making RPCs to a Manager m
+// on an elected leader machine.
+//
+// In typical usage, then, the setting of tls.Config.GetCertificate to m.GetCertificate
+// serves two purposes: it provides certificates to the TLS server for ordinary serving,
+// and it also answers challenges to prove ownership of the domains in order to
+// obtain those certificates.
+//
+// To force the loading of a certificate for a given host into the Manager's cache,
+// use m.Cert.
+//
+// Persistent Storage
+//
+// If a server always starts with a zero Manager m, the server effectively fetches
+// a new certificate for each of its host names from LetsEncrypt.org on each restart.
+// This is unfortunate both because the server cannot start if LetsEncrypt.org is
+// unavailable and because LetsEncrypt.org limits how often it will issue a certificate
+// for a given host name (at time of writing, the limit is 5 per week for a given host name).
+// To save server state proactively to a cache file and to reload the server state from
+// that same file when creating a new manager, call m.CacheFile with the name of
+// the file to use.
+//
+// For alternate storage uses, m.Marshal returns the current state of the Manager
+// as an opaque string, m.Unmarshal sets the state of the Manager using a string
+// previously returned by m.Marshal (usually a different m), and m.Watch returns
+// a channel that receives notifications about state changes.
+//
+// Limits
+//
+// To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager limits all its
+// interactions to at most one request every minute, with an initial allowed burst of
+// 20 requests.
+//
+// By default, if GetCertificate is asked for a certificate it does not have, it will in turn
+// ask LetsEncrypt.org for that certificate. This opens a potential attack where attackers
+// connect to a server by IP address and pretend to be asking for an incorrect host name.
+// Then GetCertificate will attempt to obtain a certificate for that host, incorrectly,
+// eventually hitting LetsEncrypt.org's rate limit for certificate requests and making it
+// impossible to obtain actual certificates. Because servers hold certificates for months
+// at a time, however, an attack would need to be sustained over a time period
+// of at least a month in order to cause real problems.
+//
+// To mitigate this kind of attack, a given Manager limits
+// itself to an average of one certificate request for a new host every three hours,
+// with an initial allowed burst of up to 20 requests.
+// Long-running servers will therefore stay
+// within the LetsEncrypt.org limit of 300 failed requests per month.
+// Certificate refreshes are not subject to this limit.
+//
+// To eliminate the attack entirely, call m.SetHosts to enumerate the exact set
+// of hosts that are allowed in certificate requests.
+//
+// Web Servers
+//
+// The basic requirement for use of a Manager is that there be an HTTPS server
+// running on port 443 and calling m.GetCertificate to obtain TLS certificates.
+// Using standard primitives, the way to do this is:
+//
+// srv := &http.Server{
+// Addr: ":https",
+// TLSConfig: &tls.Config{
+// GetCertificate: m.GetCertificate,
+// },
+// }
+// srv.ListenAndServeTLS("", "")
+//
+// However, this pattern of serving HTTPS with demand-loaded TLS certificates
+// comes up enough to wrap into a single method m.ServeHTTPS.
+//
+// Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS URLs.
+// That functionality is provided by RedirectHTTP.
+//
+// The combination of serving HTTPS with demand-loaded TLS certificates and
+// serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in
+// the original example above.
+//
+package letsencrypt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/time/rate"
+
+ "github.com/xenolf/lego/acme"
+)
+
+const letsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
+const debug = false
+
+// A Manager m takes care of obtaining and refreshing a collection of TLS certificates
+// obtained by LetsEncrypt.org.
+// The zero Manager is not yet registered with LetsEncrypt.org and has no TLS certificates
+// but is nonetheless ready for use.
+// See the package comment for an overview of how to use a Manager.
+type Manager struct {
+ mu sync.Mutex
+ state state
+ rateLimit *rate.Limiter
+ newHostLimit *rate.Limiter
+ certCache map[string]*cacheEntry
+ certTokens map[string]*tls.Certificate
+ watchChan chan struct{}
+}
+
+// Serve runs an HTTP/HTTPS web server using TLS certificates obtained by the manager.
+// The HTTP server redirects all requests to the HTTPS server.
+// The HTTPS server obtains TLS certificates as needed and responds to requests
+// by invoking http.DefaultServeMux.
+//
+// Serve does not return until the HTTPS server fails to start or else stops.
+// Either way, Serve can only return a non-nil error, never nil.
+func (m *Manager) Serve() error {
+ l, err := net.Listen("tcp", ":http")
+ if err != nil {
+ return err
+ }
+ defer l.Close()
+ go http.Serve(l, http.HandlerFunc(RedirectHTTP))
+
+ return m.ServeHTTPS()
+}
+
+// ServeHTTPS runs an HTTPS web server using TLS certificates obtained by the manager.
+// The HTTPS server obtains TLS certificates as needed and responds to requests
+// by invoking http.DefaultServeMux.
+// ServeHTTPS does not return until the HTTPS server fails to start or else stops.
+// Either way, ServeHTTPS can only return a non-nil error, never nil.
+func (m *Manager) ServeHTTPS() error {
+ srv := &http.Server{
+ Addr: ":https",
+ TLSConfig: &tls.Config{
+ GetCertificate: m.GetCertificate,
+ },
+ }
+ return srv.ListenAndServeTLS("", "")
+}
+
+// RedirectHTTP is an HTTP handler (suitable for use with http.HandleFunc)
+// that responds to all requests by redirecting to the same URL served over HTTPS.
+// It should only be invoked for requests received over HTTP.
+func RedirectHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.TLS != nil || r.Host == "" {
+		http.Error(w, "not found", 404)
+		return
+	}
+
+ u := r.URL
+ u.Host = r.Host
+ u.Scheme = "https"
+ http.Redirect(w, r, u.String(), 302)
+}
+
+// state is the serializable state for the Manager.
+// It also implements acme.User.
+type state struct {
+ Email string
+ Reg *acme.RegistrationResource
+ Key string
+ key *ecdsa.PrivateKey
+ Hosts []string
+ Certs map[string]stateCert
+}
+
+func (s *state) GetEmail() string { return s.Email }
+func (s *state) GetRegistration() *acme.RegistrationResource { return s.Reg }
+func (s *state) GetPrivateKey() crypto.PrivateKey { return s.key }
+
+type stateCert struct {
+ Cert string
+ Key string
+}
+
+func (cert stateCert) toTLS() (*tls.Certificate, error) {
+ c, err := tls.X509KeyPair([]byte(cert.Cert), []byte(cert.Key))
+ if err != nil {
+ return nil, err
+ }
+ return &c, err
+}
+
+type cacheEntry struct {
+ host string
+ m *Manager
+
+ mu sync.Mutex
+ cert *tls.Certificate
+ timeout time.Time
+ refreshing bool
+ err error
+}
+
+func (m *Manager) init() {
+ m.mu.Lock()
+ if m.certCache == nil {
+ m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
+ m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
+ m.certCache = map[string]*cacheEntry{}
+ m.certTokens = map[string]*tls.Certificate{}
+ m.watchChan = make(chan struct{}, 1)
+ m.watchChan <- struct{}{}
+ }
+ m.mu.Unlock()
+}
+
+// Watch returns the manager's watch channel,
+// which delivers a notification after every time the
+// manager's state (as exposed by Marshal and Unmarshal) changes.
+// All calls to Watch return the same watch channel.
+//
+// The watch channel includes notifications about changes
+// before the first call to Watch, so that in the pattern below,
+// the range loop executes once immediately, saving
+// the result of setup (along with any background updates that
+// may have raced in quickly).
+//
+// m := new(letsencrypt.Manager)
+// setup(m)
+// go backgroundUpdates(m)
+// for range m.Watch() {
+// save(m.Marshal())
+// }
+//
+func (m *Manager) Watch() <-chan struct{} {
+ m.init()
+ m.updated()
+ return m.watchChan
+}
+
+func (m *Manager) updated() {
+ select {
+ case m.watchChan <- struct{}{}:
+ default:
+ }
+}
+
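+// CacheFile directs the manager to cache its state (as exposed by Marshal
+// and Unmarshal) in the named file. The file is created if it does not
+// exist, any existing content is loaded with Unmarshal, and later state
+// changes reported by Watch are written back with Marshal in a background
+// goroutine.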
+func (m *Manager) CacheFile(name string) error {
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ return err
+ }
+ if len(data) > 0 {
+ if err := m.Unmarshal(string(data)); err != nil {
+ return err
+ }
+ }
+ go func() {
+ for range m.Watch() {
+ err := ioutil.WriteFile(name, []byte(m.Marshal()), 0600)
+ if err != nil {
+ log.Printf("writing letsencrypt cache: %v", err)
+ }
+ }
+ }()
+ return nil
+}
+
+// Registered reports whether the manager has registered with letsencrypt.org yet.
+func (m *Manager) Registered() bool {
+ m.init()
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.registered()
+}
+
+func (m *Manager) registered() bool {
+ return m.state.Reg != nil && m.state.Reg.Body.Agreement != ""
+}
+
+// Register registers the manager with letsencrypt.org, using the given email address.
+// Registration may require agreeing to the letsencrypt.org terms of service.
+// If so, Register calls prompt(url) where url is the URL of the terms of service.
+// Prompt should report whether the caller agrees to the terms.
+// A nil prompt func is taken to mean that the user always agrees.
+// The email address is sent to LetsEncrypt.org but otherwise unchecked;
+// it can be omitted by passing the empty string.
+//
+// Calling Register is only required to make sure registration uses a
+// particular email address or to insert an explicit prompt into the
+// registration sequence. If the manager is not registered, it will
+// automatically register with no email address and automatic
+// agreement to the terms of service at the first call to Cert or GetCertificate.
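+//
+// A minimal sketch (the email address and prompt behavior are illustrative only):
+//
+//	err := m.Register("admin@example.com", func(tosURL string) bool {
+//		log.Printf("agreeing to terms of service at %s", tosURL)
+//		return true
+//	})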
+func (m *Manager) Register(email string, prompt func(string) bool) error {
+ m.init()
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ return m.register(email, prompt)
+}
+
+func (m *Manager) register(email string, prompt func(string) bool) error {
+ if m.registered() {
+ return fmt.Errorf("already registered")
+ }
+ m.state.Email = email
+ if m.state.key == nil {
+ key, err := newKey()
+ if err != nil {
+ return fmt.Errorf("generating key: %v", err)
+ }
+ Key, err := marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("generating key: %v", err)
+ }
+ m.state.key = key
+ m.state.Key = string(Key)
+ }
+
+ c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256)
+ if err != nil {
+ return fmt.Errorf("create client: %v", err)
+ }
+ reg, err := c.Register()
+ if err != nil {
+ return fmt.Errorf("register: %v", err)
+ }
+
+ m.state.Reg = reg
+ if reg.Body.Agreement == "" {
+ if prompt != nil && !prompt(reg.TosURL) {
+ return fmt.Errorf("did not agree to TOS")
+ }
+ if err := c.AgreeToTOS(); err != nil {
+ return fmt.Errorf("agreeing to TOS: %v", err)
+ }
+ }
+
+ m.updated()
+
+ return nil
+}
+
+// Marshal returns an encoding of the manager's state,
+// suitable for writing to disk and reloading by calling Unmarshal.
+// The state includes registration status, the configured host list
+// from SetHosts, and all known certificates, including their private
+// cryptographic keys.
+// Consequently, the state should be kept private.
+func (m *Manager) Marshal() string {
+ m.init()
+ m.mu.Lock()
+ js, err := json.MarshalIndent(&m.state, "", "\t")
+ m.mu.Unlock()
+ if err != nil {
+ panic("unexpected json.Marshal failure")
+ }
+ return string(js)
+}
+
+// Unmarshal restores the state encoded by a previous call to Marshal
+// (perhaps on a different Manager in a different program).
+func (m *Manager) Unmarshal(enc string) error {
+ m.init()
+ var st state
+ if err := json.Unmarshal([]byte(enc), &st); err != nil {
+ return err
+ }
+ if st.Key != "" {
+ key, err := unmarshalKey(st.Key)
+ if err != nil {
+ return err
+ }
+ st.key = key
+ }
+ m.mu.Lock()
+ m.state = st
+ m.mu.Unlock()
+ for host, cert := range m.state.Certs {
+ c, err := cert.toTLS()
+ if err != nil {
+ log.Printf("letsencrypt: ignoring entry for %s: %v", host, err)
+ continue
+ }
+ m.certCache[host] = &cacheEntry{host: host, m: m, cert: c}
+ }
+ m.updated()
+ return nil
+}
+
+// SetHosts sets the manager's list of known host names.
+// If the list is non-nil, the manager will only ever attempt to acquire
+// certificates for host names on the list.
+// If the list is nil, the manager does not restrict the hosts for which it
+// will request certificates.
+func (m *Manager) SetHosts(hosts []string) {
+ m.init()
+ m.mu.Lock()
+ m.state.Hosts = append(m.state.Hosts[:0], hosts...)
+ m.mu.Unlock()
+ m.updated()
+}
+
+// GetCertificate can be placed in a tls.Config's GetCertificate field to make
+// the TLS server use Let's Encrypt certificates.
+// Each time a client connects to the TLS server expecting a new host name,
+// the TLS server's call to GetCertificate will trigger an exchange with the
+// Let's Encrypt servers to obtain that certificate, subject to the manager rate limits.
+//
+// As noted in the Manager's documentation comment,
+// to obtain a certificate for a given host name, that name
+// must resolve to a computer running a TLS server on port 443
+// that obtains TLS SNI certificates by calling m.GetCertificate.
+// In the standard usage, then, installing m.GetCertificate in the tls.Config
+// both automatically provisions the TLS certificates needed for
+// ordinary HTTPS service and answers the challenges from LetsEncrypt.org.
+func (m *Manager) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ m.init()
+
+ host := clientHello.ServerName
+
+ if debug {
+ log.Printf("GetCertificate %s", host)
+ }
+
+ if strings.HasSuffix(host, ".acme.invalid") {
+ m.mu.Lock()
+ cert := m.certTokens[host]
+ m.mu.Unlock()
+ if cert == nil {
+ return nil, fmt.Errorf("unknown host")
+ }
+ return cert, nil
+ }
+
+ return m.Cert(host)
+}
+
+// Cert returns the certificate for the given host name, obtaining a new one if necessary.
+//
+// As noted in the documentation for Manager and for the GetCertificate method,
+// obtaining a certificate requires that m.GetCertificate be associated with host.
+// In most servers, simply starting a TLS server with a configuration referring
+// to m.GetCertificate is sufficient, and Cert need not be called.
+//
+// The main use of Cert is to force the manager to obtain a certificate
+// for a particular host name ahead of time.
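+//
+// For example, a sketch that warms the cache at startup (the host name is a placeholder):
+//
+//	if _, err := m.Cert("example.com"); err != nil {
+//		log.Print(err)
+//	}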
+func (m *Manager) Cert(host string) (*tls.Certificate, error) {
+ host = strings.ToLower(host)
+ if debug {
+ log.Printf("Cert %s", host)
+ }
+
+ m.init()
+ m.mu.Lock()
+ if !m.registered() {
+ m.register("", nil)
+ }
+
+ ok := false
+ if m.state.Hosts == nil {
+ ok = true
+ } else {
+ for _, h := range m.state.Hosts {
+ if host == h {
+ ok = true
+ break
+ }
+ }
+ }
+ if !ok {
+ m.mu.Unlock()
+ return nil, fmt.Errorf("unknown host")
+ }
+
+ // Otherwise look in our cert cache.
+ entry, ok := m.certCache[host]
+ if !ok {
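+		// A brand-new host must pass both limiters: reserve from the
+		// overall limiter first so the reservation can be returned if
+		// the new-host limiter rejects the request.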
+ r := m.rateLimit.Reserve()
+ ok := r.OK()
+ if ok {
+ ok = m.newHostLimit.Allow()
+ if !ok {
+ r.Cancel()
+ }
+ }
+ if !ok {
+ m.mu.Unlock()
+ return nil, fmt.Errorf("rate limited")
+ }
+ entry = &cacheEntry{host: host, m: m}
+ m.certCache[host] = entry
+ }
+ m.mu.Unlock()
+
+ entry.mu.Lock()
+ defer entry.mu.Unlock()
+ entry.init()
+ if entry.err != nil {
+ return nil, entry.err
+ }
+ return entry.cert, nil
+}
+
+func (e *cacheEntry) init() {
+ if e.err != nil && time.Now().Before(e.timeout) {
+ return
+ }
+ if e.cert != nil {
+ if e.timeout.IsZero() {
+ t, err := certRefreshTime(e.cert)
+ if err != nil {
+ e.err = err
+ e.timeout = time.Now().Add(1 * time.Minute)
+ e.cert = nil
+ return
+ }
+ e.timeout = t
+ }
+ if time.Now().After(e.timeout) && !e.refreshing {
+ e.refreshing = true
+ go e.refresh()
+ }
+ return
+ }
+
+ cert, refreshTime, err := e.m.verify(e.host)
+ e.m.mu.Lock()
+ e.m.certCache[e.host] = e
+ e.m.mu.Unlock()
+ e.install(cert, refreshTime, err)
+}
+
+func (e *cacheEntry) install(cert *tls.Certificate, refreshTime time.Time, err error) {
+ e.cert = nil
+ e.timeout = time.Time{}
+ e.err = nil
+
+ if err != nil {
+ e.err = err
+ e.timeout = time.Now().Add(1 * time.Minute)
+ return
+ }
+
+ e.cert = cert
+ e.timeout = refreshTime
+}
+
+func (e *cacheEntry) refresh() {
+ e.m.rateLimit.Wait(context.Background())
+ cert, refreshTime, err := e.m.verify(e.host)
+
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ e.refreshing = false
+ if err == nil {
+ e.install(cert, refreshTime, nil)
+ }
+}
+
+func (m *Manager) verify(host string) (cert *tls.Certificate, refreshTime time.Time, err error) {
+ c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256)
+ if err != nil {
+ return
+ }
+ if err = c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}); err != nil {
+ return
+ }
+ c.ExcludeChallenges([]acme.Challenge{acme.HTTP01})
+ acmeCert, errmap := c.ObtainCertificate([]string{host}, true, nil)
+ if len(errmap) > 0 {
+ if debug {
+ log.Printf("ObtainCertificate %v => %v", host, errmap)
+ }
+ err = fmt.Errorf("%v", errmap)
+ return
+ }
+ entryCert := stateCert{
+ Cert: string(acmeCert.Certificate),
+ Key: string(acmeCert.PrivateKey),
+ }
+ cert, err = entryCert.toTLS()
+ if err != nil {
+ if debug {
+ log.Printf("ObtainCertificate %v toTLS failure: %v", host, err)
+ }
+ return
+ }
+ if refreshTime, err = certRefreshTime(cert); err != nil {
+ return
+ }
+
+ m.mu.Lock()
+ if m.state.Certs == nil {
+ m.state.Certs = make(map[string]stateCert)
+ }
+ m.state.Certs[host] = entryCert
+ m.mu.Unlock()
+ m.updated()
+
+ return cert, refreshTime, nil
+}
+
+func certRefreshTime(cert *tls.Certificate) (time.Time, error) {
+ xc, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ if debug {
+ log.Printf("ObtainCertificate to X.509 failure: %v", err)
+ }
+ return time.Time{}, err
+ }
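+	// Refresh at the halfway point of the validity period,
+	// but never earlier than 30 days before expiry.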
+ t := xc.NotBefore.Add(xc.NotAfter.Sub(xc.NotBefore) / 2)
+ monthEarly := xc.NotAfter.Add(-30 * 24 * time.Hour)
+ if t.Before(monthEarly) {
+ t = monthEarly
+ }
+ return t, nil
+}
+
+// tlsProvider implements acme.ChallengeProvider for TLS handshake challenges.
+type tlsProvider struct {
+ m *Manager
+}
+
+func (p tlsProvider) Present(domain, token, keyAuth string) error {
+ cert, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ p.m.mu.Lock()
+ p.m.certTokens[dom] = &cert
+ p.m.mu.Unlock()
+
+ return nil
+}
+
+func (p tlsProvider) CleanUp(domain, token, keyAuth string) error {
+ _, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ p.m.mu.Lock()
+ delete(p.m.certTokens, dom)
+ p.m.mu.Unlock()
+
+ return nil
+}
+
+func marshalKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ data, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}), nil
+}
+
+func unmarshalKey(text string) (*ecdsa.PrivateKey, error) {
+ b, _ := pem.Decode([]byte(text))
+ if b == nil {
+ return nil, fmt.Errorf("unmarshalKey: missing key")
+ }
+ if b.Type != "EC PRIVATE KEY" {
+ return nil, fmt.Errorf("unmarshalKey: found %q, not %q", b.Type, "EC PRIVATE KEY")
+ }
+ k, err := x509.ParseECPrivateKey(b.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalKey: %v", err)
+ }
+ return k, nil
+}
+
+func newKey() (*ecdsa.PrivateKey, error) {
+ return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE
new file mode 100644
index 000000000..17460b716
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Sebastian Erhart
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go
new file mode 100644
index 000000000..857900507
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go
@@ -0,0 +1,16 @@
+package acme
+
+// Challenge is a string that identifies a particular type and version of ACME challenge.
+type Challenge string
+
+const (
+ // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http
+ // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge
+ HTTP01 = Challenge("http-01")
+ // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni
+ // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge
+ TLSSNI01 = Challenge("tls-sni-01")
+ // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns
+ // Note: DNS01Record returns a DNS record which will fulfill this challenge
+ DNS01 = Challenge("dns-01")
+)
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go
new file mode 100644
index 000000000..16e4cbe00
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go
@@ -0,0 +1,638 @@
+// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers.
+package acme
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // Logger is an optional custom logger.
+ Logger *log.Logger
+)
+
+// logf writes a log entry. It uses Logger if not
+// nil, otherwise it uses the default log.Logger.
+func logf(format string, args ...interface{}) {
+ if Logger != nil {
+ Logger.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// User interface is to be implemented by users of this library.
+// It is used by the client type to get user-specific information.
+type User interface {
+ GetEmail() string
+ GetRegistration() *RegistrationResource
+ GetPrivateKey() crypto.PrivateKey
+}
+
+// Interface for all challenge solvers to implement.
+type solver interface {
+ Solve(challenge challenge, domain string) error
+}
+
+type validateFunc func(j *jws, domain, uri string, chlng challenge) error
+
+// Client is the user-friendly way to interact with an ACME server.
+type Client struct {
+ directory directory
+ user User
+ jws *jws
+ keyType KeyType
+ issuerCert []byte
+ solvers map[Challenge]solver
+}
+
+// NewClient creates a new ACME client on behalf of the user. The client will depend on
+// the ACME directory located at caDirURL for the rest of its actions. Private keys for
+// new certificates are generated with the algorithm and size selected by keyType.
+func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) {
+ privKey := user.GetPrivateKey()
+ if privKey == nil {
+ return nil, errors.New("private key was nil")
+ }
+
+ var dir directory
+ if _, err := getJSON(caDirURL, &dir); err != nil {
+ return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err)
+ }
+
+ if dir.NewRegURL == "" {
+ return nil, errors.New("directory missing new registration URL")
+ }
+ if dir.NewAuthzURL == "" {
+ return nil, errors.New("directory missing new authz URL")
+ }
+ if dir.NewCertURL == "" {
+ return nil, errors.New("directory missing new certificate URL")
+ }
+ if dir.RevokeCertURL == "" {
+ return nil, errors.New("directory missing revoke certificate URL")
+ }
+
+ jws := &jws{privKey: privKey, directoryURL: caDirURL}
+
+ // REVIEW: best possibility?
+ // Add all available solvers with the right index as per ACME
+	// spec to this map. Otherwise they won't be found.
+ solvers := make(map[Challenge]solver)
+ solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}}
+ solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}}
+
+ return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil
+}
+
+// SetChallengeProvider specifies a custom provider that will make the solution available
+func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error {
+ switch challenge {
+ case HTTP01:
+ c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p}
+ case TLSSNI01:
+ c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p}
+ default:
+ return fmt.Errorf("Unknown challenge %v", challenge)
+ }
+ return nil
+}
+
+// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges.
+// If this option is not used, the default port 80 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
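+//
+// For example (a sketch; the port is arbitrary):
+//
+//	c.SetHTTPAddress(":8080")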
+func (c *Client) SetHTTPAddress(iface string) error {
+ host, port, err := net.SplitHostPort(iface)
+ if err != nil {
+ return err
+ }
+
+ if chlng, ok := c.solvers[HTTP01]; ok {
+ chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port)
+ }
+
+ return nil
+}
+
+// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges.
+// If this option is not used, the default port 443 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
+func (c *Client) SetTLSAddress(iface string) error {
+ host, port, err := net.SplitHostPort(iface)
+ if err != nil {
+ return err
+ }
+
+ if chlng, ok := c.solvers[TLSSNI01]; ok {
+ chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port)
+ }
+ return nil
+}
+
+// ExcludeChallenges explicitly removes challenges from the pool for solving.
+func (c *Client) ExcludeChallenges(challenges []Challenge) {
+ // Loop through all challenges and delete the requested one if found.
+ for _, challenge := range challenges {
+ delete(c.solvers, challenge)
+ }
+}
+
+// Register the current account to the ACME server.
+func (c *Client) Register() (*RegistrationResource, error) {
+ if c == nil || c.user == nil {
+ return nil, errors.New("acme: cannot register a nil client or user")
+ }
+ logf("[INFO] acme: Registering account for %s", c.user.GetEmail())
+
+ regMsg := registrationMessage{
+ Resource: "new-reg",
+ }
+ if c.user.GetEmail() != "" {
+ regMsg.Contact = []string{"mailto:" + c.user.GetEmail()}
+ } else {
+ regMsg.Contact = []string{}
+ }
+
+ var serverReg Registration
+ hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg)
+ if err != nil {
+ return nil, err
+ }
+
+ reg := &RegistrationResource{Body: serverReg}
+
+ links := parseLinks(hdr["Link"])
+ reg.URI = hdr.Get("Location")
+ if links["terms-of-service"] != "" {
+ reg.TosURL = links["terms-of-service"]
+ }
+
+ if links["next"] != "" {
+ reg.NewAuthzURL = links["next"]
+ } else {
+ return nil, errors.New("acme: The server did not return 'next' link to proceed")
+ }
+
+ return reg, nil
+}
+
+// AgreeToTOS updates the Client registration and sends the agreement to
+// the server.
+func (c *Client) AgreeToTOS() error {
+ reg := c.user.GetRegistration()
+
+ reg.Body.Agreement = c.user.GetRegistration().TosURL
+ reg.Body.Resource = "reg"
+ _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil)
+ return err
+}
+
+// ObtainCertificate tries to obtain a single certificate using all domains passed into it.
+// The first domain in domains is used for the CommonName field of the certificate, all other
+// domains are added using the Subject Alternate Names extension. A new private key is generated
+// for every invocation of this function. If you do not want that, you can supply your own private key
+// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// This function will never return a partial certificate. If one domain in the list fails,
+// the whole certificate will fail.
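+//
+// A usage sketch (the domain is a placeholder and error handling is minimal):
+//
+//	res, failures := c.ObtainCertificate([]string{"example.com"}, true, nil)
+//	if len(failures) > 0 {
+//		log.Fatal(failures)
+//	}
+//	_ = res.Certificate // PEM-encoded certificate (plus issuer, since bundle is true)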
+func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) {
+ if bundle {
+ logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", "))
+ } else {
+ logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", "))
+ }
+
+ challenges, failures := c.getChallenges(domains)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(failures) > 0 {
+ return CertificateResource{}, failures
+ }
+
+ errs := c.solveChallenges(challenges)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(errs) > 0 {
+ return CertificateResource{}, errs
+ }
+
+ logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
+
+ cert, err := c.requestCertificate(challenges, bundle, privKey)
+ if err != nil {
+ for _, chln := range challenges {
+ failures[chln.Domain] = err
+ }
+ }
+
+ return cert, failures
+}
+
+// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA.
+func (c *Client) RevokeCertificate(certificate []byte) error {
+ certificates, err := parsePEMBundle(certificate)
+ if err != nil {
+ return err
+ }
+
+ x509Cert := certificates[0]
+ if x509Cert.IsCA {
+ return fmt.Errorf("Certificate bundle starts with a CA certificate")
+ }
+
+ encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw)
+
+ _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil)
+ return err
+}
+
+// RenewCertificate takes a CertificateResource and tries to renew the certificate.
+// If the renewal process succeeds, the new certificate will be returned in a new CertificateResource.
+// Please be aware that this function will return a new certificate in ANY case that is not an error.
+// If the server does not provide us with a new cert on a GET request to the CertURL
+// this function will start a new-cert flow where a new certificate gets generated.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil.
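+//
+// A usage sketch (cert is assumed to come from an earlier ObtainCertificate call):
+//
+//	renewed, err := c.RenewCertificate(cert, true)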
+func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) {
+ // Input certificate is PEM encoded. Decode it here as we may need the decoded
+ // cert later on in the renewal process. The input may be a bundle or a single certificate.
+ certificates, err := parsePEMBundle(cert.Certificate)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ x509Cert := certificates[0]
+ if x509Cert.IsCA {
+ return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain)
+ }
+
+	// This is just meant to be informational for the user.
+ timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC())
+ logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours()))
+
+ // The first step of renewal is to check if we get a renewed cert
+ // directly from the cert URL.
+ resp, err := httpGet(cert.CertURL)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ defer resp.Body.Close()
+ serverCertBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ serverCert, err := x509.ParseCertificate(serverCertBytes)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ // If the server responds with a different certificate we are effectively renewed.
+ // TODO: Further test if we can actually use the new certificate (Our private key works)
+ if !x509Cert.Equal(serverCert) {
+ logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain)
+ issuedCert := pemEncode(derCertificateBytes(serverCertBytes))
+ // If bundle is true, we want to return a certificate bundle.
+ // To do this, we need the issuer certificate.
+ if bundle {
+ // The issuer certificate link is always supplied via an "up" link
+ // in the response headers of a new certificate.
+ links := parseLinks(resp.Header["Link"])
+ issuerCert, err := c.getIssuerCertificate(links["up"])
+ if err != nil {
+ // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+ logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err)
+ } else {
+ // Success - append the issuer cert to the issued cert.
+ issuerCert = pemEncode(derCertificateBytes(issuerCert))
+ issuedCert = append(issuedCert, issuerCert...)
+ }
+ }
+
+ cert.Certificate = issuedCert
+ return cert, nil
+ }
+
+ var privKey crypto.PrivateKey
+ if cert.PrivateKey != nil {
+ privKey, err = parsePEMPrivateKey(cert.PrivateKey)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+
+ var domains []string
+ var failures map[string]error
+ // check for SAN certificate
+ if len(x509Cert.DNSNames) > 1 {
+ domains = append(domains, x509Cert.Subject.CommonName)
+ for _, sanDomain := range x509Cert.DNSNames {
+ if sanDomain == x509Cert.Subject.CommonName {
+ continue
+ }
+ domains = append(domains, sanDomain)
+ }
+ } else {
+ domains = append(domains, x509Cert.Subject.CommonName)
+ }
+
+ newCert, failures := c.ObtainCertificate(domains, bundle, privKey)
+ return newCert, failures[cert.Domain]
+}
+
+// Looks through the challenge combinations to find a solvable match.
+// Then solves the challenges in series and returns.
+func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error {
+ // loop through the resources, basically through the domains.
+ failures := make(map[string]error)
+ for _, authz := range challenges {
+ // no solvers - no solving
+ if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil {
+ for i, solver := range solvers {
+ // TODO: do not immediately fail if one domain fails to validate.
+ err := solver.Solve(authz.Body.Challenges[i], authz.Domain)
+ if err != nil {
+ failures[authz.Domain] = err
+ }
+ }
+ } else {
+ failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain)
+ }
+ }
+
+ return failures
+}
+
+// Checks all combinations from the server and returns an array of
+// solvers which should get executed in series.
+func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver {
+ for _, combination := range auth.Combinations {
+ solvers := make(map[int]solver)
+ for _, idx := range combination {
+ if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok {
+ solvers[idx] = solver
+ } else {
+ logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type)
+ }
+ }
+
+ // If we can solve the whole combination, return the solvers
+ if len(solvers) == len(combination) {
+ return solvers
+ }
+ }
+ return nil
+}
+
+// Get the challenges needed to prove our identifiers to the ACME server.
+func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) {
+ resc, errc := make(chan authorizationResource), make(chan domainError)
+
+ for _, domain := range domains {
+ go func(domain string) {
+ authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}}
+ var authz authorization
+ hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz)
+ if err != nil {
+ errc <- domainError{Domain: domain, Error: err}
+ return
+ }
+
+ links := parseLinks(hdr["Link"])
+ if links["next"] == "" {
+ logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain)
+ return
+ }
+
+ resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain}
+ }(domain)
+ }
+
+ responses := make(map[string]authorizationResource)
+ failures := make(map[string]error)
+ for i := 0; i < len(domains); i++ {
+ select {
+ case res := <-resc:
+ responses[res.Domain] = res
+ case err := <-errc:
+ failures[err.Domain] = err.Error
+ }
+ }
+
+ challenges := make([]authorizationResource, 0, len(responses))
+ for _, domain := range domains {
+ if challenge, ok := responses[domain]; ok {
+ challenges = append(challenges, challenge)
+ }
+ }
+
+ close(resc)
+ close(errc)
+
+ return challenges, failures
+}
+
+func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) {
+ if len(authz) == 0 {
+ return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!")
+ }
+
+ commonName := authz[0]
+ var err error
+ if privKey == nil {
+ privKey, err = generatePrivateKey(c.keyType)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+
+ var san []string
+ var authURLs []string
+ for _, auth := range authz[1:] {
+ san = append(san, auth.Domain)
+ authURLs = append(authURLs, auth.AuthURL)
+ }
+
+ // TODO: should the CSR be customizable?
+ csr, err := generateCsr(privKey, commonName.Domain, san)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ csrString := base64.URLEncoding.EncodeToString(csr)
+ jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs})
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ resp, err := c.jws.post(commonName.NewCertURL, jsonBytes)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ privateKeyPem := pemEncode(privKey)
+ cerRes := CertificateResource{
+ Domain: commonName.Domain,
+ CertURL: resp.Header.Get("Location"),
+ PrivateKey: privateKeyPem}
+
+ for {
+ switch resp.StatusCode {
+ case 201, 202:
+ cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ resp.Body.Close()
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ // The server returns a body with a length of zero if the
+ // certificate was not ready at the time this request completed.
+ // Otherwise the body is the certificate.
+ if len(cert) > 0 {
+
+ cerRes.CertStableURL = resp.Header.Get("Content-Location")
+ cerRes.AccountRef = c.user.GetRegistration().URI
+
+ issuedCert := pemEncode(derCertificateBytes(cert))
+ // If bundle is true, we want to return a certificate bundle.
+ // To do this, we need the issuer certificate.
+ if bundle {
+ // The issuer certificate link is always supplied via an "up" link
+ // in the response headers of a new certificate.
+ links := parseLinks(resp.Header["Link"])
+ issuerCert, err := c.getIssuerCertificate(links["up"])
+ if err != nil {
+ // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+ logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err)
+ } else {
+ // Success - append the issuer cert to the issued cert.
+ issuerCert = pemEncode(derCertificateBytes(issuerCert))
+ issuedCert = append(issuedCert, issuerCert...)
+ }
+ }
+
+ cerRes.Certificate = issuedCert
+ logf("[INFO][%s] Server responded with a certificate.", commonName.Domain)
+ return cerRes, nil
+ }
+
+ // The certificate was granted but is not yet issued.
+ // Check retry-after and loop.
+ ra := resp.Header.Get("Retry-After")
+ retryAfter, err := strconv.Atoi(ra)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter)
+ time.Sleep(time.Duration(retryAfter) * time.Second)
+
+ break
+ default:
+ return CertificateResource{}, handleHTTPError(resp)
+ }
+
+ resp, err = httpGet(cerRes.CertURL)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+}
+
+// getIssuerCertificate requests the issuer certificate and caches it for
+// subsequent requests.
+func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
+ logf("[INFO] acme: Requesting issuer cert from %s", url)
+ if c.issuerCert != nil {
+ return c.issuerCert, nil
+ }
+
+ resp, err := httpGet(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = x509.ParseCertificate(issuerBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ c.issuerCert = issuerBytes
+ return issuerBytes, err
+}
+
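+// parseLinks parses HTTP Link headers of the form <url>; rel="name"
+// into a map from the rel value to the URL.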
+func parseLinks(links []string) map[string]string {
+ aBrkt := regexp.MustCompile("[<>]")
+ slver := regexp.MustCompile("(.+) *= *\"(.+)\"")
+ linkMap := make(map[string]string)
+
+ for _, link := range links {
+
+ link = aBrkt.ReplaceAllString(link, "")
+ parts := strings.Split(link, ";")
+
+ matches := slver.FindStringSubmatch(parts[1])
+ if len(matches) > 0 {
+ linkMap[matches[2]] = parts[0]
+ }
+ }
+
+ return linkMap
+}
+
+// validate makes the ACME server start validating a
+// challenge response, only returning once it is done.
+func validate(j *jws, domain, uri string, chlng challenge) error {
+ var challengeResponse challenge
+
+ hdr, err := postJSON(j, uri, chlng, &challengeResponse)
+ if err != nil {
+ return err
+ }
+
+ // After the path is sent, the ACME server will access our server.
+ // Repeatedly check the server for an updated status on our request.
+ for {
+ switch challengeResponse.Status {
+ case "valid":
+ logf("[INFO][%s] The server validated our request", domain)
+ return nil
+ case "pending":
+ break
+ case "invalid":
+ return handleChallengeError(challengeResponse)
+ default:
+ return errors.New("The server returned an unexpected state.")
+ }
+
+ ra, err := strconv.Atoi(hdr.Get("Retry-After"))
+ if err != nil {
+ // The ACME server MUST return a Retry-After.
+ // If it doesn't, we'll just poll hard.
+ ra = 1
+ }
+ time.Sleep(time.Duration(ra) * time.Second)
+
+ hdr, err = getJSON(uri, &challengeResponse)
+ if err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go
new file mode 100644
index 000000000..e309554f3
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go
@@ -0,0 +1,198 @@
+package acme
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestNewClient(t *testing.T) {
+ keyBits := 32 // small value keeps test fast
+ keyType := RSA2048
+ key, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ t.Fatal("Could not generate test key:", err)
+ }
+ user := mockUser{
+ email: "test@test.com",
+ regres: new(RegistrationResource),
+ privatekey: key,
+ }
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+ w.Write(data)
+ }))
+
+ client, err := NewClient(ts.URL, user, keyType)
+ if err != nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+
+ if client.jws == nil {
+ t.Fatalf("Expected client.jws to not be nil")
+ }
+ if expected, actual := key, client.jws.privKey; actual != expected {
+ t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual)
+ }
+
+ if client.keyType != keyType {
+ t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType)
+ }
+
+ if expected, actual := 2, len(client.solvers); actual != expected {
+ t.Fatalf("Expected %d solver(s), got %d", expected, actual)
+ }
+}
+
+func TestClientOptPort(t *testing.T) {
+ keyBits := 32 // small value keeps test fast
+ key, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ t.Fatal("Could not generate test key:", err)
+ }
+ user := mockUser{
+ email: "test@test.com",
+ regres: new(RegistrationResource),
+ privatekey: key,
+ }
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+ w.Write(data)
+ }))
+
+ optPort := "1234"
+ optHost := ""
+ client, err := NewClient(ts.URL, user, RSA2048)
+ if err != nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+ client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+ client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+ httpSolver, ok := client.solvers[HTTP01].(*httpChallenge)
+ if !ok {
+ t.Fatal("Expected http-01 solver to be httpChallenge type")
+ }
+ if httpSolver.jws != client.jws {
+ t.Error("Expected http-01 to have same jws as client")
+ }
+ if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort {
+ t.Errorf("Expected http-01 to have port %s but was %s", optPort, got)
+ }
+ if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+ t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+ }
+
+ httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge)
+ if !ok {
+		t.Fatal("Expected tls-sni-01 solver to be tlsSNIChallenge type")
+ }
+ if httpsSolver.jws != client.jws {
+ t.Error("Expected tls-sni-01 to have same jws as client")
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+ t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost {
+		t.Errorf("Expected tls-sni-01 to have iface %s but was %s", optHost, got)
+ }
+
+ // test setting different host
+ optHost = "127.0.0.1"
+ client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+ client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+ if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+ t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+ t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+ }
+}
+
+func TestValidate(t *testing.T) {
+ var statuses []string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Minimal stub ACME server for validation.
+ w.Header().Add("Replay-Nonce", "12345")
+ w.Header().Add("Retry-After", "0")
+ switch r.Method {
+ case "HEAD":
+ case "POST":
+ st := statuses[0]
+ statuses = statuses[1:]
+ writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+ case "GET":
+ st := statuses[0]
+ statuses = statuses[1:]
+ writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+ default:
+ http.Error(w, r.Method, http.StatusMethodNotAllowed)
+ }
+ }))
+ defer ts.Close()
+
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey, directoryURL: ts.URL}
+
+ tsts := []struct {
+ name string
+ statuses []string
+ want string
+ }{
+ {"POST-unexpected", []string{"weird"}, "unexpected"},
+ {"POST-valid", []string{"valid"}, ""},
+ {"POST-invalid", []string{"invalid"}, "Error Detail"},
+ {"GET-unexpected", []string{"pending", "weird"}, "unexpected"},
+ {"GET-valid", []string{"pending", "valid"}, ""},
+ {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"},
+ }
+
+ for _, tst := range tsts {
+ statuses = tst.statuses
+ if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" {
+ t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+ } else if err != nil && !strings.Contains(err.Error(), tst.want) {
+ t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+ }
+ }
+}
+
+// writeJSONResponse marshals the body as JSON and writes it to the response.
+func writeJSONResponse(w http.ResponseWriter, body interface{}) {
+ bs, err := json.Marshal(body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if _, err := w.Write(bs); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// stubValidate is like validate, except it does nothing.
+func stubValidate(j *jws, domain, uri string, chlng challenge) error {
+ return nil
+}
+
+type mockUser struct {
+ email string
+ regres *RegistrationResource
+ privatekey *rsa.PrivateKey
+}
+
+func (u mockUser) GetEmail() string { return u.email }
+func (u mockUser) GetRegistration() *RegistrationResource { return u.regres }
+func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey }
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go
new file mode 100644
index 000000000..fc20442f7
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go
@@ -0,0 +1,323 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "net/http"
+ "strings"
+ "time"
+
+ "golang.org/x/crypto/ocsp"
+)
+
+// KeyType represents the key algo as well as the key size or curve to use.
+type KeyType string
+type derCertificateBytes []byte
+
+// Constants for all key types we support.
+const (
+ EC256 = KeyType("P256")
+ EC384 = KeyType("P384")
+ RSA2048 = KeyType("2048")
+ RSA4096 = KeyType("4096")
+ RSA8192 = KeyType("8192")
+)
+
+const (
+ // OCSPGood means that the certificate is valid.
+ OCSPGood = ocsp.Good
+ // OCSPRevoked means that the certificate has been deliberately revoked.
+ OCSPRevoked = ocsp.Revoked
+ // OCSPUnknown means that the OCSP responder doesn't know about the certificate.
+ OCSPUnknown = ocsp.Unknown
+ // OCSPServerFailed means that the OCSP responder failed to process the request.
+ OCSPServerFailed = ocsp.ServerFailed
+)
+
+// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
+// the parsed response, and an error, if any. The returned []byte can be passed directly
+// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
+// issued certificate, this function will try to get the issuer certificate from the
+// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
+// values are nil, the OCSP status may be assumed OCSPUnknown.
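+//
+// A usage sketch (bundle and tlsCert are placeholders for a PEM chain and a tls.Certificate):
+//
+//	raw, resp, err := GetOCSPForCert(bundle)
+//	if err == nil && resp.Status == OCSPGood {
+//		tlsCert.OCSPStaple = raw
+//	}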
+func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
+ certificates, err := parsePEMBundle(bundle)
+ if err != nil {
+ return nil, nil, err
+ }
+
+	// We expect the certificate slice to be ordered down the chain
+	// (SRV CRT -> CA). We need to pull the leaf and issuer certs out of it,
+ // which should always be the first two certificates. If there's no
+ // OCSP server listed in the leaf cert, there's nothing to do. And if
+ // we have only one certificate so far, we need to get the issuer cert.
+ issuedCert := certificates[0]
+ if len(issuedCert.OCSPServer) == 0 {
+ return nil, nil, errors.New("no OCSP server specified in cert")
+ }
+ if len(certificates) == 1 {
+ // TODO: build fallback. If this fails, check the remaining array entries.
+ if len(issuedCert.IssuingCertificateURL) == 0 {
+ return nil, nil, errors.New("no issuing certificate URL")
+ }
+
+ resp, err := httpGet(issuedCert.IssuingCertificateURL[0])
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ issuerCert, err := x509.ParseCertificate(issuerBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Insert it into the slice on position 0
+ // We want it ordered right SRV CRT -> CA
+ certificates = append(certificates, issuerCert)
+ }
+ issuerCert := certificates[1]
+
+ // Finally kick off the OCSP request.
+ ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ reader := bytes.NewReader(ocspReq)
+ req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer req.Body.Close()
+
+	ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024))
+	if err != nil {
+		return nil, nil, err
+	}
+	ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
+	if err != nil {
+		return nil, nil, err
+	}
+
+ if ocspRes.Certificate == nil {
+ err = ocspRes.CheckSignatureFrom(issuerCert)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return ocspResBytes, ocspRes, nil
+}
+
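+// getKeyAuthorization builds the ACME key authorization for a challenge:
+// the token, a ".", and the unpadded base64url SHA-256 thumbprint of the
+// account key's JWK.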
+func getKeyAuthorization(token string, key interface{}) (string, error) {
+ var publicKey crypto.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ publicKey = k.Public()
+ case *rsa.PrivateKey:
+ publicKey = k.Public()
+ }
+
+ // Generate the Key Authorization for the challenge
+ jwk := keyAsJWK(publicKey)
+ if jwk == nil {
+ return "", errors.New("Could not generate JWK from key.")
+ }
+ thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+
+ // unpad the base64URL
+ keyThumb := base64.URLEncoding.EncodeToString(thumbBytes)
+ index := strings.Index(keyThumb, "=")
+ if index != -1 {
+ keyThumb = keyThumb[:index]
+ }
+
+ return token + "." + keyThumb, nil
+}
+
+// parsePEMBundle parses a certificate bundle from top to bottom and returns
+// a slice of x509 certificates. This function will error if no certificates are found.
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) {
+ var certificates []*x509.Certificate
+ var certDERBlock *pem.Block
+
+ for {
+ certDERBlock, bundle = pem.Decode(bundle)
+ if certDERBlock == nil {
+ break
+ }
+
+ if certDERBlock.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(certDERBlock.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ }
+ }
+
+ if len(certificates) == 0 {
+ return nil, errors.New("No certificates were found while parsing the bundle.")
+ }
+
+ return certificates, nil
+}
+
+func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) {
+	keyBlock, _ := pem.Decode(key)
+	if keyBlock == nil {
+		return nil, errors.New("no PEM block found in private key")
+	}
+
+ switch keyBlock.Type {
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(keyBlock.Bytes)
+ default:
+ return nil, errors.New("Unknown PEM header value")
+ }
+}
+
+func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) {
+
+ switch keyType {
+ case EC256:
+ return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ case EC384:
+ return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+ case RSA2048:
+ return rsa.GenerateKey(rand.Reader, 2048)
+ case RSA4096:
+ return rsa.GenerateKey(rand.Reader, 4096)
+ case RSA8192:
+ return rsa.GenerateKey(rand.Reader, 8192)
+ }
+
+ return nil, fmt.Errorf("Invalid KeyType: %s", keyType)
+}
+
+func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) {
+ template := x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: domain,
+ },
+ }
+
+ if len(san) > 0 {
+ template.DNSNames = san
+ }
+
+ return x509.CreateCertificateRequest(rand.Reader, &template, privateKey)
+}
+
+func pemEncode(data interface{}) []byte {
+ var pemBlock *pem.Block
+ switch key := data.(type) {
+ case *ecdsa.PrivateKey:
+ keyBytes, _ := x509.MarshalECPrivateKey(key)
+ pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
+	case *rsa.PrivateKey:
+		pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}
+ case derCertificateBytes:
+ pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))}
+ }
+
+ return pem.EncodeToMemory(pemBlock)
+}
+
+func pemDecode(data []byte) (*pem.Block, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?")
+ }
+
+ return pemBlock, nil
+}
+
+func pemDecodeTox509(pem []byte) (*x509.Certificate, error) {
+ pemBlock, err := pemDecode(pem)
+ if pemBlock == nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificate(pemBlock.Bytes)
+}
+
+// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate.
+// The certificate has to be PEM encoded. Any other encodings like DER will fail.
+func GetPEMCertExpiration(cert []byte) (time.Time, error) {
+ pemBlock, err := pemDecode(cert)
+ if pemBlock == nil {
+ return time.Time{}, err
+ }
+
+ return getCertExpiration(pemBlock.Bytes)
+}
+
+// getCertExpiration returns the "NotAfter" date of a DER encoded certificate.
+func getCertExpiration(cert []byte) (time.Time, error) {
+ pCert, err := x509.ParseCertificate(cert)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ return pCert.NotAfter, nil
+}
+
+func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) {
+ derBytes, err := generateDerCert(privKey, time.Time{}, domain)
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil
+}
+
+func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ return nil, err
+ }
+
+	if expiration.IsZero() {
+		// Add takes a time.Duration, so the intended one-year default must be spelled out.
+		expiration = time.Now().Add(365 * 24 * time.Hour)
+	}
+
+ template := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ CommonName: "ACME Challenge TEMP",
+ },
+ NotBefore: time.Now(),
+ NotAfter: expiration,
+
+ KeyUsage: x509.KeyUsageKeyEncipherment,
+ BasicConstraintsValid: true,
+ DNSNames: []string{domain},
+ }
+
+ return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+}
+
+func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser {
+ return http.MaxBytesReader(nil, rd, numBytes)
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go
new file mode 100644
index 000000000..d2fc5088b
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go
@@ -0,0 +1,93 @@
+package acme
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+ "time"
+)
+
+func TestGeneratePrivateKey(t *testing.T) {
+ key, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ t.Error("Error generating private key:", err)
+ }
+ if key == nil {
+ t.Error("Expected key to not be nil, but it was")
+ }
+}
+
+func TestGenerateCSR(t *testing.T) {
+ key, err := rsa.GenerateKey(rand.Reader, 512)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ csr, err := generateCsr(key, "fizz.buzz", nil)
+ if err != nil {
+ t.Error("Error generating CSR:", err)
+ }
+ if csr == nil || len(csr) == 0 {
+ t.Error("Expected CSR with data, but it was nil or length 0")
+ }
+}
+
+func TestPEMEncode(t *testing.T) {
+ buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+ reader := MockRandReader{b: buf}
+ key, err := rsa.GenerateKey(reader, 32)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ data := pemEncode(key)
+
+ if data == nil {
+ t.Fatal("Expected result to not be nil, but it was")
+ }
+ if len(data) != 127 {
+ t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data))
+ }
+}
+
+func TestPEMCertExpiration(t *testing.T) {
+ privKey, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ expiration := time.Now().Add(365)
+ expiration = expiration.Round(time.Second)
+ certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com")
+ if err != nil {
+ t.Fatal("Error generating cert:", err)
+ }
+
+ buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+ // Some random string should return an error.
+ if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil {
+ t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime)
+ }
+
+ // A DER encoded certificate should return an error.
+ if _, err := GetPEMCertExpiration(certBytes); err == nil {
+ t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.")
+ }
+
+ // A PEM encoded certificate should work ok.
+ pemCert := pemEncode(derCertificateBytes(certBytes))
+ if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) {
+ t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err)
+ }
+}
+
+type MockRandReader struct {
+ b *bytes.Buffer
+}
+
+func (r MockRandReader) Read(p []byte) (int, error) {
+ return r.b.Read(p)
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go
new file mode 100644
index 000000000..b32561a3a
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go
@@ -0,0 +1,73 @@
+package acme
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+const (
+ tosAgreementError = "Must agree to subscriber agreement before any further actions"
+)
+
+// RemoteError is the base type for all errors specific to the ACME protocol.
+type RemoteError struct {
+ StatusCode int `json:"status,omitempty"`
+ Type string `json:"type"`
+ Detail string `json:"detail"`
+}
+
+func (e RemoteError) Error() string {
+ return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail)
+}
+
+// TOSError represents the error which is returned if the user needs to
+// accept the TOS.
+// TODO: include the new TOS url if we can somehow obtain it.
+type TOSError struct {
+ RemoteError
+}
+
+type domainError struct {
+ Domain string
+ Error error
+}
+
+type challengeError struct {
+ RemoteError
+ records []validationRecord
+}
+
+func (c challengeError) Error() string {
+
+ var errStr string
+ for _, validation := range c.records {
+ errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n",
+ validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress)
+ }
+
+ return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr)
+}
+
+func handleHTTPError(resp *http.Response) error {
+ var errorDetail RemoteError
+ decoder := json.NewDecoder(resp.Body)
+ err := decoder.Decode(&errorDetail)
+ if err != nil {
+ return err
+ }
+
+ errorDetail.StatusCode = resp.StatusCode
+
+ // Check for errors we handle specifically
+ if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError {
+ return TOSError{errorDetail}
+ }
+
+ return errorDetail
+}
+
+func handleChallengeError(chlng challenge) error {
+ return challengeError{chlng.Error, chlng.ValidationRecords}
+}
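As a rough illustration of how callers might tell these error types apart (this sketch is not part of the vendored file, and the status code and detail strings are placeholders), a type switch separates a TOSError from any other RemoteError:

```go
package main

import (
	"fmt"

	"github.com/xenolf/lego/acme"
)

// describeACMEError shows how the error types above can be told apart.
func describeACMEError(err error) string {
	switch e := err.(type) {
	case acme.TOSError:
		// The CA wants the account to accept an updated subscriber agreement.
		return fmt.Sprintf("terms of service must be accepted: %v", e)
	case acme.RemoteError:
		// Any other protocol-level failure reported by the ACME server.
		return fmt.Sprintf("ACME server returned %d (%s): %s", e.StatusCode, e.Type, e.Detail)
	default:
		return fmt.Sprintf("local error: %v", err)
	}
}

func main() {
	// Placeholder values; a real RemoteError comes from handleHTTPError.
	err := acme.RemoteError{StatusCode: 400, Type: "urn:acme:error:malformed", Detail: "bad CSR"}
	fmt.Println(describeACMEError(err))
}
```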
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go
new file mode 100644
index 000000000..410aead6d
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go
@@ -0,0 +1,117 @@
+package acme
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests.
+var UserAgent string
+
+// defaultClient is an HTTP client with a reasonable timeout value.
+var defaultClient = http.Client{Timeout: 10 * time.Second}
+
+const (
+ // defaultGoUserAgent is the Go HTTP package user agent string. Too
+ // bad it isn't exported. If it changes, we should update it here, too.
+ defaultGoUserAgent = "Go-http-client/1.1"
+
+ // ourUserAgent is the User-Agent of this underlying library package.
+ ourUserAgent = "xenolf-acme"
+)
+
+// httpHead performs a HEAD request with a proper User-Agent string.
+// The response body (resp.Body) is already closed when this function returns.
+func httpHead(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("User-Agent", userAgent())
+
+ resp, err = defaultClient.Do(req)
+ if err != nil {
+ return resp, err
+ }
+ resp.Body.Close()
+ return resp, err
+}
+
+// httpPost performs a POST request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ req.Header.Set("User-Agent", userAgent())
+
+ return defaultClient.Do(req)
+}
+
+// httpGet performs a GET request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpGet(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent())
+
+ return defaultClient.Do(req)
+}
+
+// getJSON performs an HTTP GET request and parses the response body
+// as JSON, into the provided respBody object.
+func getJSON(uri string, respBody interface{}) (http.Header, error) {
+ resp, err := httpGet(uri)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get %q: %v", uri, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// postJSON performs an HTTP POST request and parses the response body
+// as JSON, into the provided respBody object.
+func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) {
+ jsonBytes, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, errors.New("Failed to marshal network message...")
+ }
+
+ resp, err := j.post(uri, jsonBytes)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to post JWS message. -> %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ if respBody == nil {
+ return resp.Header, nil
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// userAgent builds and returns the User-Agent string to use in requests.
+func userAgent() string {
+ ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent)
+ return strings.TrimSpace(ua)
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go
new file mode 100644
index 000000000..95cb1fd81
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go
@@ -0,0 +1,41 @@
+package acme
+
+import (
+ "fmt"
+ "log"
+)
+
+type httpChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+// HTTP01ChallengePath returns the URL path for the `http-01` challenge
+func HTTP01ChallengePath(token string) string {
+ return "/.well-known/acme-challenge/" + token
+}
+
+func (s *httpChallenge) Solve(chlng challenge, domain string) error {
+
+ logf("[INFO][%s] acme: Trying to solve HTTP-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = s.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+
+ return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
new file mode 100644
index 000000000..42541380c
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
@@ -0,0 +1,79 @@
+package acme
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// HTTPProviderServer implements ChallengeProvider for the `http-01` challenge.
+// It may be instantiated without using the NewHTTPProviderServer function if
+// you only want to use the default values.
+type HTTPProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 80 respectively.
+func NewHTTPProviderServer(iface, port string) *HTTPProviderServer {
+ return &HTTPProviderServer{iface: iface, port: port}
+}
+
+// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests.
+func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "80"
+ }
+
+ var err error
+ s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port))
+ if err != nil {
+ return fmt.Errorf("Could not start HTTP server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go s.serve(domain, token, keyAuth)
+ return nil
+}
+
+// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)`
+func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
+
+func (s *HTTPProviderServer) serve(domain, token, keyAuth string) {
+ path := HTTP01ChallengePath(token)
+
+	// The handler validates the Host header and the request method.
+	// For valid requests it responds with the key authorization derived from the challenge token.
+ mux := http.NewServeMux()
+ mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+ if strings.HasPrefix(r.Host, domain) && r.Method == "GET" {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte(keyAuth))
+ logf("[INFO][%s] Served key authentication", domain)
+ } else {
+ logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method)
+ w.Write([]byte("TEST"))
+ }
+ })
+
+ httpServer := &http.Server{
+ Handler: mux,
+ }
+ // Once httpServer is shut down we don't want any lingering
+ // connections, so disable KeepAlives.
+ httpServer.SetKeepAlivesEnabled(false)
+ httpServer.Serve(s.listener)
+ s.done <- true
+}
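A minimal sketch of exercising HTTPProviderServer by hand, assuming the package is imported from its upstream path; the port, token and key authorization below are placeholder values chosen so the example can run without privileges:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/xenolf/lego/acme"
)

func main() {
	// Serve the challenge response on port 8080 instead of the default 80.
	provider := acme.NewHTTPProviderServer("", "8080")

	// In real use the token and key authorization come from the ACME server
	// and the account key; these are stand-in values.
	token, keyAuth := "example-token", "example-token.example-thumbprint"
	if err := provider.Present("localhost", token, keyAuth); err != nil {
		panic(err)
	}
	defer provider.CleanUp("localhost", token, keyAuth)

	// The key authorization is now reachable at the well-known challenge path.
	resp, err := http.Get("http://localhost:8080" + acme.HTTP01ChallengePath(token))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // prints the key authorization
}
```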
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
new file mode 100644
index 000000000..fdd8f4d27
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
@@ -0,0 +1,57 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestHTTPChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token
+ resp, err := httpGet(uri)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if want := "text/plain"; resp.Header.Get("Content-Type") != want {
+ t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ bodyStr := string(body)
+
+ if bodyStr != chlng.KeyAuthorization {
+ t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization)
+ }
+
+ return nil
+ }
+ solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestHTTPChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http2"}
+ solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go
new file mode 100644
index 000000000..33a48a331
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go
@@ -0,0 +1,100 @@
+package acme
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestHTTPHeadUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ _, err := httpHead(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if method != "HEAD" {
+ t.Errorf("Expected method to be HEAD, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPGetUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpGet(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "GET" {
+ t.Errorf("Expected method to be GET, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPPostUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "POST" {
+ t.Errorf("Expected method to be POST, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestUserAgent(t *testing.T) {
+ ua := userAgent()
+
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if strings.HasSuffix(ua, " ") {
+ t.Errorf("UA should not have trailing spaces; got '%s'", ua)
+ }
+
+ // customize the UA by appending a value
+ UserAgent = "MyApp/1.2.3"
+ ua = userAgent()
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if !strings.Contains(ua, UserAgent) {
+ t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua)
+ }
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go
new file mode 100644
index 000000000..8435d0cfc
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go
@@ -0,0 +1,107 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "fmt"
+ "net/http"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type jws struct {
+ directoryURL string
+ privKey crypto.PrivateKey
+ nonces []string
+}
+
+func keyAsJWK(key interface{}) *jose.JsonWebKey {
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "EC"}
+ case *rsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "RSA"}
+
+ default:
+ return nil
+ }
+}
+
+// Posts a JWS signed message to the specified URL
+func (j *jws) post(url string, content []byte) (*http.Response, error) {
+ signedContent, err := j.signContent(content)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
+ if err != nil {
+ return nil, err
+ }
+
+ j.getNonceFromResponse(resp)
+
+ return resp, err
+}
+
+func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
+
+ var alg jose.SignatureAlgorithm
+ switch k := j.privKey.(type) {
+ case *rsa.PrivateKey:
+ alg = jose.RS256
+ case *ecdsa.PrivateKey:
+ if k.Curve == elliptic.P256() {
+ alg = jose.ES256
+ } else if k.Curve == elliptic.P384() {
+ alg = jose.ES384
+ }
+ }
+
+ signer, err := jose.NewSigner(alg, j.privKey)
+ if err != nil {
+ return nil, err
+ }
+ signer.SetNonceSource(j)
+
+ signed, err := signer.Sign(content)
+ if err != nil {
+ return nil, err
+ }
+ return signed, nil
+}
+
+func (j *jws) getNonceFromResponse(resp *http.Response) error {
+ nonce := resp.Header.Get("Replay-Nonce")
+ if nonce == "" {
+ return fmt.Errorf("Server did not respond with a proper nonce header.")
+ }
+
+ j.nonces = append(j.nonces, nonce)
+ return nil
+}
+
+func (j *jws) getNonce() error {
+ resp, err := httpHead(j.directoryURL)
+ if err != nil {
+ return err
+ }
+
+ return j.getNonceFromResponse(resp)
+}
+
+func (j *jws) Nonce() (string, error) {
+ nonce := ""
+ if len(j.nonces) == 0 {
+ err := j.getNonce()
+ if err != nil {
+ return nonce, err
+ }
+ }
+
+ nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
+ return nonce, nil
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go
new file mode 100644
index 000000000..d1fac9200
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go
@@ -0,0 +1,115 @@
+package acme
+
+import (
+ "time"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type directory struct {
+ NewAuthzURL string `json:"new-authz"`
+ NewCertURL string `json:"new-cert"`
+ NewRegURL string `json:"new-reg"`
+ RevokeCertURL string `json:"revoke-cert"`
+}
+
+type recoveryKeyMessage struct {
+ Length int `json:"length,omitempty"`
+ Client jose.JsonWebKey `json:"client,omitempty"`
+	Server jose.JsonWebKey `json:"server,omitempty"`
+}
+
+type registrationMessage struct {
+ Resource string `json:"resource"`
+ Contact []string `json:"contact"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// Registration is returned by the ACME server after the registration
+// The client implementation should save this registration somewhere.
+type Registration struct {
+ Resource string `json:"resource,omitempty"`
+ ID int `json:"id"`
+ Key jose.JsonWebKey `json:"key"`
+ Contact []string `json:"contact"`
+ Agreement string `json:"agreement,omitempty"`
+ Authorizations string `json:"authorizations,omitempty"`
+ Certificates string `json:"certificates,omitempty"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// RegistrationResource represents all important information about a registration
+// of which the client needs to keep track itself.
+type RegistrationResource struct {
+ Body Registration `json:"body,omitempty"`
+ URI string `json:"uri,omitempty"`
+ NewAuthzURL string `json:"new_authzr_uri,omitempty"`
+ TosURL string `json:"terms_of_service,omitempty"`
+}
+
+type authorizationResource struct {
+ Body authorization
+ Domain string
+ NewCertURL string
+ AuthURL string
+}
+
+type authorization struct {
+ Resource string `json:"resource,omitempty"`
+ Identifier identifier `json:"identifier"`
+ Status string `json:"status,omitempty"`
+ Expires time.Time `json:"expires,omitempty"`
+ Challenges []challenge `json:"challenges,omitempty"`
+ Combinations [][]int `json:"combinations,omitempty"`
+}
+
+type identifier struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+}
+
+type validationRecord struct {
+ URI string `json:"url,omitempty"`
+ Hostname string `json:"hostname,omitempty"`
+ Port string `json:"port,omitempty"`
+ ResolvedAddresses []string `json:"addressesResolved,omitempty"`
+ UsedAddress string `json:"addressUsed,omitempty"`
+}
+
+type challenge struct {
+ Resource string `json:"resource,omitempty"`
+ Type Challenge `json:"type,omitempty"`
+ Status string `json:"status,omitempty"`
+ URI string `json:"uri,omitempty"`
+ Token string `json:"token,omitempty"`
+ KeyAuthorization string `json:"keyAuthorization,omitempty"`
+ TLS bool `json:"tls,omitempty"`
+ Iterations int `json:"n,omitempty"`
+ Error RemoteError `json:"error,omitempty"`
+ ValidationRecords []validationRecord `json:"validationRecord,omitempty"`
+}
+
+type csrMessage struct {
+ Resource string `json:"resource,omitempty"`
+ Csr string `json:"csr"`
+ Authorizations []string `json:"authorizations"`
+}
+
+type revokeCertMessage struct {
+ Resource string `json:"resource"`
+ Certificate string `json:"certificate"`
+}
+
+// CertificateResource represents a CA issued certificate.
+// PrivateKey and Certificate are both already PEM encoded
+// and can be directly written to disk. Certificate may
+// be a certificate bundle, depending on the options supplied
+// to create it.
+type CertificateResource struct {
+ Domain string `json:"domain"`
+ CertURL string `json:"certUrl"`
+ CertStableURL string `json:"certStableUrl"`
+ AccountRef string `json:"accountRef,omitempty"`
+ PrivateKey []byte `json:"-"`
+ Certificate []byte `json:"-"`
+}
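Since CertificateResource carries PEM-encoded bytes, persisting it is just two file writes. The sketch below is illustrative only; the file-naming convention and the placeholder PEM contents are assumptions, not part of the library:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/xenolf/lego/acme"
)

// saveCertificateResource writes the PEM-encoded key and certificate to disk.
// The "<domain>.key" / "<domain>.crt" naming is only an example convention.
func saveCertificateResource(res acme.CertificateResource) error {
	if err := ioutil.WriteFile(res.Domain+".key", res.PrivateKey, 0600); err != nil {
		return err
	}
	return ioutil.WriteFile(res.Domain+".crt", res.Certificate, 0644)
}

func main() {
	// Placeholder contents; real values come back from the CA.
	res := acme.CertificateResource{
		Domain:      "example.com",
		PrivateKey:  []byte("-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----\n"),
		Certificate: []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"),
	}
	if err := saveCertificateResource(res); err != nil {
		log.Fatal(err)
	}
}
```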
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go
new file mode 100644
index 000000000..d177ff07a
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go
@@ -0,0 +1,28 @@
+package acme
+
+import "time"
+
+// ChallengeProvider enables implementing a custom challenge
+// provider. Present presents the solution to a challenge available to
+// be solved. CleanUp will be called by the challenge if Present ends
+// in a non-error state.
+type ChallengeProvider interface {
+ Present(domain, token, keyAuth string) error
+ CleanUp(domain, token, keyAuth string) error
+}
+
+// ChallengeProviderTimeout allows for implementing a
+// ChallengeProvider where an unusually long timeout is required when
+// waiting for an ACME challenge to be satisfied, such as when
+// checking for DNS record propagation. If an implementor of a
+// ChallengeProvider provides a Timeout method, then the return values
+// of the Timeout method will be used when appropriate by the acme
+// package. The interval value is the time between checks.
+//
+// The default values used for timeout and interval are 60 seconds and
+// 2 seconds respectively. These are used when no Timeout method is
+// defined for the ChallengeProvider.
+type ChallengeProviderTimeout interface {
+ ChallengeProvider
+ Timeout() (timeout, interval time.Duration)
+}
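A hypothetical in-memory provider makes the two interfaces above concrete; adding a Timeout method is all that is needed to also satisfy ChallengeProviderTimeout. The type and its behaviour are illustrative, not part of the library:

```go
package main

import (
	"fmt"
	"time"

	"github.com/xenolf/lego/acme"
)

// inMemoryProvider is a hypothetical ChallengeProvider that only records the
// challenge material; a real provider would publish it over HTTP, TLS or DNS.
type inMemoryProvider struct {
	keyAuths map[string]string
}

func (p *inMemoryProvider) Present(domain, token, keyAuth string) error {
	p.keyAuths[domain] = keyAuth
	fmt.Printf("presenting challenge for %s (token %s)\n", domain, token)
	return nil
}

func (p *inMemoryProvider) CleanUp(domain, token, keyAuth string) error {
	delete(p.keyAuths, domain)
	return nil
}

// Timeout also satisfies ChallengeProviderTimeout, stretching validation to
// five minutes with a ten-second poll interval instead of the 60s/2s defaults.
func (p *inMemoryProvider) Timeout() (timeout, interval time.Duration) {
	return 5 * time.Minute, 10 * time.Second
}

// Compile-time checks that both interfaces are implemented.
var (
	_ acme.ChallengeProvider        = (*inMemoryProvider)(nil)
	_ acme.ChallengeProviderTimeout = (*inMemoryProvider)(nil)
)

func main() {
	p := &inMemoryProvider{keyAuths: map[string]string{}}
	_ = p.Present("example.com", "token", "token.keyauth")
}
```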
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
new file mode 100644
index 000000000..f184b17a5
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
@@ -0,0 +1,73 @@
+package acme
+
+import (
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "log"
+)
+
+type tlsSNIChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error {
+ // FIXME: https://github.com/ietf-wg-acme/acme/pull/22
+ // Currently we implement this challenge to track boulder, not the current spec!
+
+ logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = t.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := t.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+ return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
+
+// TLSSNI01ChallengeCertDomain returns a certificate and the target SNI domain for the `tls-sni-01` challenge.
+func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error) {
+ // generate a new RSA key for the certificates
+ tempPrivKey, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+ rsaPrivKey := tempPrivKey.(*rsa.PrivateKey)
+ rsaPrivPEM := pemEncode(rsaPrivKey)
+
+ zBytes := sha256.Sum256([]byte(keyAuth))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+ tempCertPEM, err := generatePemCert(rsaPrivKey, domain)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ return certificate, domain, nil
+}
+
+// TLSSNI01ChallengeCert returns a certificate for the `tls-sni-01` challenge
+func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, error) {
+ cert, _, err := TLSSNI01ChallengeCertDomain(keyAuth)
+ return cert, err
+}
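A short sketch of how the certificate and the derived `.acme.invalid` name returned by TLSSNI01ChallengeCertDomain might be wired into a tls.Config; the key authorization string is a placeholder:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/xenolf/lego/acme"
)

func main() {
	// keyAuth is normally derived from the challenge token and the account
	// key; this is a stand-in value.
	keyAuth := "example-token.example-thumbprint"

	cert, domain, err := acme.TLSSNI01ChallengeCertDomain(keyAuth)
	if err != nil {
		panic(err)
	}

	// The validation server must present this certificate when the CA asks
	// for the derived ".acme.invalid" name via SNI.
	cfg := &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if hello.ServerName == domain {
				return &cert, nil
			}
			return nil, fmt.Errorf("unexpected SNI name %q", hello.ServerName)
		},
	}
	_ = cfg // would be handed to tls.Listen or an http.Server

	fmt.Println("expecting SNI name:", domain)
}
```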
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
new file mode 100644
index 000000000..faaf16f6b
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
@@ -0,0 +1,62 @@
+package acme
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+)
+
+// TLSProviderServer implements ChallengeProvider for the `TLS-SNI-01` challenge.
+// It may be instantiated without using the NewTLSProviderServer function if
+// you only want to use the default values.
+type TLSProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 443 respectively.
+func NewTLSProviderServer(iface, port string) *TLSProviderServer {
+ return &TLSProviderServer{iface: iface, port: port}
+}
+
+// Present makes the keyAuth available as a cert
+func (s *TLSProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "443"
+ }
+
+ cert, err := TLSSNI01ChallengeCert(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ tlsConf := new(tls.Config)
+ tlsConf.Certificates = []tls.Certificate{cert}
+
+ s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf)
+ if err != nil {
+ return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go func() {
+ http.Serve(s.listener, nil)
+ s.done <- true
+ }()
+ return nil
+}
+
+// CleanUp closes the HTTP server.
+func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
new file mode 100644
index 000000000..3aec74565
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
@@ -0,0 +1,65 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestTLSSNIChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{
+ InsecureSkipVerify: true,
+ })
+ if err != nil {
+ t.Errorf("Expected to connect to challenge server without an error. %s", err.Error())
+ }
+
+ // Expect the server to only return one certificate
+ connState := conn.ConnectionState()
+ if count := len(connState.PeerCertificates); count != 1 {
+ t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count)
+ }
+
+ remoteCert := connState.PeerCertificates[0]
+ if count := len(remoteCert.DNSNames); count != 1 {
+ t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count)
+ }
+
+ zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+
+ if remoteCert.DNSNames[0] != domain {
+ t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0])
+ }
+
+ return nil
+ }
+ solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestTLSSNIChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"}
+ solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go
new file mode 100644
index 000000000..2fa0db304
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go
@@ -0,0 +1,29 @@
+package acme
+
+import (
+ "fmt"
+ "time"
+)
+
+// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'.
+func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error {
+ var lastErr string
+ timeup := time.After(timeout)
+ for {
+ select {
+ case <-timeup:
+ return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr)
+ default:
+ }
+
+ stop, err := f()
+ if stop {
+ return nil
+ }
+ if err != nil {
+ lastErr = err.Error()
+ }
+
+ time.Sleep(interval)
+ }
+}
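WaitFor works for any readiness-style polling, not just ACME validation. A small illustrative example, assuming a placeholder health-check URL:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/xenolf/lego/acme"
)

func main() {
	// Poll every 2 seconds, for at most 30 seconds, until the endpoint answers
	// with 200 OK. The URL is a placeholder.
	err := acme.WaitFor(30*time.Second, 2*time.Second, func() (bool, error) {
		resp, err := http.Get("http://localhost:8080/healthz")
		if err != nil {
			return false, err // keep polling; the error is remembered for the timeout message
		}
		resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil
	})
	if err != nil {
		fmt.Println("service never became ready:", err)
	}
}
```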
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go
new file mode 100644
index 000000000..158af4116
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go
@@ -0,0 +1,26 @@
+package acme
+
+import (
+ "testing"
+ "time"
+)
+
+func TestWaitForTimeout(t *testing.T) {
+ c := make(chan error)
+ go func() {
+ err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) {
+ return false, nil
+ })
+ c <- err
+ }()
+
+ timeout := time.After(4 * time.Second)
+ select {
+ case <-timeout:
+ t.Fatal("timeout exceeded")
+ case err := <-c:
+ if err == nil {
+ t.Errorf("expected timeout error; got %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/rsc/letsencrypt/vendor/vendor.json b/vendor/github.com/rsc/letsencrypt/vendor/vendor.json
new file mode 100644
index 000000000..8a4241102
--- /dev/null
+++ b/vendor/github.com/rsc/letsencrypt/vendor/vendor.json
@@ -0,0 +1,31 @@
+{
+ "comment": "",
+ "ignore": "",
+ "package": [
+ {
+ "checksumSHA1": "CHmdoMriAboKW2nHYSXo0yBizaE=",
+ "path": "github.com/xenolf/lego/acme",
+ "revision": "ca19a90028e242e878585941c2a27c8f3b3efc25",
+ "revisionTime": "2016-03-28T16:28:34Z"
+ },
+ {
+ "checksumSHA1": "jrheBzltbBE1frmNXQiu911T7dE=",
+ "path": "gopkg.in/square/go-jose.v1",
+ "revision": "40d457b439244b546f023d056628e5184136899b",
+ "revisionTime": "2016-03-29T20:33:11Z"
+ },
+ {
+ "checksumSHA1": "fX4KSC9E1oX9yRx20Zjb3rVJHn4=",
+ "path": "gopkg.in/square/go-jose.v1/cipher",
+ "revision": "40d457b439244b546f023d056628e5184136899b",
+ "revisionTime": "2016-03-29T20:33:11Z"
+ },
+ {
+ "checksumSHA1": "NxdXsIcLGuuX654ygsaOhoLsg6s=",
+ "path": "gopkg.in/square/go-jose.v1/json",
+ "revision": "40d457b439244b546f023d056628e5184136899b",
+ "revisionTime": "2016-03-29T20:33:11Z"
+ }
+ ],
+ "rootPath": "rsc.io/letsencrypt"
+}
diff --git a/vendor/github.com/tylerb/graceful/.gitignore b/vendor/github.com/tylerb/graceful/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/tylerb/graceful/.travis.yml b/vendor/github.com/tylerb/graceful/.travis.yml
new file mode 100644
index 000000000..66fdff76d
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+sudo: false
+go:
+ - 1.7
+ - 1.6.2
+ - 1.5.4
+ - 1.4.3
+ - 1.3.3
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/tylerb/graceful/LICENSE b/vendor/github.com/tylerb/graceful/LICENSE
new file mode 100644
index 000000000..a4f2f281b
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/tylerb/graceful/README.md b/vendor/github.com/tylerb/graceful/README.md
new file mode 100644
index 000000000..328c3acf8
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/README.md
@@ -0,0 +1,152 @@
+graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](http://godoc.org/github.com/tylerb/graceful) [![Build Status](https://travis-ci.org/tylerb/graceful.svg?branch=master)](https://travis-ci.org/tylerb/graceful) [![Coverage Status](https://coveralls.io/repos/tylerb/graceful/badge.svg)](https://coveralls.io/r/tylerb/graceful) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tylerb/graceful?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+========
+
+Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers.
+
+## Installation
+
+To install, simply execute:
+
+```
+go get gopkg.in/tylerb/graceful.v1
+```
+
+I am using [gopkg.in](http://labix.org/gopkg.in) to control releases.
+
+## Usage
+
+Using Graceful is easy. Simply create your http.Handler and pass it to the `Run` function:
+
+```go
+package main
+
+import (
+ "gopkg.in/tylerb/graceful.v1"
+ "net/http"
+ "fmt"
+ "time"
+)
+
+func main() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(w, "Welcome to the home page!")
+ })
+
+	graceful.Run(":3001", 10*time.Second, mux)
+}
+```
+
+Another example, using [Negroni](https://github.com/codegangsta/negroni), functions in much the same manner:
+
+```go
+package main
+
+import (
+ "github.com/codegangsta/negroni"
+ "gopkg.in/tylerb/graceful.v1"
+ "net/http"
+ "fmt"
+ "time"
+)
+
+func main() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(w, "Welcome to the home page!")
+ })
+
+ n := negroni.Classic()
+ n.UseHandler(mux)
+ //n.Run(":3000")
+	graceful.Run(":3001", 10*time.Second, n)
+}
+```
+
+In addition to Run there are the http.Server counterparts ListenAndServe, ListenAndServeTLS and Serve, which allow you to configure HTTPS, custom timeouts and error handling.
+Graceful may also be used by instantiating its Server type directly, which embeds an http.Server:
+
+```go
+mux := // ...
+
+srv := &graceful.Server{
+ Timeout: 10 * time.Second,
+
+ Server: &http.Server{
+ Addr: ":1234",
+ Handler: mux,
+ },
+}
+
+srv.ListenAndServe()
+```
+
+This form allows you to set the ConnState callback, which works in the same way as in http.Server:
+
+```go
+mux := // ...
+
+srv := &graceful.Server{
+ Timeout: 10 * time.Second,
+
+ ConnState: func(conn net.Conn, state http.ConnState) {
+ // conn has a new state
+ },
+
+ Server: &http.Server{
+ Addr: ":1234",
+ Handler: mux,
+ },
+}
+
+srv.ListenAndServe()
+```
+
+## Behaviour
+
+When Graceful is sent a SIGINT or SIGTERM (possibly from ^C or a kill command), it:
+
+1. Disables keepalive connections.
+2. Closes the listening socket, allowing another process to listen on that port immediately.
+3. Starts a timer of `timeout` duration to give active requests a chance to finish.
+4. When timeout expires, closes all active connections.
+5. Closes the `stopChan`, waking up any blocking goroutines.
+6. Returns from the function, allowing the server to terminate.
+
+## Notes
+
+If the `timeout` argument to `Run` is 0, the server never times out, allowing all active requests to complete.
+
+If you wish to stop the server in some way other than an OS signal, you may call the `Stop()` function.
+This function stops the server gracefully, using the new timeout value you provide. The `StopChan()` function
+returns a channel on which you can block while waiting for the server to stop. This channel will be closed when
+the server is stopped, allowing your execution to proceed. Multiple goroutines can block on this channel at the
+same time and all will be signalled when stopping is complete.
+
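As a rough sketch (the handler, address and shutdown trigger are placeholders), stopping the server from another goroutine and then waiting for shutdown to finish might look like this:

```go
mux := // ...

srv := &graceful.Server{
	Timeout: 10 * time.Second,
	Server:  &http.Server{Addr: ":1234", Handler: mux},
}

go func() {
	// Stand-in for a real shutdown trigger, e.g. an admin endpoint.
	time.Sleep(time.Minute)
	// Give in-flight requests up to 5 seconds, overriding the timeout above.
	srv.Stop(5 * time.Second)
}()

srv.ListenAndServe()

// Block until shutdown has fully completed.
<-srv.StopChan()
```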
+### Important things to note when setting `timeout` to 0:
+
+If you set the `timeout` to `0`, it waits for all connections to the server to disconnect before shutting down.
+This means that even though requests over a connection have finished, it is possible for the client to hold the
+connection open and block the server from shutting down indefinitely.
+
+This is especially evident when graceful is used to run HTTP/2 servers. Clients like Chrome and Firefox have been
+observed to hold onto the open connection indefinitely over HTTP/2, preventing the server from shutting down. In
+addition, there is also the risk of malicious clients holding and keeping the connection alive.
+
+It is understandable that sometimes you might want to wait for the client indefinitely because they might be
+uploading large files. In these types of cases, it is recommended that you set a reasonable timeout to kill the
+connection, and have the client perform resumable uploads. For example, the client can divide the file into chunks
+and reupload chunks that were in transit when the connection was terminated.
+
+## Contributing
+
+If you would like to contribute, please:
+
+1. Create a GitHub issue regarding the contribution. Features and bugs should be discussed beforehand.
+2. Fork the repository.
+3. Create a pull request with your solution. This pull request should reference and close the issues (Fix #2).
+
+All pull requests should:
+
+1. Pass [gometalinter -t .](https://github.com/alecthomas/gometalinter) with no warnings.
+2. Be `go fmt` formatted.
diff --git a/vendor/github.com/tylerb/graceful/graceful.go b/vendor/github.com/tylerb/graceful/graceful.go
new file mode 100644
index 000000000..a5e2395e0
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/graceful.go
@@ -0,0 +1,487 @@
+package graceful
+
+import (
+ "crypto/tls"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// Server wraps an http.Server with graceful connection handling.
+// It may be used directly in the same way as http.Server, or may
+// be constructed with the global functions in this package.
+//
+// Example:
+// srv := &graceful.Server{
+// Timeout: 5 * time.Second,
+// Server: &http.Server{Addr: ":1234", Handler: handler},
+// }
+// srv.ListenAndServe()
+type Server struct {
+ *http.Server
+
+ // Timeout is the duration to allow outstanding requests to survive
+ // before forcefully terminating them.
+ Timeout time.Duration
+
+ // Limit the number of outstanding requests
+ ListenLimit int
+
+ // TCPKeepAlive sets the TCP keep-alive timeouts on accepted
+	// connections. It prunes dead TCP connections (e.g. a laptop closed
+	// mid-download).
+ TCPKeepAlive time.Duration
+
+ // ConnState specifies an optional callback function that is
+ // called when a client connection changes state. This is a proxy
+ // to the underlying http.Server's ConnState, and the original
+ // must not be set directly.
+ ConnState func(net.Conn, http.ConnState)
+
+ // BeforeShutdown is an optional callback function that is called
+	// before the listener is closed. It must return true for shutdown to proceed.
+ BeforeShutdown func() bool
+
+ // ShutdownInitiated is an optional callback function that is called
+ // when shutdown is initiated. It can be used to notify the client
+ // side of long lived connections (e.g. websockets) to reconnect.
+ ShutdownInitiated func()
+
+ // NoSignalHandling prevents graceful from automatically shutting down
+ // on SIGINT and SIGTERM. If set to true, you must shut down the server
+ // manually with Stop().
+ NoSignalHandling bool
+
+ // Logger used to notify of errors on startup and on stop.
+ Logger *log.Logger
+
+ // LogFunc can be assigned with a logging function of your choice, allowing
+ // you to use whatever logging approach you would like
+ LogFunc func(format string, args ...interface{})
+
+ // Interrupted is true if the server is handling a SIGINT or SIGTERM
+ // signal and is thus shutting down.
+ Interrupted bool
+
+ // interrupt signals the listener to stop serving connections,
+ // and the server to shut down.
+ interrupt chan os.Signal
+
+ // stopLock is used to protect against concurrent calls to Stop
+ stopLock sync.Mutex
+
+ // stopChan is the channel on which callers may block while waiting for
+ // the server to stop.
+ stopChan chan struct{}
+
+ // chanLock is used to protect access to the various channel constructors.
+ chanLock sync.RWMutex
+
+ // connections holds all connections managed by graceful
+ connections map[net.Conn]struct{}
+
+ // idleConnections holds all idle connections managed by graceful
+ idleConnections map[net.Conn]struct{}
+}
+
+// Run serves the http.Handler with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func Run(addr string, timeout time.Duration, n http.Handler) {
+ srv := &Server{
+ Timeout: timeout,
+ TCPKeepAlive: 3 * time.Minute,
+ Server: &http.Server{Addr: addr, Handler: n},
+ // Logger: DefaultLogger(),
+ }
+
+ if err := srv.ListenAndServe(); err != nil {
+ if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
+ srv.logf("%s", err)
+ os.Exit(1)
+ }
+ }
+
+}
+
+// RunWithErr is an alternative version of the Run function which can return an error.
+//
+// Unlike Run this version will not exit the program if an error is encountered but will
+// return it instead.
+func RunWithErr(addr string, timeout time.Duration, n http.Handler) error {
+ srv := &Server{
+ Timeout: timeout,
+ TCPKeepAlive: 3 * time.Minute,
+ Server: &http.Server{Addr: addr, Handler: n},
+ Logger: DefaultLogger(),
+ }
+
+ return srv.ListenAndServe()
+}
+
+// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func ListenAndServe(server *http.Server, timeout time.Duration) error {
+ srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()}
+ return srv.ListenAndServe()
+}
+
+// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled.
+func (srv *Server) ListenAndServe() error {
+ // Create the listener so we can control their lifetime
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":http"
+ }
+ conn, err := srv.newTCPListener(addr)
+ if err != nil {
+ return err
+ }
+
+ return srv.Serve(conn)
+}
+
+// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func ListenAndServeTLS(server *http.Server, certFile, keyFile string, timeout time.Duration) error {
+ srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()}
+ return srv.ListenAndServeTLS(certFile, keyFile)
+}
+
+// ListenTLS is a convenience method that creates an https listener using the
+// provided cert and key files. Use this method if you need access to the
+// listener object directly. When ready, pass it to the Serve method.
+func (srv *Server) ListenTLS(certFile, keyFile string) (net.Listener, error) {
+ // Create the listener ourselves so we can control its lifetime
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+
+ config := &tls.Config{}
+ if srv.TLSConfig != nil {
+ *config = *srv.TLSConfig
+ }
+
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable http2
+ enableHTTP2ForTLSConfig(config)
+
+ conn, err := srv.newTCPListener(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ srv.TLSConfig = config
+
+ tlsListener := tls.NewListener(conn, config)
+ return tlsListener, nil
+}
+
+// enableHTTP2ForTLSConfig explicitly enables HTTP/2 for a TLS config. This is due to changes in Go 1.7 where
+// http servers are no longer automatically configured to enable http/2 if the server's TLSConfig is set.
+// See https://github.com/golang/go/issues/15908
+func enableHTTP2ForTLSConfig(t *tls.Config) {
+
+ if TLSConfigHasHTTP2Enabled(t) {
+ return
+ }
+
+ t.NextProtos = append(t.NextProtos, "h2")
+}
+
+// TLSConfigHasHTTP2Enabled checks to see if a given TLS Config has http2 enabled.
+func TLSConfigHasHTTP2Enabled(t *tls.Config) bool {
+ for _, value := range t.NextProtos {
+ if value == "h2" {
+ return true
+ }
+ }
+ return false
+}
+
+// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled.
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ l, err := srv.ListenTLS(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+
+ return srv.Serve(l)
+}
+
+// ListenAndServeTLSConfig can be used with an existing TLS config and is equivalent to
+// http.Server.ListenAndServeTLS with graceful shutdown enabled.
+func (srv *Server) ListenAndServeTLSConfig(config *tls.Config) error {
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+
+ conn, err := srv.newTCPListener(addr)
+ if err != nil {
+ return err
+ }
+
+ srv.TLSConfig = config
+
+ tlsListener := tls.NewListener(conn, config)
+ return srv.Serve(tlsListener)
+}
+
+// Serve is equivalent to http.Server.Serve with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func Serve(server *http.Server, l net.Listener, timeout time.Duration) error {
+ srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()}
+
+ return srv.Serve(l)
+}
+
+// Serve is equivalent to http.Server.Serve with graceful shutdown enabled.
+func (srv *Server) Serve(listener net.Listener) error {
+
+ if srv.ListenLimit != 0 {
+ listener = LimitListener(listener, srv.ListenLimit)
+ }
+
+ // Make our stopchan
+ srv.StopChan()
+
+ // Track connection state
+ add := make(chan net.Conn)
+ idle := make(chan net.Conn)
+ active := make(chan net.Conn)
+ remove := make(chan net.Conn)
+
+ srv.Server.ConnState = func(conn net.Conn, state http.ConnState) {
+ switch state {
+ case http.StateNew:
+ add <- conn
+ case http.StateActive:
+ active <- conn
+ case http.StateIdle:
+ idle <- conn
+ case http.StateClosed, http.StateHijacked:
+ remove <- conn
+ }
+
+ srv.stopLock.Lock()
+ defer srv.stopLock.Unlock()
+
+ if srv.ConnState != nil {
+ srv.ConnState(conn, state)
+ }
+ }
+
+ // Manage open connections
+ shutdown := make(chan chan struct{})
+ kill := make(chan struct{})
+ go srv.manageConnections(add, idle, active, remove, shutdown, kill)
+
+ interrupt := srv.interruptChan()
+ // Set up the interrupt handler
+ if !srv.NoSignalHandling {
+ signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
+ }
+ quitting := make(chan struct{})
+ go srv.handleInterrupt(interrupt, quitting, listener)
+
+ // Serve with graceful listener.
+ // Execution blocks here until listener.Close() is called, above.
+ err := srv.Server.Serve(listener)
+ if err != nil {
+ // If the underlying listening is closed, Serve returns an error
+ // complaining about listening on a closed socket. This is expected, so
+ // let's ignore the error if we are the ones who explicitly closed the
+ // socket.
+ select {
+ case <-quitting:
+ err = nil
+ default:
+ }
+ }
+
+ srv.shutdown(shutdown, kill)
+
+ return err
+}
+
+// Stop instructs the type to halt operations and close
+// the stop channel when it is finished.
+//
+// timeout is grace period for which to wait before shutting
+// down the server. The timeout value passed here will override the
+// timeout given when constructing the server, as this is an explicit
+// command to stop the server.
+func (srv *Server) Stop(timeout time.Duration) {
+ srv.stopLock.Lock()
+ defer srv.stopLock.Unlock()
+
+ srv.Timeout = timeout
+ interrupt := srv.interruptChan()
+ interrupt <- syscall.SIGINT
+}
+
+// StopChan gets the stop channel which will block until
+// stopping has completed, at which point it is closed.
+// Callers should never close the stop channel.
+func (srv *Server) StopChan() <-chan struct{} {
+ srv.chanLock.Lock()
+ defer srv.chanLock.Unlock()
+
+ if srv.stopChan == nil {
+ srv.stopChan = make(chan struct{})
+ }
+ return srv.stopChan
+}
+
+// DefaultLogger returns the logger used by Run, RunWithErr, ListenAndServe, ListenAndServeTLS and Serve.
+// The logger outputs to STDERR by default.
+func DefaultLogger() *log.Logger {
+ return log.New(os.Stderr, "[graceful] ", 0)
+}
+
+func (srv *Server) manageConnections(add, idle, active, remove chan net.Conn, shutdown chan chan struct{}, kill chan struct{}) {
+ var done chan struct{}
+ srv.connections = map[net.Conn]struct{}{}
+ srv.idleConnections = map[net.Conn]struct{}{}
+ for {
+ select {
+ case conn := <-add:
+ srv.connections[conn] = struct{}{}
+ case conn := <-idle:
+ srv.idleConnections[conn] = struct{}{}
+ case conn := <-active:
+ delete(srv.idleConnections, conn)
+ case conn := <-remove:
+ delete(srv.connections, conn)
+ delete(srv.idleConnections, conn)
+ if done != nil && len(srv.connections) == 0 {
+ done <- struct{}{}
+ return
+ }
+ case done = <-shutdown:
+ if len(srv.connections) == 0 && len(srv.idleConnections) == 0 {
+ done <- struct{}{}
+ return
+ }
+ // a shutdown request has been received. if we have open idle
+ // connections, we must close all of them now. this prevents idle
+ // connections from holding the server open while waiting for them to
+ // hit their idle timeout.
+ for k := range srv.idleConnections {
+ if err := k.Close(); err != nil {
+ srv.logf("[ERROR] %s", err)
+ }
+ }
+ case <-kill:
+ srv.stopLock.Lock()
+ defer srv.stopLock.Unlock()
+
+ srv.Server.ConnState = nil
+ for k := range srv.connections {
+ if err := k.Close(); err != nil {
+ srv.logf("[ERROR] %s", err)
+ }
+ }
+ return
+ }
+ }
+}
+
+func (srv *Server) interruptChan() chan os.Signal {
+ srv.chanLock.Lock()
+ defer srv.chanLock.Unlock()
+
+ if srv.interrupt == nil {
+ srv.interrupt = make(chan os.Signal, 1)
+ }
+
+ return srv.interrupt
+}
+
+func (srv *Server) handleInterrupt(interrupt chan os.Signal, quitting chan struct{}, listener net.Listener) {
+ for _ = range interrupt {
+ if srv.Interrupted {
+ srv.logf("already shutting down")
+ continue
+ }
+ srv.logf("shutdown initiated")
+ srv.Interrupted = true
+ if srv.BeforeShutdown != nil {
+ if !srv.BeforeShutdown() {
+ srv.Interrupted = false
+ continue
+ }
+ }
+
+ close(quitting)
+ srv.SetKeepAlivesEnabled(false)
+ if err := listener.Close(); err != nil {
+ srv.logf("[ERROR] %s", err)
+ }
+
+ if srv.ShutdownInitiated != nil {
+ srv.ShutdownInitiated()
+ }
+ }
+}
+
+func (srv *Server) logf(format string, args ...interface{}) {
+ if srv.LogFunc != nil {
+ srv.LogFunc(format, args...)
+ } else if srv.Logger != nil {
+ srv.Logger.Printf(format, args...)
+ }
+}
+
+func (srv *Server) shutdown(shutdown chan chan struct{}, kill chan struct{}) {
+ // Request done notification
+ done := make(chan struct{})
+ shutdown <- done
+
+ if srv.Timeout > 0 {
+ select {
+ case <-done:
+ case <-time.After(srv.Timeout):
+ close(kill)
+ }
+ } else {
+ <-done
+ }
+ // Close the stopChan to wake up any blocked goroutines.
+ srv.chanLock.Lock()
+ if srv.stopChan != nil {
+ close(srv.stopChan)
+ }
+ srv.chanLock.Unlock()
+}
+
+func (srv *Server) newTCPListener(addr string) (net.Listener, error) {
+ conn, err := net.Listen("tcp", addr)
+ if err != nil {
+ return conn, err
+ }
+ if srv.TCPKeepAlive != 0 {
+ conn = keepAliveListener{conn, srv.TCPKeepAlive}
+ }
+ return conn, nil
+}
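To make the optional hooks above concrete, here is a hedged sketch wiring BeforeShutdown, ShutdownInitiated and LogFunc together. The address and log messages are placeholders, and the import path is the vendored one; the upstream README prefers the gopkg.in path:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/tylerb/graceful"
)

func main() {
	srv := &graceful.Server{
		Timeout: 10 * time.Second,
		Server:  &http.Server{Addr: ":8080", Handler: http.DefaultServeMux},

		// BeforeShutdown can veto a shutdown request by returning false.
		BeforeShutdown: func() bool {
			log.Println("shutdown requested")
			return true
		},

		// ShutdownInitiated runs after the listener has been closed, e.g. to
		// ask long-lived clients (websockets) to reconnect elsewhere.
		ShutdownInitiated: func() {
			log.Println("shutdown initiated")
		},

		// Route graceful's own messages through the standard logger.
		LogFunc: log.Printf,
	}

	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
```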
diff --git a/vendor/github.com/tylerb/graceful/graceful_test.go b/vendor/github.com/tylerb/graceful/graceful_test.go
new file mode 100644
index 000000000..b9c49336b
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/graceful_test.go
@@ -0,0 +1,692 @@
+package graceful
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+)
+
+const (
+ // The tests will run a test server on this port.
+ port = 9654
+ concurrentRequestN = 8
+ killTime = 500 * time.Millisecond
+ timeoutTime = 1000 * time.Millisecond
+ waitTime = 100 * time.Millisecond
+)
+
+func runQuery(t *testing.T, expected int, shouldErr bool, wg *sync.WaitGroup, once *sync.Once) {
+ defer wg.Done()
+ client := http.Client{}
+ r, err := client.Get(fmt.Sprintf("http://localhost:%d", port))
+ if shouldErr && err == nil {
+ once.Do(func() {
+ t.Error("Expected an error but none was encountered.")
+ })
+ } else if shouldErr && err != nil {
+ if checkErr(t, err, once) {
+ return
+ }
+ }
+ if r != nil && r.StatusCode != expected {
+ once.Do(func() {
+ t.Errorf("Incorrect status code on response. Expected %d. Got %d", expected, r.StatusCode)
+ })
+ } else if r == nil {
+ once.Do(func() {
+ t.Error("No response when a response was expected.")
+ })
+ }
+}
+
+func checkErr(t *testing.T, err error, once *sync.Once) bool {
+ if err.(*url.Error).Err == io.EOF {
+ return true
+ }
+ var errno syscall.Errno
+ switch oe := err.(*url.Error).Err.(type) {
+ case *net.OpError:
+ switch e := oe.Err.(type) {
+ case syscall.Errno:
+ errno = e
+ case *os.SyscallError:
+ errno = e.Err.(syscall.Errno)
+ }
+ if errno == syscall.ECONNREFUSED {
+ return true
+ } else if err != nil {
+ once.Do(func() {
+ t.Error("Error on Get:", err)
+ })
+ }
+ default:
+ if strings.Contains(err.Error(), "transport closed before response was received") {
+ return true
+ }
+ if strings.Contains(err.Error(), "server closed connection") {
+ return true
+ }
+ fmt.Printf("unknown err: %s, %#v\n", err, err)
+ }
+ return false
+}
+
+func createListener(sleep time.Duration) (*http.Server, net.Listener, error) {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
+ time.Sleep(sleep)
+ rw.WriteHeader(http.StatusOK)
+ })
+
+ server := &http.Server{Addr: fmt.Sprintf(":%d", port), Handler: mux}
+ l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ return server, l, err
+}
+
+func launchTestQueries(t *testing.T, wg *sync.WaitGroup, c chan os.Signal) {
+ defer wg.Done()
+ var once sync.Once
+
+ for i := 0; i < concurrentRequestN; i++ {
+ wg.Add(1)
+ go runQuery(t, http.StatusOK, false, wg, &once)
+ }
+
+ time.Sleep(waitTime)
+ c <- os.Interrupt
+ time.Sleep(waitTime)
+
+ for i := 0; i < concurrentRequestN; i++ {
+ wg.Add(1)
+ go runQuery(t, 0, true, wg, &once)
+ }
+}
+
+func TestGracefulRun(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime / 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{Timeout: killTime, Server: server, interrupt: c}
+ srv.Serve(l)
+ }()
+
+ wg.Add(1)
+ go launchTestQueries(t, &wg, c)
+}
+
+func TestGracefulRunLimitKeepAliveListener(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime / 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{
+ Timeout: killTime,
+ ListenLimit: concurrentRequestN,
+ TCPKeepAlive: 1 * time.Second,
+ Server: server,
+ interrupt: c,
+ }
+ srv.Serve(l)
+ }()
+
+ wg.Add(1)
+ go launchTestQueries(t, &wg, c)
+}
+
+func TestGracefulRunTimesOut(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime * 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{Timeout: killTime, Server: server, interrupt: c}
+ srv.Serve(l)
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var once sync.Once
+
+ for i := 0; i < concurrentRequestN; i++ {
+ wg.Add(1)
+ go runQuery(t, 0, true, &wg, &once)
+ }
+
+ time.Sleep(waitTime)
+ c <- os.Interrupt
+ time.Sleep(waitTime)
+
+ for i := 0; i < concurrentRequestN; i++ {
+ wg.Add(1)
+ go runQuery(t, 0, true, &wg, &once)
+ }
+ }()
+}
+
+func TestGracefulRunDoesntTimeOut(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime * 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{Timeout: 0, Server: server, interrupt: c}
+ srv.Serve(l)
+ }()
+
+ wg.Add(1)
+ go launchTestQueries(t, &wg, c)
+}
+
+func TestGracefulRunDoesntTimeOutAfterConnectionCreated(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{Timeout: 0, Server: server, interrupt: c}
+ srv.Serve(l)
+ }()
+ time.Sleep(waitTime)
+
+ // Make a sample first request. The connection will be left idle.
+ resp, err := http.Get(fmt.Sprintf("http://localhost:%d", port))
+ if err != nil {
+ panic(fmt.Sprintf("first request failed: %v", err))
+ }
+ resp.Body.Close()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ // With idle connections improperly handled, the server doesn't wait for this
+ // to complete and the request fails. It should be allowed to complete successfully.
+ _, err := http.Get(fmt.Sprintf("http://localhost:%d", port))
+ if err != nil {
+ t.Errorf("Get failed: %v", err)
+ }
+ }()
+
+ // Ensure the request goes out
+ time.Sleep(waitTime)
+ c <- os.Interrupt
+ wg.Wait()
+}
+
+func TestGracefulRunNoRequests(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime * 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{Timeout: 0, Server: server, interrupt: c}
+ srv.Serve(l)
+ }()
+
+ c <- os.Interrupt
+}
+
+func TestGracefulForwardsConnState(t *testing.T) {
+ var stateLock sync.Mutex
+ states := make(map[http.ConnState]int)
+ connState := func(conn net.Conn, state http.ConnState) {
+ stateLock.Lock()
+ states[state]++
+ stateLock.Unlock()
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ expected := map[http.ConnState]int{
+ http.StateNew: concurrentRequestN,
+ http.StateActive: concurrentRequestN,
+ http.StateClosed: concurrentRequestN,
+ }
+
+ c := make(chan os.Signal, 1)
+ server, l, err := createListener(killTime / 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv := &Server{
+ ConnState: connState,
+ Timeout: killTime,
+ Server: server,
+ interrupt: c,
+ }
+ srv.Serve(l)
+ }()
+
+ wg.Add(1)
+ go launchTestQueries(t, &wg, c)
+ wg.Wait()
+
+ stateLock.Lock()
+ if !reflect.DeepEqual(states, expected) {
+ t.Errorf("Incorrect connection state tracking.\n actual: %v\nexpected: %v\n", states, expected)
+ }
+ stateLock.Unlock()
+}
+
+func TestGracefulExplicitStop(t *testing.T) {
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{Timeout: killTime, Server: server}
+
+ go func() {
+ go srv.Serve(l)
+ time.Sleep(waitTime)
+ srv.Stop(killTime)
+ }()
+
+ // block on the stopChan until the server has shut down
+ select {
+ case <-srv.StopChan():
+ case <-time.After(timeoutTime):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+}
+
+func TestGracefulExplicitStopOverride(t *testing.T) {
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{Timeout: killTime, Server: server}
+
+ go func() {
+ go srv.Serve(l)
+ time.Sleep(waitTime)
+ srv.Stop(killTime / 2)
+ }()
+
+ // block on the stopChan until the server has shut down
+ select {
+ case <-srv.StopChan():
+ case <-time.After(killTime):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+}
+
+func TestBeforeShutdownAndShutdownInitiatedCallbacks(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ beforeShutdownCalled := make(chan struct{})
+ cb1 := func() bool { close(beforeShutdownCalled); return true }
+ shutdownInitiatedCalled := make(chan struct{})
+ cb2 := func() { close(shutdownInitiatedCalled) }
+
+ wg.Add(2)
+ srv := &Server{Server: server, BeforeShutdown: cb1, ShutdownInitiated: cb2}
+ go func() {
+ defer wg.Done()
+ srv.Serve(l)
+ }()
+ go func() {
+ defer wg.Done()
+ time.Sleep(waitTime)
+ srv.Stop(killTime)
+ }()
+
+ beforeShutdown := false
+ shutdownInitiated := false
+ for i := 0; i < 2; i++ {
+ select {
+ case <-beforeShutdownCalled:
+ beforeShutdownCalled = nil
+ beforeShutdown = true
+ case <-shutdownInitiatedCalled:
+ shutdownInitiatedCalled = nil
+ shutdownInitiated = true
+ case <-time.After(killTime):
+ t.Fatal("Timed out while waiting for ShutdownInitiated callback to be called")
+ }
+ }
+
+ if !beforeShutdown {
+ t.Fatal("beforeShutdown should be true")
+ }
+ if !shutdownInitiated {
+ t.Fatal("shutdownInitiated should be true")
+ }
+}
+
+func TestBeforeShutdownCanceled(t *testing.T) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ beforeShutdownCalled := make(chan struct{})
+ cb1 := func() bool { close(beforeShutdownCalled); return false }
+ shutdownInitiatedCalled := make(chan struct{})
+ cb2 := func() { close(shutdownInitiatedCalled) }
+
+ srv := &Server{Server: server, BeforeShutdown: cb1, ShutdownInitiated: cb2}
+ go func() {
+ srv.Serve(l)
+ wg.Done()
+ }()
+ go func() {
+ time.Sleep(waitTime)
+ srv.Stop(killTime)
+ }()
+
+ beforeShutdown := false
+ shutdownInitiated := false
+ timeouted := false
+
+ for i := 0; i < 2; i++ {
+ select {
+ case <-beforeShutdownCalled:
+ beforeShutdownCalled = nil
+ beforeShutdown = true
+ case <-shutdownInitiatedCalled:
+ shutdownInitiatedCalled = nil
+ shutdownInitiated = true
+ case <-time.After(killTime):
+ timeouted = true
+ }
+ }
+
+ if !beforeShutdown {
+ t.Fatal("beforeShutdown should be true")
+ }
+ if !timeouted {
+ t.Fatal("timeouted should be true")
+ }
+ if shutdownInitiated {
+ t.Fatal("shutdownInitiated shouldn't be true")
+ }
+
+ srv.BeforeShutdown = func() bool { return true }
+ srv.Stop(killTime)
+
+ wg.Wait()
+}
+
+func hijackingListener(srv *Server) (*http.Server, net.Listener, error) {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
+ conn, bufrw, err := rw.(http.Hijacker).Hijack()
+ if err != nil {
+ http.Error(rw, "webserver doesn't support hijacking", http.StatusInternalServerError)
+ return
+ }
+
+ defer conn.Close()
+
+ bufrw.WriteString("HTTP/1.1 200 OK\r\n\r\n")
+ bufrw.Flush()
+ })
+
+ server := &http.Server{Addr: fmt.Sprintf(":%d", port), Handler: mux}
+ l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ return server, l, err
+}
+
+func TestNotifyClosed(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan os.Signal, 1)
+ srv := &Server{Timeout: killTime, interrupt: c}
+ server, l, err := hijackingListener(srv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv.Server = server
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ srv.Serve(l)
+ }()
+
+ var once sync.Once
+ for i := 0; i < concurrentRequestN; i++ {
+ wg.Add(1)
+ runQuery(t, http.StatusOK, false, &wg, &once)
+ }
+
+ srv.Stop(0)
+
+ // block on the stopChan until the server has shut down
+ select {
+ case <-srv.StopChan():
+ case <-time.After(timeoutTime):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+
+ if len(srv.connections) > 0 {
+ t.Fatal("hijacked connections should not be managed")
+ }
+
+}
+
+func TestStopDeadlock(t *testing.T) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ c := make(chan struct{})
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{Server: server, NoSignalHandling: true}
+
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ time.Sleep(waitTime)
+ srv.Serve(l)
+ }()
+ go func() {
+ defer wg.Done()
+ srv.Stop(0)
+ close(c)
+ }()
+
+ select {
+ case <-c:
+ l.Close()
+ case <-time.After(timeoutTime):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+}
+
+// Run with --race
+func TestStopRace(t *testing.T) {
+ server, l, err := createListener(1 * time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{Timeout: killTime, Server: server}
+
+ go func() {
+ go srv.Serve(l)
+ srv.Stop(killTime)
+ }()
+ srv.Stop(0)
+ select {
+ case <-srv.StopChan():
+ case <-time.After(timeoutTime):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+}
+
+func TestInterruptLog(t *testing.T) {
+ c := make(chan os.Signal, 1)
+
+ server, l, err := createListener(killTime * 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ var tbuf bytes.Buffer
+ logger := log.New(&buf, "", 0)
+ expected := log.New(&tbuf, "", 0)
+
+ srv := &Server{Timeout: killTime, Server: server, Logger: logger, interrupt: c}
+ go func() { srv.Serve(l) }()
+
+ stop := srv.StopChan()
+ c <- os.Interrupt
+ expected.Print("shutdown initiated")
+
+ <-stop
+
+ if buf.String() != tbuf.String() {
+ t.Fatal("shutdown log incorrect - got '" + buf.String() + "'")
+ }
+}
+
+func TestMultiInterrupts(t *testing.T) {
+ c := make(chan os.Signal, 1)
+
+ server, l, err := createListener(killTime * 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var wg sync.WaitGroup
+ var bu bytes.Buffer
+ buf := SyncBuffer{&wg, &bu}
+ var tbuf bytes.Buffer
+ logger := log.New(&buf, "", 0)
+ expected := log.New(&tbuf, "", 0)
+
+ srv := &Server{Timeout: killTime, Server: server, Logger: logger, interrupt: c}
+ go func() { srv.Serve(l) }()
+
+ stop := srv.StopChan()
+ buf.Add(1 + 10) // Expecting 11 log calls
+ c <- os.Interrupt
+ expected.Printf("shutdown initiated")
+ for i := 0; i < 10; i++ {
+ c <- os.Interrupt
+ expected.Printf("already shutting down")
+ }
+
+ <-stop
+
+ wg.Wait()
+ bb, bt := buf.Bytes(), tbuf.Bytes()
+ for i, b := range bb {
+ if b != bt[i] {
+ t.Fatal(fmt.Sprintf("shutdown log incorrect - got '%s', expected '%s'", buf.String(), tbuf.String()))
+ }
+ }
+}
+
+func TestLogFunc(t *testing.T) {
+ c := make(chan os.Signal, 1)
+
+ server, l, err := createListener(killTime * 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var called bool
+ srv := &Server{Timeout: killTime, Server: server,
+ LogFunc: func(format string, args ...interface{}) {
+ called = true
+ }, interrupt: c}
+ stop := srv.StopChan()
+ go func() { srv.Serve(l) }()
+ c <- os.Interrupt
+ <-stop
+
+ if called != true {
+ t.Fatal("Expected LogFunc to be called.")
+ }
+}
+
+// SyncBuffer calls Done on the embedded wait group after each call to Write.
+type SyncBuffer struct {
+ *sync.WaitGroup
+ *bytes.Buffer
+}
+
+func (buf *SyncBuffer) Write(b []byte) (int, error) {
+ defer buf.Done()
+ return buf.Buffer.Write(b)
+}
diff --git a/vendor/github.com/tylerb/graceful/http2_test.go b/vendor/github.com/tylerb/graceful/http2_test.go
new file mode 100644
index 000000000..5b2ebbb8f
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/http2_test.go
@@ -0,0 +1,125 @@
+// +build go1.6
+
+package graceful
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2"
+)
+
+func createServer() *http.Server {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
+ rw.WriteHeader(http.StatusOK)
+ })
+
+ server := &http.Server{Addr: fmt.Sprintf(":%d", port), Handler: mux}
+
+ return server
+}
+
+func checkIfConnectionToServerIsHTTP2(t *testing.T, wg *sync.WaitGroup, c chan os.Signal) {
+
+ defer wg.Done()
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+
+ err := http2.ConfigureTransport(tr)
+
+ if err != nil {
+ t.Fatal("Unable to upgrade client transport to HTTP/2")
+ }
+
+ client := http.Client{Transport: tr}
+ r, err := client.Get(fmt.Sprintf("https://localhost:%d", port))
+
+ c <- os.Interrupt
+
+ if err != nil {
+ t.Fatalf("Error encountered while connecting to test server: %s", err)
+ }
+
+ if !r.ProtoAtLeast(2, 0) {
+ t.Fatalf("Expected HTTP/2 connection to server, but connection was using %s", r.Proto)
+ }
+}
+
+func TestHTTP2ListenAndServeTLS(t *testing.T) {
+
+ c := make(chan os.Signal, 1)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ server := createServer()
+
+ var srv *Server
+ go func() {
+ // set timeout of 0 to test idle connection closing
+ srv = &Server{Timeout: 0, TCPKeepAlive: 1 * time.Minute, Server: server, interrupt: c}
+ srv.ListenAndServeTLS("test-fixtures/cert.crt", "test-fixtures/key.pem")
+ wg.Done()
+ }()
+
+ time.Sleep(waitTime) // Wait for the server to start
+
+ wg.Add(1)
+ go checkIfConnectionToServerIsHTTP2(t, &wg, c)
+ wg.Wait()
+
+ c <- os.Interrupt // kill the server to close idle connections
+
+ // block on the stopChan until the server has shut down
+ select {
+ case <-srv.StopChan():
+ case <-time.After(killTime * 2):
+ t.Fatal("Timed out while waiting for explicit stop to complete")
+ }
+
+}
+
+func TestHTTP2ListenAndServeTLSConfig(t *testing.T) {
+
+ c := make(chan os.Signal, 1)
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+
+ server2 := createServer()
+
+ go func() {
+ srv := &Server{Timeout: killTime, TCPKeepAlive: 1 * time.Minute, Server: server2, interrupt: c}
+
+ cert, err := tls.LoadX509KeyPair("test-fixtures/cert.crt", "test-fixtures/key.pem")
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ tlsConf := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ NextProtos: []string{"h2"}, // We need to explicitly enable http/2 in Go 1.7+
+ }
+
+ tlsConf.BuildNameToCertificate()
+
+ srv.ListenAndServeTLSConfig(tlsConf)
+ wg.Done()
+ }()
+
+ time.Sleep(waitTime) // Wait for the server to start
+
+ wg.Add(1)
+ go checkIfConnectionToServerIsHTTP2(t, &wg, c)
+ wg.Wait()
+}
diff --git a/vendor/github.com/tylerb/graceful/keepalive_listener.go b/vendor/github.com/tylerb/graceful/keepalive_listener.go
new file mode 100644
index 000000000..1484bc213
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/keepalive_listener.go
@@ -0,0 +1,32 @@
+package graceful
+
+import (
+ "net"
+ "time"
+)
+
+type keepAliveConn interface {
+ SetKeepAlive(bool) error
+ SetKeepAlivePeriod(d time.Duration) error
+}
+
+// keepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+type keepAliveListener struct {
+ net.Listener
+ keepAlivePeriod time.Duration
+}
+
+func (ln keepAliveListener) Accept() (net.Conn, error) {
+ c, err := ln.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ kac := c.(keepAliveConn)
+ kac.SetKeepAlive(true)
+ kac.SetKeepAlivePeriod(ln.keepAlivePeriod)
+ return c, nil
+}
diff --git a/vendor/github.com/tylerb/graceful/limit_listen.go b/vendor/github.com/tylerb/graceful/limit_listen.go
new file mode 100644
index 000000000..ce32ce992
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/limit_listen.go
@@ -0,0 +1,77 @@
+// Copyright 2013 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package graceful
+
+import (
+ "errors"
+ "net"
+ "sync"
+ "time"
+)
+
+// ErrNotTCP indicates that the network connection is not a TCP connection.
+var ErrNotTCP = errors.New("only tcp connections have keepalive")
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+ return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+ net.Listener
+ sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+ l.acquire()
+ c, err := l.Listener.Accept()
+ if err != nil {
+ l.release()
+ return nil, err
+ }
+ return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+ net.Conn
+ releaseOnce sync.Once
+ release func()
+}
+
+func (l *limitListenerConn) Close() error {
+ err := l.Conn.Close()
+ l.releaseOnce.Do(l.release)
+ return err
+}
+
+func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
+ tcpc, ok := l.Conn.(*net.TCPConn)
+ if !ok {
+ return ErrNotTCP
+ }
+ return tcpc.SetKeepAlive(doKeepAlive)
+}
+
+func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
+ tcpc, ok := l.Conn.(*net.TCPConn)
+ if !ok {
+ return ErrNotTCP
+ }
+ return tcpc.SetKeepAlivePeriod(d)
+}
diff --git a/vendor/github.com/tylerb/graceful/test-fixtures/cert.crt b/vendor/github.com/tylerb/graceful/test-fixtures/cert.crt
new file mode 100644
index 000000000..84bd02a3d
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/test-fixtures/cert.crt
@@ -0,0 +1,43 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIUDvdWhjUd/JS+E5bxZlmCM+giGHMwDQYJKoZIhvcNAQEL
+BQAwHzEdMBsGA1UEAxMUVGVzdCBJbnRlcm1lZGlhdGUgQ0EwHhcNMTYwNjAyMDMy
+MjA0WhcNMTkwNjAyMDMyMjM0WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDoyMTUK2OSp+XhKRXB/+uO6YAJE/W
+2rzqARahWT6boHZMDhHXRtdwYxWwiUqoxlEeBrEerQ2qPFAqlWkDw8zliE/DWgXg
+BiW+Vq5DAn3F1jZ5WskLWr1iP48oK4/l+BXEsDd44MHZFoSZiWlr2Fi4iaIHJE7+
+LGBqPVQXwBYTyc7Jvi3HY8I4/waaAwXoSo8vDPjRiMCD2wlg24Rimocf4goa/2Xs
+Z0NU76Uf2jPdsZ5MujjKRqwHDEAjiBq0aPvm6igkNGAGoZ6QYEptO+J4t1oFrbdP
+gYRlpqCa3ekr9gc+wg5AO/V9x8/cypbQ8tpwFwvvSYg2TJaUMZ5abc+HAgMBAAGj
+gcMwgcAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQC
+R0Y69NLOfFCLRiB5N3uoacILXTAfBgNVHSMEGDAWgBRm0fFHSXtDCVHC8UW7/obv
+DLp9tTBJBggrBgEFBQcBAQQ9MDswOQYIKwYBBQUHMAKGLWh0dHA6Ly9sb2NhbGhv
+c3Qvc2VsZi1pc3N1ZWQtaW50ZXJtZWRpYXRlLmNydDAUBgNVHREEDTALgglsb2Nh
+bGhvc3QwDQYJKoZIhvcNAQELBQADggEBALAf/nowwB0NJ7lGGaoVKhmMHxBEQkd1
+K/jBAlJg9Kgmg1IJJ7zLE3SeYF8tGTNYATd4RLmqo1GakrMDaKWNXd74v3p/tWmb
+4vqCh6WzFPHU1dpxDKtbbmaLt9Ije7s6DuQAz9bBXM0mN0vy5F0dORpx/j0h3u1B
+j7B5O8kLejPY2w/8pd+QECCb1Q5A6Xx1EEsJpzTlGXO0SBla/oCg+nvirsBGVpWr
+bGskAIwG9wNKuGfg4m5u1bL87iX80NemeLtWRWVM+Ry/RhfOokH59/EIFRAXeRz6
+gXjIWa0vcXnhW1MOvbD1GFYhO6AJAnDwWes48WfBHysOhq0RycdpGw0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIUMzpit8+j2dWxdk1PdMqGWYalZyIwDQYJKoZIhvcNAQEL
+BQAwFzEVMBMGA1UEAxMMVGVzdCBSb290IENBMB4XDTE2MDUyOTEwNDYwMFoXDTMx
+MDUyNjEwNDYzMFowHzEdMBsGA1UEAxMUVGVzdCBJbnRlcm1lZGlhdGUgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDs6kY6mHJWzupq5dsSavPZHuv6
+0E9PczHbujWLuzv7+qbwzcAgfRvaeR0xgvf7q9pjMgJ7/kNANgneWGpwciLgHtiJ
+rSHii3RZfWlK4gdbCXya9EmHj8zO+9xGBHM0FrqfqA+IA70SimFcwGPrGHyERsdX
++mqO64Z95yI5uJpoS8OBAUPU8i6xvNLZGmgUEF3CRhDDTYVGcTEtKAPcnnBuZzZU
+Ds+DrHf/MC7HHK0/l0auuRz3p+/GFNePGePG+FFbInS/vwHwrkMW2tzBKG41K+gD
+GfkTjVU8xBSiMYOiEja6YcJ4GuzEPcmu5LS+6BkLlsIbazDW5IM8p+7+8RKjAgMB
+AAGjgcgwgcUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFGbR8UdJe0MJUcLxRbv+hu8Mun21MB8GA1UdIwQYMBaAFKmz0h3CW1HBO9uz
+uCzg+MNPGZtkMEEGCCsGAQUFBwEBBDUwMzAxBggrBgEFBQcwAoYlaHR0cDovL2xv
+Y2FsaG9zdC9zZWxmLWlzc3VlZC1yb290LmNydDAfBgNVHREEGDAWghRUZXN0IElu
+dGVybWVkaWF0ZSBDQTANBgkqhkiG9w0BAQsFAAOCAQEAaYVGqHbaE0c9F/kyIMgu
+S3HuNn4pBh2EwGcKIlPkDe43hqXjhS/+itmWk75rQz+Rw+acevGoxbpDR38abTIS
+RJd9L/3MA644z8F82er3pNjKqvS/vTre/wsvGYwmEM+GrgJw3HUcisc93qLgaWH2
+kjky208k9kOuzJDiY45eu9TfSSmjSHSMCtxk8p5wYKDcfVz+uqlBhVEiHGjQIc2E
+66SituusiwgQv/mdtEW7y48EvMGdzxPfLFcvj06B3vTsZaaYyB6GyKwMcaPFvHRr
+V0yYaKRZgAh4X6LHlgPJqvIv3gjMdJR55durAO7tI9Pos0o5Lv5WJgi0g0KvMsco
+qQ==
+-----END CERTIFICATE----- \ No newline at end of file
diff --git a/vendor/github.com/tylerb/graceful/test-fixtures/key.pem b/vendor/github.com/tylerb/graceful/test-fixtures/key.pem
new file mode 100644
index 000000000..78f3232c8
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/test-fixtures/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAw6MjE1Ctjkqfl4SkVwf/rjumACRP1tq86gEWoVk+m6B2TA4R
+10bXcGMVsIlKqMZRHgaxHq0NqjxQKpVpA8PM5YhPw1oF4AYlvlauQwJ9xdY2eVrJ
+C1q9Yj+PKCuP5fgVxLA3eODB2RaEmYlpa9hYuImiByRO/ixgaj1UF8AWE8nOyb4t
+x2PCOP8GmgMF6EqPLwz40YjAg9sJYNuEYpqHH+IKGv9l7GdDVO+lH9oz3bGeTLo4
+ykasBwxAI4gatGj75uooJDRgBqGekGBKbTvieLdaBa23T4GEZaagmt3pK/YHPsIO
+QDv1fcfP3MqW0PLacBcL70mINkyWlDGeWm3PhwIDAQABAoIBAQC87HWa2XZAyt+D
+OpxZT2ghoYiU6nwPR/zXHWX1OnGzaCnVGGEyOz8hUQ5JBMwMYDdFf8DbltJzavsf
+pFldQWBE6HXeeLjjtgwM2zg9jdJXkp3YY0tyo5XvouFkMW0s735WCrYHDUUllxFG
+E+SyOKK00nSd4PpHiiMxdTgYF286exwOpzjhcJfAkn7oBNeOGc5VLOvcvakrSrdq
+OYBAJ25HSVFnSQbeAAsCzBEBZC0WLyB1BQGcidbtEn8sxyGnV8HWjbXY+MJQWHg+
+q2iK+uvO4wtrE/WC6p4Ty44Myh+AB79s35HWKYd4okwKkpI1QdD543TIiZnkNEVI
+aS/uH13BAoGBAP/psBxKzIft59hw+U9NscH6N9/ze8iAtOtqsWdER/qXCrlUn8+j
+F/xquJR6gDj5GwGBt07asEuoG8CKJMQI0c3AeHF7XBcmUunBStktb9O97Zsp6bNJ
+olsrWlM4yvVuCVizEwIYjHrMBOS3YIPErM1LmAyDHmzx3+yz+3+WxRQLAoGBAMO0
+MaJDPisMC05pvieHRb91HlsiSrASeMkw1FmHI0b/gcC88mEnuXIze1ySoF6FE7B7
+xaEm6Lf5Snl0JgXPDSj6ukd51NdaU2VmpKvDOrvQ5QQE9mXaDkXv/i2B0YkCh+Hy
+bkziW1IKnWT2PTRAAEIJQ22oK51MdQnvCdmtsIP1AoGBAKnMiEl9Z9AZDmgSLZls
+17D5MPGrQEp8+43oMOVv7MJcTYVCnPbMJDIbLXV3AnTK9Bw/0TzE5YyNcjyCbHqV
+z39RYZkKXMQPbZwj4GHRQA2iS3FUkfeft9X+IeRuHlxSMmlkCAyv9SXVELog4i0L
+5gwhSDWlGh73LbiEgy7Y/tKZAoGBALTiMhYGDMoA4dpiBi3G7AKgH6SgN2QyTo22
+oi71pveSZb1dZrHB47fYOadApxV17tLqM6pVqjeRJPLJFfO8gi9kPxSdWMqLZBWP
+H5jaY8kAtQxYAd32A8dEoSwylxcJzcpbJvPNLBbSVNPifIN0vEhNA5OxIk7LQkoi
+NHqL/WCZAoGAPf3kb9Gw/NkBq4Cn86pQfP/xE0h7zcoNmFtLbdKIjId+DDDOPOeX
+9tm33fZzw0SG4KlRQlsqgzFvm8aDD8rpW17341Z/rWlLo8uHNdRkMvbSabc34vPv
+4lrs0rHSYW06MlqkJBNVraySRz7hmU4+n7YMvNI0Due9mVGmE1NU/vI=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/vendor/github.com/tylerb/graceful/tests/main.go b/vendor/github.com/tylerb/graceful/tests/main.go
new file mode 100644
index 000000000..9380ae69c
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/tests/main.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/urfave/negroni"
+ "gopkg.in/tylerb/graceful.v1"
+)
+
+func main() {
+
+ var wg sync.WaitGroup
+
+ wg.Add(3)
+ go func() {
+ n := negroni.New()
+ fmt.Println("Launching server on :3000")
+ graceful.Run(":3000", 0, n)
+ fmt.Println("Terminated server on :3000")
+ wg.Done()
+ }()
+ go func() {
+ n := negroni.New()
+ fmt.Println("Launching server on :3001")
+ graceful.Run(":3001", 0, n)
+ fmt.Println("Terminated server on :3001")
+ wg.Done()
+ }()
+ go func() {
+ n := negroni.New()
+ fmt.Println("Launching server on :3002")
+ graceful.Run(":3002", 0, n)
+ fmt.Println("Terminated server on :3002")
+ wg.Done()
+ }()
+ fmt.Println("Press ctrl+c. All servers should terminate.")
+ wg.Wait()
+
+}
diff --git a/vendor/github.com/xenolf/lego/.gitcookies.enc b/vendor/github.com/xenolf/lego/.gitcookies.enc
new file mode 100644
index 000000000..09c303c94
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/.gitcookies.enc
Binary files differ
diff --git a/vendor/github.com/xenolf/lego/.gitignore b/vendor/github.com/xenolf/lego/.gitignore
new file mode 100644
index 000000000..74d32f0ab
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/.gitignore
@@ -0,0 +1,4 @@
+lego.exe
+lego
+.lego
+.idea
diff --git a/vendor/github.com/xenolf/lego/.travis.yml b/vendor/github.com/xenolf/lego/.travis.yml
new file mode 100644
index 000000000..f1af03bd6
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+- 1.6.3
+- 1.7
+- tip
+install:
+- go get -t ./...
+script:
+- go vet ./...
+- go test -v ./...
+before_install:
+- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_26c593b079d9_key -iv $encrypted_26c593b079d9_iv -in .gitcookies.enc -out .gitcookies -d || true'
diff --git a/vendor/github.com/xenolf/lego/CHANGELOG.md b/vendor/github.com/xenolf/lego/CHANGELOG.md
new file mode 100644
index 000000000..c43c4a936
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/CHANGELOG.md
@@ -0,0 +1,94 @@
+# Changelog
+
+## [0.3.1] - 2016-04-19
+
+### Added:
+- lib: A new DNS provider for Vultr.
+
+### Fixed:
+- lib: DNS Provider for DigitalOcean could not handle subdomains properly.
+- lib: handleHTTPError should only try to JSON decode error messages with the right content type.
+- lib: The propagation checker for the DNS challenge would not retry on send errors.
+
+
+## [0.3.0] - 2016-03-19
+
+### Added:
+- CLI: The `--dns` switch. To include the DNS challenge for consideration. When using this switch, all other solvers are disabled. Supported are the following solvers: cloudflare, digitalocean, dnsimple, dyn, gandi, googlecloud, namecheap, route53, rfc2136 and manual.
+- CLI: The `--accept-tos` switch. Indicates your acceptance of the Let's Encrypt terms of service without prompting you.
+- CLI: The `--webroot` switch. The HTTP-01 challenge may now be completed by dropping a file into a webroot. When using this switch, all other solvers are disabled.
+- CLI: The `--key-type` switch. This replaces the `--rsa-key-size` switch and supports the following key types: EC256, EC384, RSA2048, RSA4096 and RSA8192.
+- CLI: The `--dnshelp` switch. This displays a more in-depth help topic for DNS solvers.
+- CLI: The `--no-bundle` sub switch for the `run` and `renew` commands. When this switch is set, the CLI will not bundle the issuer certificate with your certificate.
+- lib: A new type for challenge identifiers `Challenge`
+- lib: A new interface for custom challenge providers `acme.ChallengeProvider`
+- lib: A new interface for DNS-01 providers to allow for custom timeouts for the validation function `acme.ChallengeProviderTimeout`
+- lib: SetChallengeProvider function. Pass a challenge identifier and a Provider to replace the default behaviour of a challenge.
+- lib: The DNS-01 challenge has been implemented with modular solvers using the `ChallengeProvider` interface. Included solvers are: cloudflare, digitalocean, dnsimple, gandi, namecheap, route53, rfc2136 and manual.
+- lib: The `acme.KeyType` type was added and is used for the configuration of crypto parameters for RSA and EC keys. Valid KeyTypes are: EC256, EC384, RSA2048, RSA4096 and RSA8192.
+
+### Changed
+- lib: ExcludeChallenges now expects to be passed an array of `Challenge` types.
+- lib: HTTP-01 now supports custom solvers using the `ChallengeProvider` interface.
+- lib: TLS-SNI-01 now supports custom solvers using the `ChallengeProvider` interface.
+- lib: The `GetPrivateKey` function in the `acme.User` interface is now expected to return a `crypto.PrivateKey` instead of an `rsa.PrivateKey` for EC compat.
+- lib: The `acme.NewClient` function now expects an `acme.KeyType` instead of the keyBits parameter.
+
+### Removed
+- CLI: The `rsa-key-size` switch was removed in favor of `key-type` to support EC keys.
+
+### Fixed
+- lib: Fixed a race condition in HTTP-01
+- lib: Fixed an issue where status codes on ACME challenge responses could lead to no action being taken.
+- lib: Fixed a regression when calling the Renew function with a SAN certificate.
+
+## [0.2.0] - 2016-01-09
+
+### Added:
+- CLI: The `--exclude` or `-x` switch. To exclude a challenge from being solved.
+- CLI: The `--http` switch. To set the listen address and port of HTTP based challenges. Supports `host:port` and `:port` for any interface.
+- CLI: The `--tls` switch. To set the listen address and port of TLS based challenges. Supports `host:port` and `:port` for any interface.
+- CLI: The `--reuse-key` switch for the `renew` operation. This lets you reuse an existing private key for renewals.
+- lib: ExcludeChallenges function. Pass an array of challenge identifiers to exclude them from solving.
+- lib: SetHTTPAddress function. Pass a port to set the listen port for HTTP based challenges.
+- lib: SetTLSAddress function. Pass a port to set the listen port of TLS based challenges.
+- lib: acme.UserAgent variable. Use this to customize the user agent on all requests sent by lego.
+
+### Changed:
+- lib: NewClient no longer accepts the optPort parameter
+- lib: ObtainCertificate now returns a SAN certificate if you pass more than one domain.
+- lib: GetOCSPForCert now returns the parsed OCSP response instead of just the status.
+- lib: ObtainCertificate has a new parameter `privKey crypto.PrivateKey` which lets you reuse an existing private key for new certificates.
+- lib: RenewCertificate now expects the PrivateKey property of the CertificateResource to be set only if you want to reuse the key.
+
+### Removed:
+- CLI: The `--port` switch was removed.
+- lib: RenewCertificate no longer offers to also revoke your old certificate.
+
+### Fixed:
+- CLI: Fix logic using the `--days` parameter for renew
+
+## [0.1.1] - 2015-12-18
+
+### Added:
+- CLI: Added a way to automate renewal through a cronjob using the --days parameter to renew
+
+### Changed:
+- lib: Improved log output on challenge failures.
+
+### Fixed:
+- CLI: The short parameter for domains would not get accepted
+- CLI: The CLI did not return proper exit codes on library errors.
+- lib: RenewCertificate did not properly renew SAN certificates.
+
+### Security
+- lib: Fix possible DOS on GetOCSPForCert
+
+## [0.1.0] - 2015-12-03
+- Initial release
+
+[0.3.1]: https://github.com/xenolf/lego/compare/v0.3.0...v0.3.1
+[0.3.0]: https://github.com/xenolf/lego/compare/v0.2.0...v0.3.0
+[0.2.0]: https://github.com/xenolf/lego/compare/v0.1.1...v0.2.0
+[0.1.1]: https://github.com/xenolf/lego/compare/v0.1.0...v0.1.1
+[0.1.0]: https://github.com/xenolf/lego/tree/v0.1.0
diff --git a/vendor/github.com/xenolf/lego/CONTRIBUTING.md b/vendor/github.com/xenolf/lego/CONTRIBUTING.md
new file mode 100644
index 000000000..9939a5ab3
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to contribute to lego
+
+Contributions in the form of patches and proposals are essential to keep lego great and to make it even better.
+To ensure a great and easy experience for everyone, please review the few guidelines in this document.
+
+## Bug reports
+
+- Use the issue search to see if the issue has already been reported.
+- Also look for closed issues to see if your issue has already been fixed.
+- If both of the above do not apply, create a new issue and include as much information as possible.
+
+Bug reports should include all information a person could need to reproduce your problem without the need to
+follow up for more information. If possible, provide detailed steps for us to reproduce it, the expected behaviour and the actual behaviour.
+
+## Feature proposals and requests
+
+Feature requests are welcome and should be discussed in an issue.
+Please keep proposals focused on one thing at a time and be as detailed as possible.
+It is up to you to make a strong case for your proposal and convince us that its merits justify the added complexity of this feature.
+
+## Pull requests
+
+Patches, new features and improvements are a great way to help the project.
+Please keep them focused on one thing and do not include unrelated commits.
+
+All pull requests which alter the behaviour of the program, add new behaviour or somehow alter code in a non-trivial way should **always** include tests.
+
+If you want to contribute a significant pull request (with a non-trivial workload for you) please **ask first**. We do not want you to spend
+a lot of time on something the project's developers might not want to merge into the project.
+
+**IMPORTANT**: By submitting a patch, you agree to allow the project
+owners to license your work under the terms of the [MIT License](LICENSE).
diff --git a/vendor/github.com/xenolf/lego/Dockerfile b/vendor/github.com/xenolf/lego/Dockerfile
new file mode 100644
index 000000000..3749dfcee
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/Dockerfile
@@ -0,0 +1,14 @@
+FROM alpine:3.4
+
+ENV GOPATH /go
+
+RUN apk update && apk add ca-certificates go git && \
+ rm -rf /var/cache/apk/* && \
+ go get -u github.com/xenolf/lego && \
+ cd /go/src/github.com/xenolf/lego && \
+ go build -o /usr/bin/lego . && \
+ apk del ca-certificates go git && \
+ rm -rf /var/cache/apk/* && \
+ rm -rf /go
+
+ENTRYPOINT [ "/usr/bin/lego" ]
diff --git a/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/xenolf/lego/LICENSE
new file mode 100644
index 000000000..17460b716
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Sebastian Erhart
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
new file mode 100644
index 000000000..136bc5548
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -0,0 +1,257 @@
+# lego
+Let's Encrypt client and ACME library written in Go
+
+[![GoDoc](https://godoc.org/github.com/xenolf/lego/acme?status.svg)](https://godoc.org/github.com/xenolf/lego/acme)
+[![Build Status](https://travis-ci.org/xenolf/lego.svg?branch=master)](https://travis-ci.org/xenolf/lego)
+[![Dev Chat](https://img.shields.io/badge/dev%20chat-gitter-blue.svg?label=dev+chat)](https://gitter.im/xenolf/lego)
+
+#### General
+This is a work in progress. Please do *NOT* run this on a production server and please report any bugs you find!
+
+#### Installation
+lego supports both binary installs and installation from source.
+
+To get the binary just download the latest release for your OS/Arch from [the release page](https://github.com/xenolf/lego/releases)
+and put the binary somewhere convenient. lego does not assume anything about the location you run it from.
+
+To install from source, just run
+```
+go get -u github.com/xenolf/lego
+```
+
+To build lego inside a Docker container, just run
+```
+docker build -t lego .
+```
+
+#### Features
+
+- Register with CA
+- Obtain certificates, either from scratch or with an existing CSR
+- Renew certificates
+- Revoke certificates
+- Robust implementation of all ACME challenges
+ - HTTP (http-01)
+ - TLS with Server Name Indication (tls-sni-01)
+ - DNS (dns-01)
+- SAN certificate support
+- Comes with multiple optional [DNS providers](https://github.com/xenolf/lego/tree/master/providers/dns)
+- [Custom challenge solvers](https://github.com/xenolf/lego/wiki/Writing-a-Challenge-Solver)
+- Certificate bundling
+- OCSP helper function
+
+Please keep in mind that CLI switches and APIs are still subject to change.
+
+When using the standard `--path` option, all certificates and account configurations are saved to a folder *.lego* in the current working directory.
+
+#### Sudo
+The CLI does not require root permissions but needs to bind to port 80 and 443 for certain challenges.
+To run the CLI without sudo, you have four options:
+
+- Use setcap 'cap_net_bind_service=+ep' /path/to/program (a sketch follows this list)
+- Pass the `--http` and/or the `--tls` option and specify a custom port to bind to. In this case you have to forward port 80/443 to these custom ports (see [Port Usage](#port-usage)).
+- Pass the `--webroot` option and specify the path to your webroot folder. In this case the challenge will be written in a file in `.well-known/acme-challenge/` inside your webroot.
+- Pass the `--dns` option and specify a DNS provider.
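+
+A minimal sketch of the setcap option (the install path is illustrative; adjust it to wherever the lego binary lives):
+
+```bash
+# allow the lego binary to bind to ports 80 and 443 without root
+sudo setcap 'cap_net_bind_service=+ep' /usr/local/bin/lego
+```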
+
+#### Port Usage
+By default lego assumes it is able to bind to ports 80 and 443 to solve challenges.
+If this is not possible in your environment, you can use the `--http` and `--tls` options to instruct
+lego to listen on the specified interface:port for any incoming challenges.
+
+If you are using these options, make sure you proxy all of the following traffic to these ports.
+
+HTTP Port:
+- All plaintext HTTP requests to port 80 which begin with a request path of `/.well-known/acme-challenge/` for the HTTP challenge.
+
+TLS Port:
+- All TLS handshakes on port 443 for the TLS-SNI challenge.
+
+This traffic redirection is only needed as long as lego solves challenges. As soon as you have received your certificates you can deactivate the forwarding.
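+
+A minimal sketch of this setup, assuming ports 8080 and 8443 are free on the host (the port numbers and domain are illustrative):
+
+```bash
+# listen for challenges on unprivileged ports; traffic to 80/443 must still be forwarded to them
+lego --email="foo@bar.com" --domains="example.com" --http=":8080" --tls=":8443" run
+```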
+
+#### Usage
+
+```
+NAME:
+ lego - Let's Encrypt client written in Go
+
+USAGE:
+ lego [global options] command [command options] [arguments...]
+
+VERSION:
+ 0.3.1
+
+COMMANDS:
+ run Register an account, then create and install a certificate
+ revoke Revoke a certificate
+ renew Renew a certificate
+ dnshelp Shows additional help for the --dns global option
+ help, h Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+ --domains, -d [--domains option --domains option] Add domains to the process
+ --csr, -c Certificate signing request filename, if an external CSR is to be used
+ --server, -s "https://acme-v01.api.letsencrypt.org/directory" CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.
+ --email, -m Email used for registration and recovery contact.
+ --accept-tos, -a By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.
+ --key-type, -k "rsa2048" Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384
+ --path "${CWD}/.lego" Directory to use for storing the data
+ --exclude, -x [--exclude option --exclude option] Explicitly disallow solvers by name from being used. Solvers: "http-01", "tls-sni-01".
+ --webroot Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge
+ --http Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port
+ --tls Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port
+ --dns Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.
+ --help, -h show help
+ --version, -v print the version
+```
+
+##### CLI Example
+
+Assumes the `lego` binary has permission to bind to ports 80 and 443. You can get a pre-built binary from the [releases](https://github.com/xenolf/lego/releases) page.
+If your environment does not allow you to bind to these ports, please read [Port Usage](#port-usage).
+
+Obtain a certificate:
+
+```bash
+$ lego --email="foo@bar.com" --domains="example.com" run
+```
+
+(Find your certificate in the `.lego` folder of the current working directory.)
+
+To renew the certificate:
+
+```bash
+$ lego --email="foo@bar.com" --domains="example.com" renew
+```
+
+Obtain a certificate using the DNS challenge and AWS Route 53:
+
+```bash
+$ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=my_id AWS_SECRET_ACCESS_KEY=my_key lego --email="foo@bar.com" --domains="example.com" --dns="route53" run
+```
+
+Note that `--dns=foo` implies `--exclude=http-01` and `--exclude=tls-sni-01`. lego will not attempt other challenges if you've told it to use DNS instead.
+
+Obtain a certificate given a certificate signing request (CSR) generated by something else:
+
+```bash
+$ lego --email="foo@bar.com" --csr=/path/to/csr.pem run
+```
+
+(lego will infer the domains to be validated based on the contents of the CSR, so make sure the CSR's Common Name and optional SubjectAltNames are set correctly.)
+
+lego defaults to communicating with the production Let's Encrypt ACME server. If you'd like to test something without issuing real certificates, consider using the staging endpoint instead:
+
+```bash
+$ lego --server=https://acme-staging.api.letsencrypt.org/directory …
+```
+
+#### DNS Challenge API Details
+
+##### AWS Route 53
+
+The following AWS IAM policy document describes the permissions required for lego to complete the DNS challenge.
+Replace `<INSERT_YOUR_HOSTED_ZONE_ID_HERE>` with the Route 53 zone ID of the domain you are authorizing.
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "route53:GetChange",
+ "route53:ListHostedZonesByName"
+ ],
+ "Resource": [
+ "*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "route53:ChangeResourceRecordSets"
+ ],
+ "Resource": [
+ "arn:aws:route53:::hostedzone/<INSERT_YOUR_HOSTED_ZONE_ID_HERE>"
+ ]
+ }
+ ]
+}
+```
+
+#### ACME Library Usage
+
+A valid, but bare-bones example use of the acme package:
+
+```go
+// You'll need a user or account type that implements acme.User
+type MyUser struct {
+ Email string
+ Registration *acme.RegistrationResource
+ key crypto.PrivateKey
+}
+func (u MyUser) GetEmail() string {
+ return u.Email
+}
+func (u MyUser) GetRegistration() *acme.RegistrationResource {
+ return u.Registration
+}
+func (u MyUser) GetPrivateKey() crypto.PrivateKey {
+ return u.key
+}
+
+// Create a user. New accounts need an email and private key to start.
+const rsaKeySize = 2048
+privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize)
+if err != nil {
+ log.Fatal(err)
+}
+myUser := MyUser{
+ Email: "you@yours.com",
+ key: privateKey,
+}
+
+// A client facilitates communication with the CA server. This CA URL is
+// configured for a local dev instance of Boulder running in Docker in a VM.
+client, err := acme.NewClient("http://192.168.99.100:4000", &myUser, acme.RSA2048)
+if err != nil {
+ log.Fatal(err)
+}
+
+// We specify an http port of 5002 and a tls port of 5001 on all interfaces
+// because we aren't running as root and can't bind a listener to port 80 and 443
+// (used later when we attempt to pass challenges). Keep in mind that we still
+// need to proxy challenge traffic to port 5002 and 5001.
+client.SetHTTPAddress(":5002")
+client.SetTLSAddress(":5001")
+
+// New users will need to register
+reg, err := client.Register()
+if err != nil {
+ log.Fatal(err)
+}
+myUser.Registration = reg
+
+// SAVE THE USER.
+
+// The client has a URL to the current Let's Encrypt Subscriber
+// Agreement. The user will need to agree to it.
+err = client.AgreeToTOS()
+if err != nil {
+ log.Fatal(err)
+}
+
+// The acme library takes care of completing the challenges to obtain the certificate(s).
+// The domains must resolve to this machine or you have to use the DNS challenge.
+bundle := false
+certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil)
+if len(failures) > 0 {
+ log.Fatal(failures)
+}
+
+// Each certificate comes back with the cert bytes, the bytes of the client's
+// private key, and a certificate URL. SAVE THESE TO DISK.
+fmt.Printf("%#v\n", certificates)
+
+// ... all done.
+```
diff --git a/vendor/github.com/xenolf/lego/account.go b/vendor/github.com/xenolf/lego/account.go
new file mode 100644
index 000000000..34856e16f
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/account.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "crypto"
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// Account represents a user's locally saved credentials
+type Account struct {
+ Email string `json:"email"`
+ key crypto.PrivateKey
+ Registration *acme.RegistrationResource `json:"registration"`
+
+ conf *Configuration
+}
+
+// NewAccount creates a new account for an email address
+func NewAccount(email string, conf *Configuration) *Account {
+ accKeysPath := conf.AccountKeysPath(email)
+ // TODO: move to function in configuration?
+ accKeyPath := accKeysPath + string(os.PathSeparator) + email + ".key"
+ if err := checkFolder(accKeysPath); err != nil {
+ logger().Fatalf("Could not check/create directory for account %s: %v", email, err)
+ }
+
+ var privKey crypto.PrivateKey
+ if _, err := os.Stat(accKeyPath); os.IsNotExist(err) {
+
+ logger().Printf("No key found for account %s. Generating a curve P384 EC key.", email)
+ privKey, err = generatePrivateKey(accKeyPath)
+ if err != nil {
+			logger().Fatalf("Could not generate private account key for account %s: %v", email, err)
+ }
+
+ logger().Printf("Saved key to %s", accKeyPath)
+ } else {
+ privKey, err = loadPrivateKey(accKeyPath)
+ if err != nil {
+			logger().Fatalf("Could not load private key from file %s: %v", accKeyPath, err)
+ }
+ }
+
+ accountFile := path.Join(conf.AccountPath(email), "account.json")
+ if _, err := os.Stat(accountFile); os.IsNotExist(err) {
+ return &Account{Email: email, key: privKey, conf: conf}
+ }
+
+ fileBytes, err := ioutil.ReadFile(accountFile)
+ if err != nil {
+ logger().Fatalf("Could not load file for account %s -> %v", email, err)
+ }
+
+ var acc Account
+ err = json.Unmarshal(fileBytes, &acc)
+ if err != nil {
+ logger().Fatalf("Could not parse file for account %s -> %v", email, err)
+ }
+
+ acc.key = privKey
+ acc.conf = conf
+
+ if acc.Registration == nil {
+ logger().Fatalf("Could not load account for %s. Registration is nil.", email)
+ }
+
+ if acc.conf == nil {
+ logger().Fatalf("Could not load account for %s. Configuration is nil.", email)
+ }
+
+ return &acc
+}
+
+/** Implementation of the acme.User interface **/
+
+// GetEmail returns the email address for the account
+func (a *Account) GetEmail() string {
+ return a.Email
+}
+
+// GetPrivateKey returns the private account key.
+func (a *Account) GetPrivateKey() crypto.PrivateKey {
+ return a.key
+}
+
+// GetRegistration returns the server registration
+func (a *Account) GetRegistration() *acme.RegistrationResource {
+ return a.Registration
+}
+
+/** End **/
+
+// Save the account to disk
+func (a *Account) Save() error {
+ jsonBytes, err := json.MarshalIndent(a, "", "\t")
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(
+ path.Join(a.conf.AccountPath(a.Email), "account.json"),
+ jsonBytes,
+ 0600,
+ )
+}
diff --git a/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/xenolf/lego/acme/challenges.go
new file mode 100644
index 000000000..857900507
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/challenges.go
@@ -0,0 +1,16 @@
+package acme
+
+// Challenge is a string that identifies a particular type and version of ACME challenge.
+type Challenge string
+
+const (
+ // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http
+ // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge
+ HTTP01 = Challenge("http-01")
+ // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni
+ // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge
+ TLSSNI01 = Challenge("tls-sni-01")
+ // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns
+ // Note: DNS01Record returns a DNS record which will fulfill this challenge
+ DNS01 = Challenge("dns-01")
+)
diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go
new file mode 100644
index 000000000..5eae8d26a
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/client.go
@@ -0,0 +1,804 @@
+// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers.
+package acme
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // Logger is an optional custom logger.
+ Logger *log.Logger
+)
+
+// logf writes a log entry. It uses Logger if not
+// nil, otherwise it uses the default log.Logger.
+func logf(format string, args ...interface{}) {
+ if Logger != nil {
+ Logger.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// User interface is to be implemented by users of this library.
+// It is used by the client type to get user-specific information.
+type User interface {
+ GetEmail() string
+ GetRegistration() *RegistrationResource
+ GetPrivateKey() crypto.PrivateKey
+}
+
+// Interface for all challenge solvers to implement.
+type solver interface {
+ Solve(challenge challenge, domain string) error
+}
+
+type validateFunc func(j *jws, domain, uri string, chlng challenge) error
+
+// Client is the user-friendly way to ACME
+type Client struct {
+ directory directory
+ user User
+ jws *jws
+ keyType KeyType
+ issuerCert []byte
+ solvers map[Challenge]solver
+}
+
+// NewClient creates a new ACME client on behalf of the user. The client will depend on
+// the ACME directory located at caDirURL for the rest of its actions. A private
+// key of type keyType (see KeyType constants) will be generated when requesting a new
+// certificate if one isn't provided.
+func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) {
+ privKey := user.GetPrivateKey()
+ if privKey == nil {
+ return nil, errors.New("private key was nil")
+ }
+
+ var dir directory
+ if _, err := getJSON(caDirURL, &dir); err != nil {
+ return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err)
+ }
+
+ if dir.NewRegURL == "" {
+ return nil, errors.New("directory missing new registration URL")
+ }
+ if dir.NewAuthzURL == "" {
+ return nil, errors.New("directory missing new authz URL")
+ }
+ if dir.NewCertURL == "" {
+ return nil, errors.New("directory missing new certificate URL")
+ }
+ if dir.RevokeCertURL == "" {
+ return nil, errors.New("directory missing revoke certificate URL")
+ }
+
+ jws := &jws{privKey: privKey, directoryURL: caDirURL}
+
+ // REVIEW: best possibility?
+ // Add all available solvers with the right index as per ACME
+	// spec to this map. Otherwise they won't be found.
+ solvers := make(map[Challenge]solver)
+ solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}}
+ solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}}
+
+ return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil
+}
+
+// SetChallengeProvider specifies a custom provider that will make the solution available
+func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error {
+ switch challenge {
+ case HTTP01:
+ c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p}
+ case TLSSNI01:
+ c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p}
+ case DNS01:
+ c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p}
+ default:
+ return fmt.Errorf("Unknown challenge %v", challenge)
+ }
+ return nil
+}
+
+// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges.
+// If this option is not used, the default port 80 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
+func (c *Client) SetHTTPAddress(iface string) error {
+ host, port, err := net.SplitHostPort(iface)
+ if err != nil {
+ return err
+ }
+
+ if chlng, ok := c.solvers[HTTP01]; ok {
+ chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port)
+ }
+
+ return nil
+}
+
+// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges.
+// If this option is not used, the default port 443 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
+func (c *Client) SetTLSAddress(iface string) error {
+ host, port, err := net.SplitHostPort(iface)
+ if err != nil {
+ return err
+ }
+
+ if chlng, ok := c.solvers[TLSSNI01]; ok {
+ chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port)
+ }
+ return nil
+}
+
+// ExcludeChallenges explicitly removes challenges from the pool for solving.
+func (c *Client) ExcludeChallenges(challenges []Challenge) {
+ // Loop through all challenges and delete the requested one if found.
+ for _, challenge := range challenges {
+ delete(c.solvers, challenge)
+ }
+}
+
+// Register the current account to the ACME server.
+func (c *Client) Register() (*RegistrationResource, error) {
+ if c == nil || c.user == nil {
+ return nil, errors.New("acme: cannot register a nil client or user")
+ }
+ logf("[INFO] acme: Registering account for %s", c.user.GetEmail())
+
+ regMsg := registrationMessage{
+ Resource: "new-reg",
+ }
+ if c.user.GetEmail() != "" {
+ regMsg.Contact = []string{"mailto:" + c.user.GetEmail()}
+ } else {
+ regMsg.Contact = []string{}
+ }
+
+ var serverReg Registration
+ var regURI string
+ hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg)
+ if err != nil {
+ remoteErr, ok := err.(RemoteError)
+ if ok && remoteErr.StatusCode == 409 {
+ regURI = hdr.Get("Location")
+ regMsg = registrationMessage{
+ Resource: "reg",
+ }
+ if hdr, err = postJSON(c.jws, regURI, regMsg, &serverReg); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ reg := &RegistrationResource{Body: serverReg}
+
+ links := parseLinks(hdr["Link"])
+
+ if regURI == "" {
+ regURI = hdr.Get("Location")
+ }
+ reg.URI = regURI
+ if links["terms-of-service"] != "" {
+ reg.TosURL = links["terms-of-service"]
+ }
+
+ if links["next"] != "" {
+ reg.NewAuthzURL = links["next"]
+ } else {
+ return nil, errors.New("acme: The server did not return 'next' link to proceed")
+ }
+
+ return reg, nil
+}
+
+// DeleteRegistration deletes the client's user registration from the ACME
+// server.
+func (c *Client) DeleteRegistration() error {
+ if c == nil || c.user == nil {
+ return errors.New("acme: cannot unregister a nil client or user")
+ }
+ logf("[INFO] acme: Deleting account for %s", c.user.GetEmail())
+
+ regMsg := registrationMessage{
+ Resource: "reg",
+ Delete: true,
+ }
+
+ _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// QueryRegistration runs a POST request on the client's registration and
+// returns the result.
+//
+// This is similar to the Register function, but acting on an existing
+// registration link and resource.
+func (c *Client) QueryRegistration() (*RegistrationResource, error) {
+ if c == nil || c.user == nil {
+ return nil, errors.New("acme: cannot query the registration of a nil client or user")
+ }
+ // Log the URL here instead of the email as the email may not be set
+ logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI)
+
+ regMsg := registrationMessage{
+ Resource: "reg",
+ }
+
+ var serverReg Registration
+ hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg)
+ if err != nil {
+ return nil, err
+ }
+
+ reg := &RegistrationResource{Body: serverReg}
+
+ links := parseLinks(hdr["Link"])
+ // Location: header is not returned so this needs to be populated off of
+ // existing URI
+ reg.URI = c.user.GetRegistration().URI
+ if links["terms-of-service"] != "" {
+ reg.TosURL = links["terms-of-service"]
+ }
+
+ if links["next"] != "" {
+ reg.NewAuthzURL = links["next"]
+ } else {
+ return nil, errors.New("acme: No new-authz link in response to registration query")
+ }
+
+ return reg, nil
+}
+
+// AgreeToTOS updates the Client registration and sends the agreement to
+// the server.
+func (c *Client) AgreeToTOS() error {
+ reg := c.user.GetRegistration()
+
+ reg.Body.Agreement = reg.TosURL
+ reg.Body.Resource = "reg"
+ _, err := postJSON(c.jws, reg.URI, reg.Body, nil)
+ return err
+}
+
+// ObtainCertificateForCSR tries to obtain a certificate matching the CSR passed into it.
+// The domains are inferred from the CommonName and SubjectAltNames, if any. The private key
+// for this CSR is not required.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// This function will never return a partial certificate. If one domain in the list fails,
+// the whole certificate will fail.
+func (c *Client) ObtainCertificateForCSR(csr x509.CertificateRequest, bundle bool) (CertificateResource, map[string]error) {
+ // figure out what domains it concerns
+ // start with the common name
+ domains := []string{csr.Subject.CommonName}
+
+ // loop over the SubjectAltName DNS names
+DNSNames:
+ for _, sanName := range csr.DNSNames {
+ for _, existingName := range domains {
+ if existingName == sanName {
+ // duplicate; skip this name
+ continue DNSNames
+ }
+ }
+
+ // name is unique
+ domains = append(domains, sanName)
+ }
+
+ if bundle {
+ logf("[INFO][%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", "))
+ } else {
+ logf("[INFO][%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", "))
+ }
+
+ challenges, failures := c.getChallenges(domains)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(failures) > 0 {
+ return CertificateResource{}, failures
+ }
+
+ errs := c.solveChallenges(challenges)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(errs) > 0 {
+ return CertificateResource{}, errs
+ }
+
+ logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
+
+ cert, err := c.requestCertificateForCsr(challenges, bundle, csr.Raw, nil)
+ if err != nil {
+ for _, chln := range challenges {
+ failures[chln.Domain] = err
+ }
+ }
+
+ // Add the CSR to the certificate so that it can be used for renewals.
+ cert.CSR = pemEncode(&csr)
+
+ return cert, failures
+}
+
+// ObtainCertificate tries to obtain a single certificate using all domains passed into it.
+// The first domain in domains is used for the CommonName field of the certificate, all other
+// domains are added using the Subject Alternate Names extension. A new private key is generated
+// for every invocation of this function. If you do not want that, you can supply your own private key
+// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// This function will never return a partial certificate. If one domain in the list fails,
+// the whole certificate will fail.
+func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) {
+ if bundle {
+ logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", "))
+ } else {
+ logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", "))
+ }
+
+ challenges, failures := c.getChallenges(domains)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(failures) > 0 {
+ return CertificateResource{}, failures
+ }
+
+ errs := c.solveChallenges(challenges)
+ // If any challenge fails - return. Do not generate partial SAN certificates.
+ if len(errs) > 0 {
+ return CertificateResource{}, errs
+ }
+
+ logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
+
+ cert, err := c.requestCertificate(challenges, bundle, privKey)
+ if err != nil {
+ for _, chln := range challenges {
+ failures[chln.Domain] = err
+ }
+ }
+
+ return cert, failures
+}
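+
+// exampleObtainCertificate is an illustrative sketch and not part of the
+// upstream library: it assumes the account has already been registered via
+// Register and the ToS accepted via AgreeToTOS, then requests one bundled SAN
+// certificate with a freshly generated private key (privKey == nil).
+func exampleObtainCertificate(c *Client, domains []string) (CertificateResource, error) {
+ cert, failures := c.ObtainCertificate(domains, true, nil)
+ if len(failures) > 0 {
+  // No partial certificates are issued; treat any failure as fatal here.
+  return CertificateResource{}, fmt.Errorf("acme: %d of %d domains failed validation", len(failures), len(domains))
+ }
+ return cert, nil
+}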
+
+// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA.
+func (c *Client) RevokeCertificate(certificate []byte) error {
+ certificates, err := parsePEMBundle(certificate)
+ if err != nil {
+ return err
+ }
+
+ x509Cert := certificates[0]
+ if x509Cert.IsCA {
+ return fmt.Errorf("Certificate bundle starts with a CA certificate")
+ }
+
+ encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw)
+
+ _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil)
+ return err
+}
+
+// RenewCertificate takes a CertificateResource and tries to renew the certificate.
+// If the renewal process succeeds, the new certificate will be returned in a new CertResource.
+// Please be aware that this function will return a new certificate in ANY case that is not an error.
+// If the server does not provide us with a new cert on a GET request to the CertURL
+// this function will start a new-cert flow where a new certificate gets generated.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil.
+func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) {
+ // Input certificate is PEM encoded. Decode it here as we may need the decoded
+ // cert later on in the renewal process. The input may be a bundle or a single certificate.
+ certificates, err := parsePEMBundle(cert.Certificate)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ x509Cert := certificates[0]
+ if x509Cert.IsCA {
+ return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain)
+ }
+
+ // This is just meant to be informal for the user.
+ timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC())
+ logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours()))
+
+ // The first step of renewal is to check if we get a renewed cert
+ // directly from the cert URL.
+ resp, err := httpGet(cert.CertURL)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ defer resp.Body.Close()
+ serverCertBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ serverCert, err := x509.ParseCertificate(serverCertBytes)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ // If the server responds with a different certificate we are effectively renewed.
+ // TODO: Further test if we can actually use the new certificate (Our private key works)
+ if !x509Cert.Equal(serverCert) {
+ logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain)
+ issuedCert := pemEncode(derCertificateBytes(serverCertBytes))
+ // If bundle is true, we want to return a certificate bundle.
+ // To do this, we need the issuer certificate.
+ if bundle {
+ // The issuer certificate link is always supplied via an "up" link
+ // in the response headers of a new certificate.
+ links := parseLinks(resp.Header["Link"])
+ issuerCert, err := c.getIssuerCertificate(links["up"])
+ if err != nil {
+ // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+ logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err)
+ } else {
+ // Success - append the issuer cert to the issued cert.
+ issuerCert = pemEncode(derCertificateBytes(issuerCert))
+ issuedCert = append(issuedCert, issuerCert...)
+ }
+ }
+
+ cert.Certificate = issuedCert
+ return cert, nil
+ }
+
+ // If the certificate is the same, then we need to request a new certificate.
+ // Start by checking to see if the certificate was based off a CSR, and
+ // use that if it's defined.
+ if len(cert.CSR) > 0 {
+ csr, err := pemDecodeTox509CSR(cert.CSR)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ newCert, failures := c.ObtainCertificateForCSR(*csr, bundle)
+ return newCert, failures[cert.Domain]
+ }
+
+ var privKey crypto.PrivateKey
+ if cert.PrivateKey != nil {
+ privKey, err = parsePEMPrivateKey(cert.PrivateKey)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+
+ var domains []string
+ var failures map[string]error
+ // check for SAN certificate
+ if len(x509Cert.DNSNames) > 1 {
+ domains = append(domains, x509Cert.Subject.CommonName)
+ for _, sanDomain := range x509Cert.DNSNames {
+ if sanDomain == x509Cert.Subject.CommonName {
+ continue
+ }
+ domains = append(domains, sanDomain)
+ }
+ } else {
+ domains = append(domains, x509Cert.Subject.CommonName)
+ }
+
+ newCert, failures := c.ObtainCertificate(domains, bundle, privKey)
+ return newCert, failures[cert.Domain]
+}
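+
+// exampleRenewIfNeeded is an illustrative sketch and not part of the upstream
+// library: it renews only when less than an assumed 30 days of validity
+// remain, letting RenewCertificate reuse the stored private key or CSR of the
+// existing resource.
+func exampleRenewIfNeeded(c *Client, cert CertificateResource) (CertificateResource, error) {
+ notAfter, err := GetPEMCertExpiration(cert.Certificate)
+ if err != nil {
+  return cert, err
+ }
+ if notAfter.Sub(time.Now()) > 30*24*time.Hour {
+  // Plenty of validity left; keep the current certificate.
+  return cert, nil
+ }
+ return c.RenewCertificate(cert, true)
+}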
+
+// Looks through the challenge combinations to find a solvable match.
+// Then solves the challenges in series and returns.
+func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error {
+ // loop through the resources, basically through the domains.
+ failures := make(map[string]error)
+ for _, authz := range challenges {
+ if authz.Body.Status == "valid" {
+   // Boulder might recycle a recently validated authz (see issue #267)
+ logf("[INFO][%s] acme: Authorization already valid; skipping challenge", authz.Domain)
+ continue
+ }
+ // no solvers - no solving
+ if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil {
+ for i, solver := range solvers {
+ // TODO: do not immediately fail if one domain fails to validate.
+ err := solver.Solve(authz.Body.Challenges[i], authz.Domain)
+ if err != nil {
+ failures[authz.Domain] = err
+ }
+ }
+ } else {
+ failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain)
+ }
+ }
+
+ return failures
+}
+
+// Checks all combinations from the server and returns an array of
+// solvers which should get executed in series.
+func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver {
+ for _, combination := range auth.Combinations {
+ solvers := make(map[int]solver)
+ for _, idx := range combination {
+ if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok {
+ solvers[idx] = solver
+ } else {
+ logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type)
+ }
+ }
+
+ // If we can solve the whole combination, return the solvers
+ if len(solvers) == len(combination) {
+ return solvers
+ }
+ }
+ return nil
+}
+
+// Get the challenges needed to prove our identifiers to the ACME server.
+func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) {
+ resc, errc := make(chan authorizationResource), make(chan domainError)
+
+ for _, domain := range domains {
+ go func(domain string) {
+ authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}}
+ var authz authorization
+ hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz)
+ if err != nil {
+ errc <- domainError{Domain: domain, Error: err}
+ return
+ }
+
+ links := parseLinks(hdr["Link"])
+ if links["next"] == "" {
+ logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain)
+ return
+ }
+
+ resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain}
+ }(domain)
+ }
+
+ responses := make(map[string]authorizationResource)
+ failures := make(map[string]error)
+ for i := 0; i < len(domains); i++ {
+ select {
+ case res := <-resc:
+ responses[res.Domain] = res
+ case err := <-errc:
+ failures[err.Domain] = err.Error
+ }
+ }
+
+ challenges := make([]authorizationResource, 0, len(responses))
+ for _, domain := range domains {
+ if challenge, ok := responses[domain]; ok {
+ challenges = append(challenges, challenge)
+ }
+ }
+
+ close(resc)
+ close(errc)
+
+ return challenges, failures
+}
+
+func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) {
+ if len(authz) == 0 {
+ return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!")
+ }
+
+ var err error
+ if privKey == nil {
+ privKey, err = generatePrivateKey(c.keyType)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+
+ // determine certificate name(s) based on the authorization resources
+ commonName := authz[0]
+ var san []string
+ for _, auth := range authz[1:] {
+ san = append(san, auth.Domain)
+ }
+
+ // TODO: should the CSR be customizable?
+ csr, err := generateCsr(privKey, commonName.Domain, san)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ return c.requestCertificateForCsr(authz, bundle, csr, pemEncode(privKey))
+}
+
+func (c *Client) requestCertificateForCsr(authz []authorizationResource, bundle bool, csr []byte, privateKeyPem []byte) (CertificateResource, error) {
+ commonName := authz[0]
+
+ var authURLs []string
+ for _, auth := range authz[1:] {
+ authURLs = append(authURLs, auth.AuthURL)
+ }
+
+ csrString := base64.URLEncoding.EncodeToString(csr)
+ jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs})
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ resp, err := c.jws.post(commonName.NewCertURL, jsonBytes)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ cerRes := CertificateResource{
+ Domain: commonName.Domain,
+ CertURL: resp.Header.Get("Location"),
+ PrivateKey: privateKeyPem}
+
+ for {
+ switch resp.StatusCode {
+ case 201, 202:
+ cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ resp.Body.Close()
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ // The server returns a body with a length of zero if the
+ // certificate was not ready at the time this request completed.
+ // Otherwise the body is the certificate.
+ if len(cert) > 0 {
+
+ cerRes.CertStableURL = resp.Header.Get("Content-Location")
+ cerRes.AccountRef = c.user.GetRegistration().URI
+
+ issuedCert := pemEncode(derCertificateBytes(cert))
+ // If bundle is true, we want to return a certificate bundle.
+ // To do this, we need the issuer certificate.
+ if bundle {
+ // The issuer certificate link is always supplied via an "up" link
+ // in the response headers of a new certificate.
+ links := parseLinks(resp.Header["Link"])
+ issuerCert, err := c.getIssuerCertificate(links["up"])
+ if err != nil {
+ // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+ logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err)
+ } else {
+ // Success - append the issuer cert to the issued cert.
+ issuerCert = pemEncode(derCertificateBytes(issuerCert))
+ issuedCert = append(issuedCert, issuerCert...)
+ }
+ }
+
+ cerRes.Certificate = issuedCert
+ logf("[INFO][%s] Server responded with a certificate.", commonName.Domain)
+ return cerRes, nil
+ }
+
+ // The certificate was granted but is not yet issued.
+ // Check retry-after and loop.
+ ra := resp.Header.Get("Retry-After")
+ retryAfter, err := strconv.Atoi(ra)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+
+ logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter)
+ time.Sleep(time.Duration(retryAfter) * time.Second)
+
+ break
+ default:
+ return CertificateResource{}, handleHTTPError(resp)
+ }
+
+ resp, err = httpGet(cerRes.CertURL)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+}
+
+// getIssuerCertificate requests the issuer certificate and caches it for
+// subsequent requests.
+func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
+ logf("[INFO] acme: Requesting issuer cert from %s", url)
+ if c.issuerCert != nil {
+ return c.issuerCert, nil
+ }
+
+ resp, err := httpGet(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = x509.ParseCertificate(issuerBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ c.issuerCert = issuerBytes
+ return issuerBytes, err
+}
+
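+// parseLinks turns RFC 5988 style Link header values into a rel -> URL map.
+// For example (illustrative), the header value
+//   <https://example.com/acme/new-authz>;rel="next"
+// yields linkMap["next"] == "https://example.com/acme/new-authz".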
+func parseLinks(links []string) map[string]string {
+ aBrkt := regexp.MustCompile("[<>]")
+ slver := regexp.MustCompile("(.+) *= *\"(.+)\"")
+ linkMap := make(map[string]string)
+
+ for _, link := range links {
+
+ link = aBrkt.ReplaceAllString(link, "")
+ parts := strings.Split(link, ";")
+
+ matches := slver.FindStringSubmatch(parts[1])
+ if len(matches) > 0 {
+ linkMap[matches[2]] = parts[0]
+ }
+ }
+
+ return linkMap
+}
+
+// validate makes the ACME server start validating a
+// challenge response, only returning once it is done.
+func validate(j *jws, domain, uri string, chlng challenge) error {
+ var challengeResponse challenge
+
+ hdr, err := postJSON(j, uri, chlng, &challengeResponse)
+ if err != nil {
+ return err
+ }
+
+ // After the path is sent, the ACME server will access our server.
+ // Repeatedly check the server for an updated status on our request.
+ for {
+ switch challengeResponse.Status {
+ case "valid":
+ logf("[INFO][%s] The server validated our request", domain)
+ return nil
+ case "pending":
+ break
+ case "invalid":
+ return handleChallengeError(challengeResponse)
+ default:
+ return errors.New("The server returned an unexpected state.")
+ }
+
+ ra, err := strconv.Atoi(hdr.Get("Retry-After"))
+ if err != nil {
+ // The ACME server MUST return a Retry-After.
+ // If it doesn't, we'll just poll hard.
+ ra = 1
+ }
+ time.Sleep(time.Duration(ra) * time.Second)
+
+ hdr, err = getJSON(uri, &challengeResponse)
+ if err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/xenolf/lego/acme/client_test.go
new file mode 100644
index 000000000..e309554f3
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/client_test.go
@@ -0,0 +1,198 @@
+package acme
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestNewClient(t *testing.T) {
+ keyBits := 32 // small value keeps test fast
+ keyType := RSA2048
+ key, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ t.Fatal("Could not generate test key:", err)
+ }
+ user := mockUser{
+ email: "test@test.com",
+ regres: new(RegistrationResource),
+ privatekey: key,
+ }
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+ w.Write(data)
+ }))
+
+ client, err := NewClient(ts.URL, user, keyType)
+ if err != nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+
+ if client.jws == nil {
+ t.Fatalf("Expected client.jws to not be nil")
+ }
+ if expected, actual := key, client.jws.privKey; actual != expected {
+ t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual)
+ }
+
+ if client.keyType != keyType {
+ t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType)
+ }
+
+ if expected, actual := 2, len(client.solvers); actual != expected {
+ t.Fatalf("Expected %d solver(s), got %d", expected, actual)
+ }
+}
+
+func TestClientOptPort(t *testing.T) {
+ keyBits := 32 // small value keeps test fast
+ key, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ t.Fatal("Could not generate test key:", err)
+ }
+ user := mockUser{
+ email: "test@test.com",
+ regres: new(RegistrationResource),
+ privatekey: key,
+ }
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+ w.Write(data)
+ }))
+
+ optPort := "1234"
+ optHost := ""
+ client, err := NewClient(ts.URL, user, RSA2048)
+ if err != nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+ client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+ client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+ httpSolver, ok := client.solvers[HTTP01].(*httpChallenge)
+ if !ok {
+ t.Fatal("Expected http-01 solver to be httpChallenge type")
+ }
+ if httpSolver.jws != client.jws {
+ t.Error("Expected http-01 to have same jws as client")
+ }
+ if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort {
+ t.Errorf("Expected http-01 to have port %s but was %s", optPort, got)
+ }
+ if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+ t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+ }
+
+ httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge)
+ if !ok {
+ t.Fatal("Expected tls-sni-01 solver to be httpChallenge type")
+ }
+ if httpsSolver.jws != client.jws {
+ t.Error("Expected tls-sni-01 to have same jws as client")
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+ t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost {
+ t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got)
+ }
+
+ // test setting different host
+ optHost = "127.0.0.1"
+ client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+ client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+ if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+ t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+ }
+ if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+ t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+ }
+}
+
+func TestValidate(t *testing.T) {
+ var statuses []string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Minimal stub ACME server for validation.
+ w.Header().Add("Replay-Nonce", "12345")
+ w.Header().Add("Retry-After", "0")
+ switch r.Method {
+ case "HEAD":
+ case "POST":
+ st := statuses[0]
+ statuses = statuses[1:]
+ writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+ case "GET":
+ st := statuses[0]
+ statuses = statuses[1:]
+ writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+ default:
+ http.Error(w, r.Method, http.StatusMethodNotAllowed)
+ }
+ }))
+ defer ts.Close()
+
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey, directoryURL: ts.URL}
+
+ tsts := []struct {
+ name string
+ statuses []string
+ want string
+ }{
+ {"POST-unexpected", []string{"weird"}, "unexpected"},
+ {"POST-valid", []string{"valid"}, ""},
+ {"POST-invalid", []string{"invalid"}, "Error Detail"},
+ {"GET-unexpected", []string{"pending", "weird"}, "unexpected"},
+ {"GET-valid", []string{"pending", "valid"}, ""},
+ {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"},
+ }
+
+ for _, tst := range tsts {
+ statuses = tst.statuses
+ if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" {
+ t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+ } else if err != nil && !strings.Contains(err.Error(), tst.want) {
+ t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+ }
+ }
+}
+
+// writeJSONResponse marshals the body as JSON and writes it to the response.
+func writeJSONResponse(w http.ResponseWriter, body interface{}) {
+ bs, err := json.Marshal(body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if _, err := w.Write(bs); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// stubValidate is like validate, except it does nothing.
+func stubValidate(j *jws, domain, uri string, chlng challenge) error {
+ return nil
+}
+
+type mockUser struct {
+ email string
+ regres *RegistrationResource
+ privatekey *rsa.PrivateKey
+}
+
+func (u mockUser) GetEmail() string { return u.email }
+func (u mockUser) GetRegistration() *RegistrationResource { return u.regres }
+func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey }
diff --git a/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go
new file mode 100644
index 000000000..af97f5d1e
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/crypto.go
@@ -0,0 +1,332 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "net/http"
+ "strings"
+ "time"
+
+ "golang.org/x/crypto/ocsp"
+)
+
+// KeyType represents the key algo as well as the key size or curve to use.
+type KeyType string
+type derCertificateBytes []byte
+
+// Constants for all key types we support.
+const (
+ EC256 = KeyType("P256")
+ EC384 = KeyType("P384")
+ RSA2048 = KeyType("2048")
+ RSA4096 = KeyType("4096")
+ RSA8192 = KeyType("8192")
+)
+
+const (
+ // OCSPGood means that the certificate is valid.
+ OCSPGood = ocsp.Good
+ // OCSPRevoked means that the certificate has been deliberately revoked.
+ OCSPRevoked = ocsp.Revoked
+ // OCSPUnknown means that the OCSP responder doesn't know about the certificate.
+ OCSPUnknown = ocsp.Unknown
+ // OCSPServerFailed means that the OCSP responder failed to process the request.
+ OCSPServerFailed = ocsp.ServerFailed
+)
+
+// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
+// the parsed response, and an error, if any. The returned []byte can be passed directly
+// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
+// issued certificate, this function will try to get the issuer certificate from the
+// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
+// values are nil, the OCSP status may be assumed OCSPUnknown.
+func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
+ certificates, err := parsePEMBundle(bundle)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // We expect the certificate slice to be ordered down the chain,
+ // i.e. the issued (leaf) certificate first, then the CA. We need to pull
+ // the leaf and issuer certs out of it, which should always be the first
+ // two certificates. If there's no OCSP server listed in the leaf cert,
+ // there's nothing to do. And if we only have one certificate so far, we
+ // need to fetch the issuer cert.
+ issuedCert := certificates[0]
+ if len(issuedCert.OCSPServer) == 0 {
+ return nil, nil, errors.New("no OCSP server specified in cert")
+ }
+ if len(certificates) == 1 {
+ // TODO: build fallback. If this fails, check the remaining array entries.
+ if len(issuedCert.IssuingCertificateURL) == 0 {
+ return nil, nil, errors.New("no issuing certificate URL")
+ }
+
+ resp, err := httpGet(issuedCert.IssuingCertificateURL[0])
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ issuerCert, err := x509.ParseCertificate(issuerBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+  // Append the issuer certificate to the slice so the order stays
+  // issued (leaf) certificate first, then CA.
+ certificates = append(certificates, issuerCert)
+ }
+ issuerCert := certificates[1]
+
+ // Finally kick off the OCSP request.
+ ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ reader := bytes.NewReader(ocspReq)
+ req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer req.Body.Close()
+
+ ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024))
+ if err != nil {
+  return nil, nil, err
+ }
+
+ ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return ocspResBytes, ocspRes, nil
+}
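+
+// exampleOCSPIsGood is an illustrative sketch and not part of the upstream
+// library: it checks the parsed response against the OCSPGood constant; the
+// raw bytes could likewise be placed in a tls.Certificate's OCSPStaple field
+// as described above.
+func exampleOCSPIsGood(bundle []byte) (bool, error) {
+ _, ocspRes, err := GetOCSPForCert(bundle)
+ if err != nil {
+  return false, err
+ }
+ return ocspRes.Status == OCSPGood, nil
+}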
+
+func getKeyAuthorization(token string, key interface{}) (string, error) {
+ var publicKey crypto.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ publicKey = k.Public()
+ case *rsa.PrivateKey:
+ publicKey = k.Public()
+ }
+
+ // Generate the Key Authorization for the challenge
+ jwk := keyAsJWK(publicKey)
+ if jwk == nil {
+ return "", errors.New("Could not generate JWK from key.")
+ }
+ thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+
+ // unpad the base64URL
+ keyThumb := base64.URLEncoding.EncodeToString(thumbBytes)
+ index := strings.Index(keyThumb, "=")
+ if index != -1 {
+ keyThumb = keyThumb[:index]
+ }
+
+ return token + "." + keyThumb, nil
+}
+
+// parsePEMBundle parses a certificate bundle from top to bottom and returns
+// a slice of x509 certificates. This function will error if no certificates are found.
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) {
+ var certificates []*x509.Certificate
+ var certDERBlock *pem.Block
+
+ for {
+ certDERBlock, bundle = pem.Decode(bundle)
+ if certDERBlock == nil {
+ break
+ }
+
+ if certDERBlock.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(certDERBlock.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ }
+ }
+
+ if len(certificates) == 0 {
+ return nil, errors.New("No certificates were found while parsing the bundle.")
+ }
+
+ return certificates, nil
+}
+
+func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) {
+ keyBlock, _ := pem.Decode(key)
+
+ switch keyBlock.Type {
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(keyBlock.Bytes)
+ default:
+ return nil, errors.New("Unknown PEM header value")
+ }
+}
+
+func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) {
+
+ switch keyType {
+ case EC256:
+ return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ case EC384:
+ return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+ case RSA2048:
+ return rsa.GenerateKey(rand.Reader, 2048)
+ case RSA4096:
+ return rsa.GenerateKey(rand.Reader, 4096)
+ case RSA8192:
+ return rsa.GenerateKey(rand.Reader, 8192)
+ }
+
+ return nil, fmt.Errorf("Invalid KeyType: %s", keyType)
+}
+
+func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) {
+ template := x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: domain,
+ },
+ }
+
+ if len(san) > 0 {
+ template.DNSNames = san
+ }
+
+ return x509.CreateCertificateRequest(rand.Reader, &template, privateKey)
+}
+
+func pemEncode(data interface{}) []byte {
+ var pemBlock *pem.Block
+ switch key := data.(type) {
+ case *ecdsa.PrivateKey:
+ keyBytes, _ := x509.MarshalECPrivateKey(key)
+ pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
+ case *rsa.PrivateKey:
+ pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}
+ break
+ case *x509.CertificateRequest:
+ pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw}
+ break
+ case derCertificateBytes:
+ pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))}
+ }
+
+ return pem.EncodeToMemory(pemBlock)
+}
+
+func pemDecode(data []byte) (*pem.Block, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?")
+ }
+
+ return pemBlock, nil
+}
+
+func pemDecodeTox509(pem []byte) (*x509.Certificate, error) {
+ pemBlock, err := pemDecode(pem)
+ if pemBlock == nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificate(pemBlock.Bytes)
+}
+
+func pemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) {
+ pemBlock, err := pemDecode(pem)
+ if pemBlock == nil {
+ return nil, err
+ }
+
+ if pemBlock.Type != "CERTIFICATE REQUEST" {
+ return nil, fmt.Errorf("PEM block is not a certificate request")
+ }
+
+ return x509.ParseCertificateRequest(pemBlock.Bytes)
+}
+
+// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate.
+// The certificate has to be PEM encoded. Any other encodings like DER will fail.
+func GetPEMCertExpiration(cert []byte) (time.Time, error) {
+ pemBlock, err := pemDecode(cert)
+ if pemBlock == nil {
+ return time.Time{}, err
+ }
+
+ return getCertExpiration(pemBlock.Bytes)
+}
+
+// getCertExpiration returns the "NotAfter" date of a DER encoded certificate.
+func getCertExpiration(cert []byte) (time.Time, error) {
+ pCert, err := x509.ParseCertificate(cert)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ return pCert.NotAfter, nil
+}
+
+func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) {
+ derBytes, err := generateDerCert(privKey, time.Time{}, domain)
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil
+}
+
+func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ return nil, err
+ }
+
+ if expiration.IsZero() {
+  // A bare 365 would be only 365 nanoseconds; default to roughly one year.
+  expiration = time.Now().Add(365 * 24 * time.Hour)
+ }
+
+ template := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ CommonName: "ACME Challenge TEMP",
+ },
+ NotBefore: time.Now(),
+ NotAfter: expiration,
+
+ KeyUsage: x509.KeyUsageKeyEncipherment,
+ BasicConstraintsValid: true,
+ DNSNames: []string{domain},
+ }
+
+ return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+}
+
+func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser {
+ return http.MaxBytesReader(nil, rd, numBytes)
+}
diff --git a/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/xenolf/lego/acme/crypto_test.go
new file mode 100644
index 000000000..d2fc5088b
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/crypto_test.go
@@ -0,0 +1,93 @@
+package acme
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+ "time"
+)
+
+func TestGeneratePrivateKey(t *testing.T) {
+ key, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ t.Error("Error generating private key:", err)
+ }
+ if key == nil {
+ t.Error("Expected key to not be nil, but it was")
+ }
+}
+
+func TestGenerateCSR(t *testing.T) {
+ key, err := rsa.GenerateKey(rand.Reader, 512)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ csr, err := generateCsr(key, "fizz.buzz", nil)
+ if err != nil {
+ t.Error("Error generating CSR:", err)
+ }
+ if csr == nil || len(csr) == 0 {
+ t.Error("Expected CSR with data, but it was nil or length 0")
+ }
+}
+
+func TestPEMEncode(t *testing.T) {
+ buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+ reader := MockRandReader{b: buf}
+ key, err := rsa.GenerateKey(reader, 32)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ data := pemEncode(key)
+
+ if data == nil {
+ t.Fatal("Expected result to not be nil, but it was")
+ }
+ if len(data) != 127 {
+ t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data))
+ }
+}
+
+func TestPEMCertExpiration(t *testing.T) {
+ privKey, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ t.Fatal("Error generating private key:", err)
+ }
+
+ expiration := time.Now().Add(365)
+ expiration = expiration.Round(time.Second)
+ certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com")
+ if err != nil {
+ t.Fatal("Error generating cert:", err)
+ }
+
+ buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+ // Some random string should return an error.
+ if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil {
+ t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime)
+ }
+
+ // A DER encoded certificate should return an error.
+ if _, err := GetPEMCertExpiration(certBytes); err == nil {
+ t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.")
+ }
+
+ // A PEM encoded certificate should work ok.
+ pemCert := pemEncode(derCertificateBytes(certBytes))
+ if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) {
+ t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err)
+ }
+}
+
+type MockRandReader struct {
+ b *bytes.Buffer
+}
+
+func (r MockRandReader) Read(p []byte) (int, error) {
+ return r.b.Read(p)
+}
diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go
new file mode 100644
index 000000000..c5fd354a1
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/dns_challenge.go
@@ -0,0 +1,282 @@
+package acme
+
+import (
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/miekg/dns"
+ "golang.org/x/net/publicsuffix"
+)
+
+type preCheckDNSFunc func(fqdn, value string) (bool, error)
+
+var (
+ // PreCheckDNS checks DNS propagation before notifying ACME that
+ // the DNS challenge is ready.
+ PreCheckDNS preCheckDNSFunc = checkDNSPropagation
+ fqdnToZone = map[string]string{}
+)
+
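+// RecursiveNameservers are the recursive DNS resolvers queried for challenge
+// pre-checks and zone lookups before the authoritative nameservers are asked.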
+var RecursiveNameservers = []string{
+ "google-public-dns-a.google.com:53",
+ "google-public-dns-b.google.com:53",
+}
+
+// DNSTimeout is used to override the default DNS timeout of 10 seconds.
+var DNSTimeout = 10 * time.Second
+
+// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge
+func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) {
+ keyAuthShaBytes := sha256.Sum256([]byte(keyAuth))
+ // base64URL encoding without padding
+ keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size])
+ value = strings.TrimRight(keyAuthSha, "=")
+ ttl = 120
+ fqdn = fmt.Sprintf("_acme-challenge.%s.", domain)
+ return
+}
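+
+// exampleDNS01TXTRecord is an illustrative sketch and not part of the upstream
+// library: it renders the values returned by DNS01Record as the TXT record a
+// DNS provider would need to create, e.g. for domain "example.com" the owner
+// name is "_acme-challenge.example.com." with a TTL of 120.
+func exampleDNS01TXTRecord(domain, keyAuth string) string {
+ fqdn, value, ttl := DNS01Record(domain, keyAuth)
+ return fmt.Sprintf("%s %d IN TXT %q", fqdn, ttl, value)
+}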
+
+// dnsChallenge implements the dns-01 challenge according to ACME 7.5
+type dnsChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+func (s *dnsChallenge) Solve(chlng challenge, domain string) error {
+ logf("[INFO][%s] acme: Trying to solve DNS-01", domain)
+
+ if s.provider == nil {
+ return errors.New("No DNS Provider configured")
+ }
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = s.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("Error presenting token: %s", err)
+ }
+ defer func() {
+ err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("Error cleaning up %s: %v ", domain, err)
+ }
+ }()
+
+ fqdn, value, _ := DNS01Record(domain, keyAuth)
+
+ logf("[INFO][%s] Checking DNS record propagation...", domain)
+
+ var timeout, interval time.Duration
+ switch provider := s.provider.(type) {
+ case ChallengeProviderTimeout:
+ timeout, interval = provider.Timeout()
+ default:
+ timeout, interval = 60*time.Second, 2*time.Second
+ }
+
+ err = WaitFor(timeout, interval, func() (bool, error) {
+ return PreCheckDNS(fqdn, value)
+ })
+ if err != nil {
+ return err
+ }
+
+ return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
+
+// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
+func checkDNSPropagation(fqdn, value string) (bool, error) {
+ // Initial attempt to resolve at the recursive NS
+ r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true)
+ if err != nil {
+ return false, err
+ }
+ if r.Rcode == dns.RcodeSuccess {
+ // If we see a CNAME here then use the alias
+ for _, rr := range r.Answer {
+ if cn, ok := rr.(*dns.CNAME); ok {
+ if cn.Hdr.Name == fqdn {
+ fqdn = cn.Target
+ break
+ }
+ }
+ }
+ }
+
+ authoritativeNss, err := lookupNameservers(fqdn)
+ if err != nil {
+ return false, err
+ }
+
+ return checkAuthoritativeNss(fqdn, value, authoritativeNss)
+}
+
+// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
+func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
+ for _, ns := range nameservers {
+ r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
+ if err != nil {
+ return false, err
+ }
+
+ if r.Rcode != dns.RcodeSuccess {
+ return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
+ }
+
+ var found bool
+ for _, rr := range r.Answer {
+ if txt, ok := rr.(*dns.TXT); ok {
+ if strings.Join(txt.Txt, "") == value {
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return false, fmt.Errorf("NS %s did not return the expected TXT record", ns)
+ }
+ }
+
+ return true, nil
+}
+
+// dnsQuery will query a nameserver, iterating through the supplied servers as it retries
+// The nameserver should include a port, to facilitate testing where we talk to a mock dns server.
+func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) {
+ m := new(dns.Msg)
+ m.SetQuestion(fqdn, rtype)
+ m.SetEdns0(4096, false)
+
+ if !recursive {
+ m.RecursionDesired = false
+ }
+
+ // Will retry the request based on the number of servers (n+1)
+ for i := 1; i <= len(nameservers)+1; i++ {
+ ns := nameservers[i%len(nameservers)]
+ udp := &dns.Client{Net: "udp", Timeout: DNSTimeout}
+ in, _, err = udp.Exchange(m, ns)
+
+ if err == dns.ErrTruncated {
+ tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout}
+   // If the TCP request succeeds, err will be reset to nil
+ in, _, err = tcp.Exchange(m, ns)
+ }
+
+ if err == nil {
+ break
+ }
+ }
+ return
+}
+
+// lookupNameservers returns the authoritative nameservers for the given fqdn.
+func lookupNameservers(fqdn string) ([]string, error) {
+ var authoritativeNss []string
+
+ zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
+ if err != nil {
+ return nil, fmt.Errorf("Could not determine the zone: %v", err)
+ }
+
+ r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rr := range r.Answer {
+ if ns, ok := rr.(*dns.NS); ok {
+ authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
+ }
+ }
+
+ if len(authoritativeNss) > 0 {
+ return authoritativeNss, nil
+ }
+ return nil, fmt.Errorf("Could not determine authoritative nameservers")
+}
+
+// FindZoneByFqdn determines the zone apex for the given fqdn by recursing up the
+// domain labels until the nameserver returns a SOA record in the answer section.
+func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) {
+ // Do we have it cached?
+ if zone, ok := fqdnToZone[fqdn]; ok {
+ return zone, nil
+ }
+
+ labelIndexes := dns.Split(fqdn)
+ for _, index := range labelIndexes {
+ domain := fqdn[index:]
+ // Give up if we have reached the TLD
+ if isTLD(domain) {
+ break
+ }
+
+ in, err := dnsQuery(domain, dns.TypeSOA, nameservers, true)
+ if err != nil {
+ return "", err
+ }
+
+ // Any response code other than NOERROR and NXDOMAIN is treated as error
+ if in.Rcode != dns.RcodeNameError && in.Rcode != dns.RcodeSuccess {
+ return "", fmt.Errorf("Unexpected response code '%s' for %s",
+ dns.RcodeToString[in.Rcode], domain)
+ }
+
+ // Check if we got a SOA RR in the answer section
+ if in.Rcode == dns.RcodeSuccess {
+ for _, ans := range in.Answer {
+ if soa, ok := ans.(*dns.SOA); ok {
+ zone := soa.Hdr.Name
+ fqdnToZone[fqdn] = zone
+ return zone, nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("Could not find the start of authority")
+}
+
+func isTLD(domain string) bool {
+ // Use a name that does not shadow the publicsuffix package.
+ suffix, _ := publicsuffix.PublicSuffix(UnFqdn(domain))
+ return suffix == UnFqdn(domain)
+}
+
+// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing.
+func ClearFqdnCache() {
+ fqdnToZone = map[string]string{}
+}
+
+// ToFqdn converts the name into a fqdn appending a trailing dot.
+func ToFqdn(name string) string {
+ n := len(name)
+ if n == 0 || name[n-1] == '.' {
+ return name
+ }
+ return name + "."
+}
+
+// UnFqdn converts the fqdn into a name removing the trailing dot.
+func UnFqdn(name string) string {
+ n := len(name)
+ if n != 0 && name[n-1] == '.' {
+ return name[:n-1]
+ }
+ return name
+}
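+
+// exampleZoneApex is an illustrative sketch and not part of the upstream
+// library: it normalises a name to FQDN form, resolves its zone apex through
+// the recursive nameservers, and returns it without the trailing dot.
+func exampleZoneApex(name string) (string, error) {
+ fqdn := ToFqdn(name) // "example.com" -> "example.com."
+ zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
+ if err != nil {
+  return "", err
+ }
+ return UnFqdn(zone), nil // "example.com." -> "example.com"
+}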
diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go
new file mode 100644
index 000000000..240384e60
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go
@@ -0,0 +1,53 @@
+package acme
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+const (
+ dnsTemplate = "%s %d IN TXT \"%s\""
+)
+
+// DNSProviderManual is an implementation of the ChallengeProvider interface
+type DNSProviderManual struct{}
+
+// NewDNSProviderManual returns a DNSProviderManual instance.
+func NewDNSProviderManual() (*DNSProviderManual, error) {
+ return &DNSProviderManual{}, nil
+}
+
+// Present prints instructions for manually creating the TXT record
+func (*DNSProviderManual) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := DNS01Record(domain, keyAuth)
+ dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value)
+
+ authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone)
+ logf("[INFO] acme: %s", dnsRecord)
+ logf("[INFO] acme: Press 'Enter' when you are done")
+
+ reader := bufio.NewReader(os.Stdin)
+ _, _ = reader.ReadString('\n')
+ return nil
+}
+
+// CleanUp prints instructions for manually removing the TXT record
+func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, ttl := DNS01Record(domain, keyAuth)
+ dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...")
+
+ authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone)
+ logf("[INFO] acme: %s", dnsRecord)
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go
new file mode 100644
index 000000000..6e448854b
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/dns_challenge_test.go
@@ -0,0 +1,185 @@
+package acme
+
+import (
+ "bufio"
+ "crypto/rand"
+ "crypto/rsa"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+)
+
+var lookupNameserversTestsOK = []struct {
+ fqdn string
+ nss []string
+}{
+ {"books.google.com.ng.",
+ []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", "ns4.google.com."},
+ },
+ {"www.google.com.",
+ []string{"ns1.google.com.", "ns2.google.com.", "ns3.google.com.", "ns4.google.com."},
+ },
+ {"physics.georgetown.edu.",
+ []string{"ns1.georgetown.edu.", "ns2.georgetown.edu.", "ns3.georgetown.edu."},
+ },
+}
+
+var lookupNameserversTestsErr = []struct {
+ fqdn string
+ error string
+}{
+ // invalid tld
+ {"_null.n0n0.",
+ "Could not determine the zone",
+ },
+ // invalid domain
+ {"_null.com.",
+ "Could not determine the zone",
+ },
+ // invalid domain
+ {"in-valid.co.uk.",
+ "Could not determine the zone",
+ },
+}
+
+var findZoneByFqdnTests = []struct {
+ fqdn string
+ zone string
+}{
+ {"mail.google.com.", "google.com."}, // domain is a CNAME
+ {"foo.google.com.", "google.com."}, // domain is a non-existent subdomain
+}
+
+var checkAuthoritativeNssTests = []struct {
+ fqdn, value string
+ ns []string
+ ok bool
+}{
+ // TXT RR w/ expected value
+ {"8.8.8.8.asn.routeviews.org.", "151698.8.8.024", []string{"asnums.routeviews.org."},
+ true,
+ },
+ // No TXT RR
+ {"ns1.google.com.", "", []string{"ns2.google.com."},
+ false,
+ },
+}
+
+var checkAuthoritativeNssTestsErr = []struct {
+ fqdn, value string
+ ns []string
+ error string
+}{
+ // TXT RR /w unexpected value
+ {"8.8.8.8.asn.routeviews.org.", "fe01=", []string{"asnums.routeviews.org."},
+ "did not return the expected TXT record",
+ },
+ // No TXT RR
+ {"ns1.google.com.", "fe01=", []string{"ns2.google.com."},
+ "did not return the expected TXT record",
+ },
+}
+
+func TestDNSValidServerResponse(t *testing.T) {
+ PreCheckDNS = func(fqdn, value string) (bool, error) {
+ return true, nil
+ }
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add("Replay-Nonce", "12345")
+ w.Write([]byte("{\"type\":\"dns01\",\"status\":\"valid\",\"uri\":\"http://some.url\",\"token\":\"http8\"}"))
+ }))
+
+ manualProvider, _ := NewDNSProviderManual()
+ jws := &jws{privKey: privKey, directoryURL: ts.URL}
+ solver := &dnsChallenge{jws: jws, validate: validate, provider: manualProvider}
+ clientChallenge := challenge{Type: "dns01", Status: "pending", URI: ts.URL, Token: "http8"}
+
+ go func() {
+ time.Sleep(time.Second * 2)
+ f := bufio.NewWriter(os.Stdout)
+ defer f.Flush()
+ f.WriteString("\n")
+ }()
+
+ if err := solver.Solve(clientChallenge, "example.com"); err != nil {
+ t.Errorf("VALID: Expected Solve to return no error but the error was -> %v", err)
+ }
+}
+
+func TestPreCheckDNS(t *testing.T) {
+ ok, err := PreCheckDNS("acme-staging.api.letsencrypt.org", "fe01=")
+ if err != nil || !ok {
+ t.Errorf("preCheckDNS failed for acme-staging.api.letsencrypt.org")
+ }
+}
+
+func TestLookupNameserversOK(t *testing.T) {
+ for _, tt := range lookupNameserversTestsOK {
+ nss, err := lookupNameservers(tt.fqdn)
+ if err != nil {
+ t.Fatalf("#%s: got %q; want nil", tt.fqdn, err)
+ }
+
+ sort.Strings(nss)
+ sort.Strings(tt.nss)
+
+ if !reflect.DeepEqual(nss, tt.nss) {
+ t.Errorf("#%s: got %v; want %v", tt.fqdn, nss, tt.nss)
+ }
+ }
+}
+
+func TestLookupNameserversErr(t *testing.T) {
+ for _, tt := range lookupNameserversTestsErr {
+ _, err := lookupNameservers(tt.fqdn)
+ if err == nil {
+ t.Fatalf("#%s: expected %q (error); got <nil>", tt.fqdn, tt.error)
+ }
+
+ if !strings.Contains(err.Error(), tt.error) {
+ t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err)
+ continue
+ }
+ }
+}
+
+func TestFindZoneByFqdn(t *testing.T) {
+ for _, tt := range findZoneByFqdnTests {
+ res, err := FindZoneByFqdn(tt.fqdn, RecursiveNameservers)
+ if err != nil {
+ t.Errorf("FindZoneByFqdn failed for %s: %v", tt.fqdn, err)
+ }
+ if res != tt.zone {
+ t.Errorf("%s: got %s; want %s", tt.fqdn, res, tt.zone)
+ }
+ }
+}
+
+func TestCheckAuthoritativeNss(t *testing.T) {
+ for _, tt := range checkAuthoritativeNssTests {
+ ok, _ := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns)
+ if ok != tt.ok {
+ t.Errorf("%s: got %t; want %t", tt.fqdn, ok, tt.ok)
+ }
+ }
+}
+
+func TestCheckAuthoritativeNssErr(t *testing.T) {
+ for _, tt := range checkAuthoritativeNssTestsErr {
+ _, err := checkAuthoritativeNss(tt.fqdn, tt.value, tt.ns)
+ if err == nil {
+ t.Fatalf("#%s: expected %q (error); got <nil>", tt.fqdn, tt.error)
+ }
+ if !strings.Contains(err.Error(), tt.error) {
+ t.Errorf("#%s: expected %q (error); got %q", tt.fqdn, tt.error, err)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go
new file mode 100644
index 000000000..2aa690b33
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/error.go
@@ -0,0 +1,86 @@
+package acme
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+const (
+ tosAgreementError = "Must agree to subscriber agreement before any further actions"
+)
+
+// RemoteError is the base type for all errors specific to the ACME protocol.
+type RemoteError struct {
+ StatusCode int `json:"status,omitempty"`
+ Type string `json:"type"`
+ Detail string `json:"detail"`
+}
+
+func (e RemoteError) Error() string {
+ return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail)
+}
+
+// TOSError represents the error which is returned if the user needs to
+// accept the TOS.
+// TODO: include the new TOS url if we can somehow obtain it.
+type TOSError struct {
+ RemoteError
+}
+
+type domainError struct {
+ Domain string
+ Error error
+}
+
+type challengeError struct {
+ RemoteError
+ records []validationRecord
+}
+
+func (c challengeError) Error() string {
+
+ var errStr string
+ for _, validation := range c.records {
+ errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n",
+ validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress)
+ }
+
+ return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr)
+}
+
+func handleHTTPError(resp *http.Response) error {
+ var errorDetail RemoteError
+
+ contentType := resp.Header.Get("Content-Type")
+ // try to decode the content as JSON
+ if contentType == "application/json" || contentType == "application/problem+json" {
+ decoder := json.NewDecoder(resp.Body)
+ err := decoder.Decode(&errorDetail)
+ if err != nil {
+ return err
+ }
+ } else {
+ detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ if err != nil {
+ return err
+ }
+
+ errorDetail.Detail = string(detailBytes)
+ }
+
+ errorDetail.StatusCode = resp.StatusCode
+
+ // Check for errors we handle specifically
+ if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError {
+ return TOSError{errorDetail}
+ }
+
+ return errorDetail
+}
+
+func handleChallengeError(chlng challenge) error {
+ return challengeError{chlng.Error, chlng.ValidationRecords}
+}
diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go
new file mode 100644
index 000000000..180db786d
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/http.go
@@ -0,0 +1,117 @@
+package acme
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests.
+var UserAgent string
+
+// HTTPClient is an HTTP client with a reasonable timeout value.
+var HTTPClient = http.Client{Timeout: 10 * time.Second}
+
+const (
+ // defaultGoUserAgent is the Go HTTP package user agent string. Too
+ // bad it isn't exported. If it changes, we should update it here, too.
+ defaultGoUserAgent = "Go-http-client/1.1"
+
+ // ourUserAgent is the User-Agent of this underlying library package.
+ ourUserAgent = "xenolf-acme"
+)
+
+// httpHead performs a HEAD request with a proper User-Agent string.
+// The response body (resp.Body) is already closed when this function returns.
+func httpHead(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("User-Agent", userAgent())
+
+ resp, err = HTTPClient.Do(req)
+ if err != nil {
+ return resp, err
+ }
+ resp.Body.Close()
+ return resp, err
+}
+
+// httpPost performs a POST request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ req.Header.Set("User-Agent", userAgent())
+
+ return HTTPClient.Do(req)
+}
+
+// httpGet performs a GET request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpGet(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent())
+
+ return HTTPClient.Do(req)
+}
+
+// getJSON performs an HTTP GET request and parses the response body
+// as JSON, into the provided respBody object.
+func getJSON(uri string, respBody interface{}) (http.Header, error) {
+ resp, err := httpGet(uri)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get %q: %v", uri, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// postJSON performs an HTTP POST request and parses the response body
+// as JSON, into the provided respBody object.
+func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) {
+ jsonBytes, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, errors.New("Failed to marshal network message...")
+ }
+
+ resp, err := j.post(uri, jsonBytes)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to post JWS message. -> %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ if respBody == nil {
+ return resp.Header, nil
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// userAgent builds and returns the User-Agent string to use in requests.
+func userAgent() string {
+ ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent)
+ return strings.TrimSpace(ua)
+}
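
userAgent() above composes the header value from the default Go client string, the OS and architecture, the library identifier, and the optional package-level UserAgent variable; the lego CLI later in this change sets it to "lego/" plus its version. A minimal sketch of setting that override from an embedding program — the "MyApp/1.0" token is only an illustration:

    package main

    import (
        "fmt"

        "github.com/xenolf/lego/acme"
    )

    func main() {
        // "MyApp/1.0" is a placeholder product token; the acme package appends
        // it to its default User-Agent on every request it sends.
        acme.UserAgent = "MyApp/1.0"

        // Requests then carry a header along the lines of:
        //   Go-http-client/1.1 (linux; amd64) xenolf-acme MyApp/1.0
        fmt.Println("User-Agent suffix set to", acme.UserAgent)
    }
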
diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/xenolf/lego/acme/http_challenge.go
new file mode 100644
index 000000000..95cb1fd81
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/http_challenge.go
@@ -0,0 +1,41 @@
+package acme
+
+import (
+ "fmt"
+ "log"
+)
+
+type httpChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+// HTTP01ChallengePath returns the URL path for the `http-01` challenge
+func HTTP01ChallengePath(token string) string {
+ return "/.well-known/acme-challenge/" + token
+}
+
+func (s *httpChallenge) Solve(chlng challenge, domain string) error {
+
+ logf("[INFO][%s] acme: Trying to solve HTTP-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = s.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+
+ return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
new file mode 100644
index 000000000..42541380c
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
@@ -0,0 +1,79 @@
+package acme
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// HTTPProviderServer implements ChallengeProvider for `http-01` challenge
+// It may be instantiated without using the NewHTTPProviderServer function if
+// you want only to use the default values.
+type HTTPProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 80 respectively.
+func NewHTTPProviderServer(iface, port string) *HTTPProviderServer {
+ return &HTTPProviderServer{iface: iface, port: port}
+}
+
+// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests.
+func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "80"
+ }
+
+ var err error
+ s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port))
+ if err != nil {
+ return fmt.Errorf("Could not start HTTP server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go s.serve(domain, token, keyAuth)
+ return nil
+}
+
+// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)`
+func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
+
+func (s *HTTPProviderServer) serve(domain, token, keyAuth string) {
+ path := HTTP01ChallengePath(token)
+
+ // The handler validates the Host header and request method.
+ // For valid requests it writes back the key authorization derived from the challenge token.
+ mux := http.NewServeMux()
+ mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+ if strings.HasPrefix(r.Host, domain) && r.Method == "GET" {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte(keyAuth))
+ logf("[INFO][%s] Served key authentication", domain)
+ } else {
+ logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method)
+ w.Write([]byte("TEST"))
+ }
+ })
+
+ httpServer := &http.Server{
+ Handler: mux,
+ }
+ // Once httpServer is shut down we don't want any lingering
+ // connections, so disable KeepAlives.
+ httpServer.SetKeepAlivesEnabled(false)
+ httpServer.Serve(s.listener)
+ s.done <- true
+}
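
HTTPProviderServer is the bundled `http-01` solver: Present binds a listener (port 80 by default), serves the key authorization at HTTP01ChallengePath(token), and CleanUp tears the listener down again. A hedged sketch of wiring it into an existing *acme.Client on an alternate port — the helper name and the choice of port 8080 are illustrative only:

    package example

    import (
        "log"

        "github.com/xenolf/lego/acme"
    )

    // configureHTTP01 is a hypothetical helper: it plugs the bundled HTTP-01
    // solver into an existing client. Port 8080 is only an example, useful when
    // something in front of this host forwards port 80 traffic to it.
    func configureHTTP01(client *acme.Client) {
        provider := acme.NewHTTPProviderServer("", "8080")
        client.SetChallengeProvider(acme.HTTP01, provider)

        // Solve HTTP-01 only; skip the DNS and TLS-SNI solvers.
        client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01})

        log.Print("HTTP-01 solver configured on :8080")
    }
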
diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
new file mode 100644
index 000000000..fdd8f4d27
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
@@ -0,0 +1,57 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestHTTPChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token
+ resp, err := httpGet(uri)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if want := "text/plain"; resp.Header.Get("Content-Type") != want {
+ t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ bodyStr := string(body)
+
+ if bodyStr != chlng.KeyAuthorization {
+ t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization)
+ }
+
+ return nil
+ }
+ solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestHTTPChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http2"}
+ solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/xenolf/lego/acme/http_test.go
new file mode 100644
index 000000000..33a48a331
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/http_test.go
@@ -0,0 +1,100 @@
+package acme
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestHTTPHeadUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ _, err := httpHead(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if method != "HEAD" {
+ t.Errorf("Expected method to be HEAD, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPGetUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpGet(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "GET" {
+ t.Errorf("Expected method to be GET, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPPostUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "POST" {
+ t.Errorf("Expected method to be POST, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestUserAgent(t *testing.T) {
+ ua := userAgent()
+
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if strings.HasSuffix(ua, " ") {
+ t.Errorf("UA should not have trailing spaces; got '%s'", ua)
+ }
+
+ // customize the UA by appending a value
+ UserAgent = "MyApp/1.2.3"
+ ua = userAgent()
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if !strings.Contains(ua, UserAgent) {
+ t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua)
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go
new file mode 100644
index 000000000..f70513e38
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/jws.go
@@ -0,0 +1,115 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "fmt"
+ "net/http"
+ "sync"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type jws struct {
+ directoryURL string
+ privKey crypto.PrivateKey
+ nonces []string
+ sync.Mutex
+}
+
+func keyAsJWK(key interface{}) *jose.JsonWebKey {
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "EC"}
+ case *rsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "RSA"}
+
+ default:
+ return nil
+ }
+}
+
+// Posts a JWS signed message to the specified URL
+func (j *jws) post(url string, content []byte) (*http.Response, error) {
+ signedContent, err := j.signContent(content)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
+ if err != nil {
+ return nil, err
+ }
+
+ j.getNonceFromResponse(resp)
+
+ return resp, err
+}
+
+func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
+
+ var alg jose.SignatureAlgorithm
+ switch k := j.privKey.(type) {
+ case *rsa.PrivateKey:
+ alg = jose.RS256
+ case *ecdsa.PrivateKey:
+ if k.Curve == elliptic.P256() {
+ alg = jose.ES256
+ } else if k.Curve == elliptic.P384() {
+ alg = jose.ES384
+ }
+ }
+
+ signer, err := jose.NewSigner(alg, j.privKey)
+ if err != nil {
+ return nil, err
+ }
+ signer.SetNonceSource(j)
+
+ signed, err := signer.Sign(content)
+ if err != nil {
+ return nil, err
+ }
+ return signed, nil
+}
+
+func (j *jws) getNonceFromResponse(resp *http.Response) error {
+ j.Lock()
+ defer j.Unlock()
+ nonce := resp.Header.Get("Replay-Nonce")
+ if nonce == "" {
+ return fmt.Errorf("Server did not respond with a proper nonce header.")
+ }
+
+ j.nonces = append(j.nonces, nonce)
+ return nil
+}
+
+func (j *jws) getNonce() error {
+ resp, err := httpHead(j.directoryURL)
+ if err != nil {
+ return err
+ }
+
+ return j.getNonceFromResponse(resp)
+}
+
+func (j *jws) Nonce() (string, error) {
+ nonce := ""
+ if len(j.nonces) == 0 {
+ err := j.getNonce()
+ if err != nil {
+ return nonce, err
+ }
+ }
+ if len(j.nonces) == 0 {
+ return "", fmt.Errorf("Can't get nonce")
+ }
+ j.Lock()
+ defer j.Unlock()
+ nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
+ return nonce, nil
+}
diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go
new file mode 100644
index 000000000..0efeae674
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/messages.go
@@ -0,0 +1,117 @@
+package acme
+
+import (
+ "time"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type directory struct {
+ NewAuthzURL string `json:"new-authz"`
+ NewCertURL string `json:"new-cert"`
+ NewRegURL string `json:"new-reg"`
+ RevokeCertURL string `json:"revoke-cert"`
+}
+
+type recoveryKeyMessage struct {
+ Length int `json:"length,omitempty"`
+ Client jose.JsonWebKey `json:"client,omitempty"`
+ Server jose.JsonWebKey `json:"server,omitempty"`
+}
+
+type registrationMessage struct {
+ Resource string `json:"resource"`
+ Contact []string `json:"contact"`
+ Delete bool `json:"delete,omitempty"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// Registration is returned by the ACME server after the registration
+// The client implementation should save this registration somewhere.
+type Registration struct {
+ Resource string `json:"resource,omitempty"`
+ ID int `json:"id"`
+ Key jose.JsonWebKey `json:"key"`
+ Contact []string `json:"contact"`
+ Agreement string `json:"agreement,omitempty"`
+ Authorizations string `json:"authorizations,omitempty"`
+ Certificates string `json:"certificates,omitempty"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// RegistrationResource represents all important information about a registration
+// that the client needs to keep track of itself.
+type RegistrationResource struct {
+ Body Registration `json:"body,omitempty"`
+ URI string `json:"uri,omitempty"`
+ NewAuthzURL string `json:"new_authzr_uri,omitempty"`
+ TosURL string `json:"terms_of_service,omitempty"`
+}
+
+type authorizationResource struct {
+ Body authorization
+ Domain string
+ NewCertURL string
+ AuthURL string
+}
+
+type authorization struct {
+ Resource string `json:"resource,omitempty"`
+ Identifier identifier `json:"identifier"`
+ Status string `json:"status,omitempty"`
+ Expires time.Time `json:"expires,omitempty"`
+ Challenges []challenge `json:"challenges,omitempty"`
+ Combinations [][]int `json:"combinations,omitempty"`
+}
+
+type identifier struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+}
+
+type validationRecord struct {
+ URI string `json:"url,omitempty"`
+ Hostname string `json:"hostname,omitempty"`
+ Port string `json:"port,omitempty"`
+ ResolvedAddresses []string `json:"addressesResolved,omitempty"`
+ UsedAddress string `json:"addressUsed,omitempty"`
+}
+
+type challenge struct {
+ Resource string `json:"resource,omitempty"`
+ Type Challenge `json:"type,omitempty"`
+ Status string `json:"status,omitempty"`
+ URI string `json:"uri,omitempty"`
+ Token string `json:"token,omitempty"`
+ KeyAuthorization string `json:"keyAuthorization,omitempty"`
+ TLS bool `json:"tls,omitempty"`
+ Iterations int `json:"n,omitempty"`
+ Error RemoteError `json:"error,omitempty"`
+ ValidationRecords []validationRecord `json:"validationRecord,omitempty"`
+}
+
+type csrMessage struct {
+ Resource string `json:"resource,omitempty"`
+ Csr string `json:"csr"`
+ Authorizations []string `json:"authorizations"`
+}
+
+type revokeCertMessage struct {
+ Resource string `json:"resource"`
+ Certificate string `json:"certificate"`
+}
+
+// CertificateResource represents a CA issued certificate.
+// PrivateKey and Certificate are both already PEM encoded
+// and can be directly written to disk. Certificate may
+// be a certificate bundle, depending on the options supplied
+// to create it.
+type CertificateResource struct {
+ Domain string `json:"domain"`
+ CertURL string `json:"certUrl"`
+ CertStableURL string `json:"certStableUrl"`
+ AccountRef string `json:"accountRef,omitempty"`
+ PrivateKey []byte `json:"-"`
+ Certificate []byte `json:"-"`
+ CSR []byte `json:"-"`
+}
diff --git a/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/xenolf/lego/acme/pop_challenge.go
new file mode 100644
index 000000000..8d2a213b0
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/pop_challenge.go
@@ -0,0 +1 @@
+package acme
diff --git a/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/xenolf/lego/acme/provider.go
new file mode 100644
index 000000000..d177ff07a
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/provider.go
@@ -0,0 +1,28 @@
+package acme
+
+import "time"
+
+// ChallengeProvider enables implementing a custom challenge
+// provider. Present presents the solution to a challenge available to
+// be solved. CleanUp will be called by the challenge if Present ends
+// in a non-error state.
+type ChallengeProvider interface {
+ Present(domain, token, keyAuth string) error
+ CleanUp(domain, token, keyAuth string) error
+}
+
+// ChallengeProviderTimeout allows for implementing a
+// ChallengeProvider where an unusually long timeout is required when
+// waiting for an ACME challenge to be satisfied, such as when
+// checking for DNS record propagation. If an implementor of a
+// ChallengeProvider provides a Timeout method, then the return values
+// of the Timeout method will be used when appropriate by the acme
+// package. The interval value is the time between checks.
+//
+// The default values used for timeout and interval are 60 seconds and
+// 2 seconds respectively. These are used when no Timeout method is
+// defined for the ChallengeProvider.
+type ChallengeProviderTimeout interface {
+ ChallengeProvider
+ Timeout() (timeout, interval time.Duration)
+}
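
The two interfaces above are the extension point for custom solvers: Present publishes the key authorization where the CA can find it, CleanUp removes it afterwards, and an optional Timeout method replaces the default 60-second/2-second polling. A minimal sketch of a custom provider, with publish and unpublish as placeholders for whatever mechanism your infrastructure actually uses:

    package example

    import "time"

    // customProvider is a hypothetical ChallengeProvider: publish and unpublish
    // stand in for whatever actually exposes and removes the key authorization.
    type customProvider struct {
        publish   func(domain, token, keyAuth string) error
        unpublish func(domain, token, keyAuth string) error
    }

    func (p *customProvider) Present(domain, token, keyAuth string) error {
        return p.publish(domain, token, keyAuth)
    }

    func (p *customProvider) CleanUp(domain, token, keyAuth string) error {
        return p.unpublish(domain, token, keyAuth)
    }

    // Timeout also satisfies ChallengeProviderTimeout, replacing the default
    // 60 second / 2 second polling described above.
    func (p *customProvider) Timeout() (timeout, interval time.Duration) {
        return 5 * time.Minute, 5 * time.Second
    }
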
diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
new file mode 100644
index 000000000..34383cbfa
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
@@ -0,0 +1,67 @@
+package acme
+
+import (
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "log"
+)
+
+type tlsSNIChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error {
+ // FIXME: https://github.com/ietf-wg-acme/acme/pull/22
+ // Currently we implement this challenge to track boulder, not the current spec!
+
+ logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = t.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := t.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+ return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
+
+// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge
+func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) {
+ // generate a new RSA key for the certificates
+ tempPrivKey, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+ rsaPrivKey := tempPrivKey.(*rsa.PrivateKey)
+ rsaPrivPEM := pemEncode(rsaPrivKey)
+
+ zBytes := sha256.Sum256([]byte(keyAuth))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+ tempCertPEM, err := generatePemCert(rsaPrivKey, domain)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ return certificate, domain, nil
+}
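
TLSSNI01ChallengeCert derives the SNI name by hex-encoding the SHA-256 digest of the key authorization and splitting the 64-character result into two labels under acme.invalid, then issues a throwaway RSA certificate for that name. The name derivation on its own, as a standalone sketch (the keyAuth value is a placeholder):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    func main() {
        keyAuth := "token.accountThumbprint" // placeholder key authorization

        // Same derivation as TLSSNI01ChallengeCert: hex-encode SHA-256(keyAuth)
        // and split the 64-character digest into two labels under acme.invalid.
        sum := sha256.Sum256([]byte(keyAuth))
        z := hex.EncodeToString(sum[:])
        fmt.Printf("%s.%s.acme.invalid\n", z[:32], z[32:])
    }
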
diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
new file mode 100644
index 000000000..df00fbb5a
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
@@ -0,0 +1,62 @@
+package acme
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+)
+
+// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge
+// It may be instantiated without using the NewTLSProviderServer function if
+// you want only to use the default values.
+type TLSProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 443 respectively.
+func NewTLSProviderServer(iface, port string) *TLSProviderServer {
+ return &TLSProviderServer{iface: iface, port: port}
+}
+
+// Present makes the keyAuth available as a cert
+func (s *TLSProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "443"
+ }
+
+ cert, _, err := TLSSNI01ChallengeCert(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ tlsConf := new(tls.Config)
+ tlsConf.Certificates = []tls.Certificate{cert}
+
+ s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf)
+ if err != nil {
+ return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go func() {
+ http.Serve(s.listener, nil)
+ s.done <- true
+ }()
+ return nil
+}
+
+// CleanUp closes the HTTP server.
+func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
new file mode 100644
index 000000000..3aec74565
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
@@ -0,0 +1,65 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestTLSSNIChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{
+ InsecureSkipVerify: true,
+ })
+ if err != nil {
+ t.Errorf("Expected to connect to challenge server without an error. %s", err.Error())
+ }
+
+ // Expect the server to only return one certificate
+ connState := conn.ConnectionState()
+ if count := len(connState.PeerCertificates); count != 1 {
+ t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count)
+ }
+
+ remoteCert := connState.PeerCertificates[0]
+ if count := len(remoteCert.DNSNames); count != 1 {
+ t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count)
+ }
+
+ zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+
+ if remoteCert.DNSNames[0] != domain {
+ t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0])
+ }
+
+ return nil
+ }
+ solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestTLSSNIChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"}
+ solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/xenolf/lego/acme/utils.go
new file mode 100644
index 000000000..2fa0db304
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/utils.go
@@ -0,0 +1,29 @@
+package acme
+
+import (
+ "fmt"
+ "time"
+)
+
+// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'.
+func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error {
+ var lastErr string
+ timeup := time.After(timeout)
+ for {
+ select {
+ case <-timeup:
+ return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr)
+ default:
+ }
+
+ stop, err := f()
+ if stop {
+ return nil
+ }
+ if err != nil {
+ lastErr = err.Error()
+ }
+
+ time.Sleep(interval)
+ }
+}
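
WaitFor calls f once per interval until it reports true, and fails with the last observed error once the timeout elapses; a true result wins even if an error is returned alongside it. A runnable usage sketch with a trivial time-based condition:

    package main

    import (
        "log"
        "time"

        "github.com/xenolf/lego/acme"
    )

    func main() {
        start := time.Now()

        // Poll every 500ms, giving up after 10 seconds; here the "condition"
        // is simply that three seconds have passed since start.
        err := acme.WaitFor(10*time.Second, 500*time.Millisecond, func() (bool, error) {
            return time.Since(start) > 3*time.Second, nil
        })
        if err != nil {
            log.Fatalf("condition never became true: %v", err)
        }
        log.Print("condition met")
    }
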
diff --git a/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/xenolf/lego/acme/utils_test.go
new file mode 100644
index 000000000..158af4116
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/acme/utils_test.go
@@ -0,0 +1,26 @@
+package acme
+
+import (
+ "testing"
+ "time"
+)
+
+func TestWaitForTimeout(t *testing.T) {
+ c := make(chan error)
+ go func() {
+ err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) {
+ return false, nil
+ })
+ c <- err
+ }()
+
+ timeout := time.After(4 * time.Second)
+ select {
+ case <-timeout:
+ t.Fatal("timeout exceeded")
+ case err := <-c:
+ if err == nil {
+ t.Errorf("expected timeout error; got %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go
new file mode 100644
index 000000000..abdcf47de
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/cli.go
@@ -0,0 +1,214 @@
+// Let's Encrypt client to go!
+// CLI application for generating Let's Encrypt certificates using the ACME package.
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/urfave/cli"
+ "github.com/xenolf/lego/acme"
+)
+
+// Logger is used to log errors; if nil, the default log.Logger is used.
+var Logger *log.Logger
+
+// logger is a helper function to retrieve the available logger
+func logger() *log.Logger {
+ if Logger == nil {
+ Logger = log.New(os.Stderr, "", log.LstdFlags)
+ }
+ return Logger
+}
+
+var gittag string
+
+func main() {
+ app := cli.NewApp()
+ app.Name = "lego"
+ app.Usage = "Let's Encrypt client written in Go"
+
+ version := "0.3.1"
+ if strings.HasPrefix(gittag, "v") {
+ version = gittag
+ }
+
+ app.Version = version
+
+ acme.UserAgent = "lego/" + app.Version
+
+ defaultPath := ""
+ cwd, err := os.Getwd()
+ if err == nil {
+ defaultPath = path.Join(cwd, ".lego")
+ }
+
+ app.Before = func(c *cli.Context) error {
+ if c.GlobalString("path") == "" {
+ logger().Fatal("Could not determine current working directory. Please pass --path.")
+ }
+ return nil
+ }
+
+ app.Commands = []cli.Command{
+ {
+ Name: "run",
+ Usage: "Register an account, then create and install a certificate",
+ Action: run,
+ Flags: []cli.Flag{
+ cli.BoolFlag{
+ Name: "no-bundle",
+ Usage: "Do not create a certificate bundle by adding the issuer's certificate to the new certificate.",
+ },
+ },
+ },
+ {
+ Name: "revoke",
+ Usage: "Revoke a certificate",
+ Action: revoke,
+ },
+ {
+ Name: "renew",
+ Usage: "Renew a certificate",
+ Action: renew,
+ Flags: []cli.Flag{
+ cli.IntFlag{
+ Name: "days",
+ Value: 0,
+ Usage: "The number of days left on a certificate to renew it.",
+ },
+ cli.BoolFlag{
+ Name: "reuse-key",
+ Usage: "Used to indicate you want to reuse your current private key for the new certificate.",
+ },
+ cli.BoolFlag{
+ Name: "no-bundle",
+ Usage: "Do not create a certificate bundle by adding the issuer's certificate to the new certificate.",
+ },
+ },
+ },
+ {
+ Name: "dnshelp",
+ Usage: "Shows additional help for the --dns global option",
+ Action: dnshelp,
+ },
+ }
+
+ app.Flags = []cli.Flag{
+ cli.StringSliceFlag{
+ Name: "domains, d",
+ Usage: "Add domains to the process",
+ },
+ cli.StringFlag{
+ Name: "csr, c",
+ Usage: "Certificate signing request filename, if an external CSR is to be used",
+ },
+ cli.StringFlag{
+ Name: "server, s",
+ Value: "https://acme-v01.api.letsencrypt.org/directory",
+ Usage: "CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.",
+ },
+ cli.StringFlag{
+ Name: "email, m",
+ Usage: "Email used for registration and recovery contact.",
+ },
+ cli.BoolFlag{
+ Name: "accept-tos, a",
+ Usage: "By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.",
+ },
+ cli.StringFlag{
+ Name: "key-type, k",
+ Value: "rsa2048",
+ Usage: "Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384",
+ },
+ cli.StringFlag{
+ Name: "path",
+ Usage: "Directory to use for storing the data",
+ Value: defaultPath,
+ },
+ cli.StringSliceFlag{
+ Name: "exclude, x",
+ Usage: "Explicitly disallow solvers by name from being used. Solvers: \"http-01\", \"tls-sni-01\".",
+ },
+ cli.StringFlag{
+ Name: "webroot",
+ Usage: "Set the webroot folder to use for HTTP-based challenges; the challenge token is written directly to a file under .well-known/acme-challenge",
+ },
+ cli.StringFlag{
+ Name: "http",
+ Usage: "Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port",
+ },
+ cli.StringFlag{
+ Name: "tls",
+ Usage: "Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port",
+ },
+ cli.StringFlag{
+ Name: "dns",
+ Usage: "Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.",
+ },
+ cli.IntFlag{
+ Name: "http-timeout",
+ Usage: "Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.",
+ },
+ cli.IntFlag{
+ Name: "dns-timeout",
+ Usage: "Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.",
+ },
+ cli.StringSliceFlag{
+ Name: "dns-resolvers",
+ Usage: "Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.",
+ },
+ cli.BoolFlag{
+ Name: "pem",
+ Usage: "Generate a .pem file by concatenating the .key and .crt files together.",
+ },
+ }
+
+ err = app.Run(os.Args)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func dnshelp(c *cli.Context) error {
+ fmt.Printf(
+ `Credentials for DNS providers must be passed through environment variables.
+
+Here is an example bash command using the CloudFlare DNS provider:
+
+ $ CLOUDFLARE_EMAIL=foo@bar.com \
+ CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \
+ lego --dns cloudflare --domains www.example.com --email me@bar.com run
+
+`)
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
+ fmt.Fprintln(w, "Valid providers and their associated credential environment variables:")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "\tcloudflare:\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY")
+ fmt.Fprintln(w, "\tdigitalocean:\tDO_AUTH_TOKEN")
+ fmt.Fprintln(w, "\tdnsimple:\tDNSIMPLE_EMAIL, DNSIMPLE_API_KEY")
+ fmt.Fprintln(w, "\tdnsmadeeasy:\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET")
+ fmt.Fprintln(w, "\tgandi:\tGANDI_API_KEY")
+ fmt.Fprintln(w, "\tgcloud:\tGCE_PROJECT")
+ fmt.Fprintln(w, "\tlinode:\tLINODE_API_KEY")
+ fmt.Fprintln(w, "\tmanual:\tnone")
+ fmt.Fprintln(w, "\tnamecheap:\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY")
+ fmt.Fprintln(w, "\trfc2136:\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\n\t\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER")
+ fmt.Fprintln(w, "\troute53:\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION")
+ fmt.Fprintln(w, "\tdyn:\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD")
+ fmt.Fprintln(w, "\tvultr:\tVULTR_API_KEY")
+ fmt.Fprintln(w, "\tovh:\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY")
+ fmt.Fprintln(w, "\tpdns:\tPDNS_API_KEY, PDNS_API_URL")
+ w.Flush()
+
+ fmt.Println(`
+For a more detailed explanation of a DNS provider's credential variables,
+please consult their online documentation.`)
+
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/cli_handlers.go b/vendor/github.com/xenolf/lego/cli_handlers.go
new file mode 100644
index 000000000..29a1166d8
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/cli_handlers.go
@@ -0,0 +1,444 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/urfave/cli"
+ "github.com/xenolf/lego/acme"
+ "github.com/xenolf/lego/providers/dns/cloudflare"
+ "github.com/xenolf/lego/providers/dns/digitalocean"
+ "github.com/xenolf/lego/providers/dns/dnsimple"
+ "github.com/xenolf/lego/providers/dns/dnsmadeeasy"
+ "github.com/xenolf/lego/providers/dns/dyn"
+ "github.com/xenolf/lego/providers/dns/gandi"
+ "github.com/xenolf/lego/providers/dns/googlecloud"
+ "github.com/xenolf/lego/providers/dns/linode"
+ "github.com/xenolf/lego/providers/dns/namecheap"
+ "github.com/xenolf/lego/providers/dns/ovh"
+ "github.com/xenolf/lego/providers/dns/pdns"
+ "github.com/xenolf/lego/providers/dns/rfc2136"
+ "github.com/xenolf/lego/providers/dns/route53"
+ "github.com/xenolf/lego/providers/dns/vultr"
+ "github.com/xenolf/lego/providers/http/webroot"
+)
+
+func checkFolder(path string) error {
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return os.MkdirAll(path, 0700)
+ }
+ return nil
+}
+
+func setup(c *cli.Context) (*Configuration, *Account, *acme.Client) {
+
+ if c.GlobalIsSet("http-timeout") {
+ acme.HTTPClient = http.Client{Timeout: time.Duration(c.GlobalInt("http-timeout")) * time.Second}
+ }
+
+ if c.GlobalIsSet("dns-timeout") {
+ acme.DNSTimeout = time.Duration(c.GlobalInt("dns-timeout")) * time.Second
+ }
+
+ if len(c.GlobalStringSlice("dns-resolvers")) > 0 {
+ resolvers := []string{}
+ for _, resolver := range c.GlobalStringSlice("dns-resolvers") {
+ if !strings.Contains(resolver, ":") {
+ resolver += ":53"
+ }
+ resolvers = append(resolvers, resolver)
+ }
+ acme.RecursiveNameservers = resolvers
+ }
+
+ err := checkFolder(c.GlobalString("path"))
+ if err != nil {
+ logger().Fatalf("Could not check/create path: %s", err.Error())
+ }
+
+ conf := NewConfiguration(c)
+ if len(c.GlobalString("email")) == 0 {
+ logger().Fatal("You have to pass an account (email address) to the program using --email or -m")
+ }
+
+ //TODO: move to account struct? Currently MUST pass email.
+ acc := NewAccount(c.GlobalString("email"), conf)
+
+ keyType, err := conf.KeyType()
+ if err != nil {
+ logger().Fatal(err.Error())
+ }
+
+ client, err := acme.NewClient(c.GlobalString("server"), acc, keyType)
+ if err != nil {
+ logger().Fatalf("Could not create client: %s", err.Error())
+ }
+
+ if len(c.GlobalStringSlice("exclude")) > 0 {
+ client.ExcludeChallenges(conf.ExcludedSolvers())
+ }
+
+ if c.GlobalIsSet("webroot") {
+ provider, err := webroot.NewHTTPProvider(c.GlobalString("webroot"))
+ if err != nil {
+ logger().Fatal(err)
+ }
+
+ client.SetChallengeProvider(acme.HTTP01, provider)
+
+ // --webroot=foo indicates that the user specifically want to do a HTTP challenge
+ // infer that the user also wants to exclude all other challenges
+ client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01})
+ }
+ if c.GlobalIsSet("http") {
+ if strings.Index(c.GlobalString("http"), ":") == -1 {
+ logger().Fatalf("The --http switch only accepts interface:port or :port for its argument.")
+ }
+ client.SetHTTPAddress(c.GlobalString("http"))
+ }
+
+ if c.GlobalIsSet("tls") {
+ if strings.Index(c.GlobalString("tls"), ":") == -1 {
+ logger().Fatalf("The --tls switch only accepts interface:port or :port for its argument.")
+ }
+ client.SetTLSAddress(c.GlobalString("tls"))
+ }
+
+ if c.GlobalIsSet("dns") {
+ var err error
+ var provider acme.ChallengeProvider
+ switch c.GlobalString("dns") {
+ case "cloudflare":
+ provider, err = cloudflare.NewDNSProvider()
+ case "digitalocean":
+ provider, err = digitalocean.NewDNSProvider()
+ case "dnsimple":
+ provider, err = dnsimple.NewDNSProvider()
+ case "dnsmadeeasy":
+ provider, err = dnsmadeeasy.NewDNSProvider()
+ case "dyn":
+ provider, err = dyn.NewDNSProvider()
+ case "gandi":
+ provider, err = gandi.NewDNSProvider()
+ case "gcloud":
+ provider, err = googlecloud.NewDNSProvider()
+ case "linode":
+ provider, err = linode.NewDNSProvider()
+ case "manual":
+ provider, err = acme.NewDNSProviderManual()
+ case "namecheap":
+ provider, err = namecheap.NewDNSProvider()
+ case "route53":
+ provider, err = route53.NewDNSProvider()
+ case "rfc2136":
+ provider, err = rfc2136.NewDNSProvider()
+ case "vultr":
+ provider, err = vultr.NewDNSProvider()
+ case "ovh":
+ provider, err = ovh.NewDNSProvider()
+ case "pdns":
+ provider, err = pdns.NewDNSProvider()
+ }
+
+ if err != nil {
+ logger().Fatal(err)
+ }
+
+ client.SetChallengeProvider(acme.DNS01, provider)
+
+ // --dns=foo indicates that the user specifically want to do a DNS challenge
+ // infer that the user also wants to exclude all other challenges
+ client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01})
+ }
+
+ return conf, acc, client
+}
+
+func saveCertRes(certRes acme.CertificateResource, conf *Configuration) {
+ // We store the certificate, private key and metadata in different files
+ // as web servers would not be able to work with a combined file.
+ certOut := path.Join(conf.CertPath(), certRes.Domain+".crt")
+ privOut := path.Join(conf.CertPath(), certRes.Domain+".key")
+ pemOut := path.Join(conf.CertPath(), certRes.Domain+".pem")
+ metaOut := path.Join(conf.CertPath(), certRes.Domain+".json")
+
+ err := ioutil.WriteFile(certOut, certRes.Certificate, 0600)
+ if err != nil {
+ logger().Fatalf("Unable to save Certificate for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+
+ if certRes.PrivateKey != nil {
+ // if we were given a CSR, we don't know the private key
+ err = ioutil.WriteFile(privOut, certRes.PrivateKey, 0600)
+ if err != nil {
+ logger().Fatalf("Unable to save PrivateKey for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+
+ if conf.context.GlobalBool("pem") {
+ err = ioutil.WriteFile(pemOut, bytes.Join([][]byte{certRes.Certificate, certRes.PrivateKey}, nil), 0600)
+ if err != nil {
+ logger().Fatalf("Unable to save Certificate and PrivateKey in .pem for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+ }
+
+ } else if conf.context.GlobalBool("pem") {
+ // we don't have the private key; can't write the .pem file
+ logger().Fatalf("Unable to save .pem without the private key for domain %s; are you using a CSR?", certRes.Domain)
+ }
+
+ jsonBytes, err := json.MarshalIndent(certRes, "", "\t")
+ if err != nil {
+ logger().Fatalf("Unable to marshal CertResource for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+
+ err = ioutil.WriteFile(metaOut, jsonBytes, 0600)
+ if err != nil {
+ logger().Fatalf("Unable to save CertResource for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+}
+
+func handleTOS(c *cli.Context, client *acme.Client, acc *Account) {
+ // Check for a global accept override
+ if c.GlobalBool("accept-tos") {
+ err := client.AgreeToTOS()
+ if err != nil {
+ logger().Fatalf("Could not agree to TOS: %s", err.Error())
+ }
+
+ acc.Save()
+ return
+ }
+
+ reader := bufio.NewReader(os.Stdin)
+ logger().Printf("Please review the TOS at %s", acc.Registration.TosURL)
+
+ for {
+ logger().Println("Do you accept the TOS? Y/n")
+ text, err := reader.ReadString('\n')
+ if err != nil {
+ logger().Fatalf("Could not read from console: %s", err.Error())
+ }
+
+ text = strings.Trim(text, "\r\n")
+
+ if text == "n" {
+ logger().Fatal("You did not accept the TOS. Unable to proceed.")
+ }
+
+ if text == "Y" || text == "y" || text == "" {
+ err = client.AgreeToTOS()
+ if err != nil {
+ logger().Fatalf("Could not agree to TOS: %s", err.Error())
+ }
+ acc.Save()
+ break
+ }
+
+ logger().Println("Your input was invalid. Please answer with one of Y/y, n or by pressing enter.")
+ }
+}
+
+func readCSRFile(filename string) (*x509.CertificateRequest, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ raw := bytes
+
+ // see if we can find a PEM-encoded CSR
+ var p *pem.Block
+ rest := bytes
+ for {
+ // decode a PEM block
+ p, rest = pem.Decode(rest)
+
+ // did we fail?
+ if p == nil {
+ break
+ }
+
+ // did we get a CSR?
+ if p.Type == "CERTIFICATE REQUEST" {
+ raw = p.Bytes
+ }
+ }
+
+ // no PEM-encoded CSR
+ // assume we were given a DER-encoded ASN.1 CSR
+ // (if this assumption is wrong, parsing these bytes will fail)
+ return x509.ParseCertificateRequest(raw)
+}
+
+func run(c *cli.Context) error {
+ conf, acc, client := setup(c)
+ if acc.Registration == nil {
+ reg, err := client.Register()
+ if err != nil {
+ logger().Fatalf("Could not complete registration\n\t%s", err.Error())
+ }
+
+ acc.Registration = reg
+ acc.Save()
+
+ logger().Print("!!!! HEADS UP !!!!")
+ logger().Printf(`
+ Your account credentials have been saved in your Let's Encrypt
+ configuration directory at "%s".
+ You should make a secure backup of this folder now. This
+ configuration directory will also contain certificates and
+ private keys obtained from Let's Encrypt so making regular
+ backups of this folder is ideal.`, conf.AccountPath(c.GlobalString("email")))
+
+ }
+
+ // If the agreement URL is empty, the account still needs to accept the LE TOS.
+ if acc.Registration.Body.Agreement == "" {
+ handleTOS(c, client, acc)
+ }
+
+ // we require either domains or csr, but not both
+ hasDomains := len(c.GlobalStringSlice("domains")) > 0
+ hasCsr := len(c.GlobalString("csr")) > 0
+ if hasDomains && hasCsr {
+ logger().Fatal("Please specify either --domains/-d or --csr/-c, but not both")
+ }
+ if !hasDomains && !hasCsr {
+ logger().Fatal("Please specify --domains/-d (or --csr/-c if you already have a CSR)")
+ }
+
+ var cert acme.CertificateResource
+ var failures map[string]error
+
+ if hasDomains {
+ // obtain a certificate, generating a new private key
+ cert, failures = client.ObtainCertificate(c.GlobalStringSlice("domains"), !c.Bool("no-bundle"), nil)
+ } else {
+ // read the CSR
+ csr, err := readCSRFile(c.GlobalString("csr"))
+ if err != nil {
+ // we couldn't read the CSR
+ failures = map[string]error{"csr": err}
+ } else {
+ // obtain a certificate for this CSR
+ cert, failures = client.ObtainCertificateForCSR(*csr, !c.Bool("no-bundle"))
+ }
+ }
+
+ if len(failures) > 0 {
+ for k, v := range failures {
+ logger().Printf("[%s] Could not obtain certificates\n\t%s", k, v.Error())
+ }
+
+ // Make sure to return a non-zero exit code if obtaining the certificate
+ // returned at least one error. Because we do not return partial
+ // certificates, we can simply exit here instead of at the end.
+ os.Exit(1)
+ }
+
+ err := checkFolder(conf.CertPath())
+ if err != nil {
+ logger().Fatalf("Could not check/create path: %s", err.Error())
+ }
+
+ saveCertRes(cert, conf)
+
+ return nil
+}
+
+func revoke(c *cli.Context) error {
+
+ conf, _, client := setup(c)
+
+ err := checkFolder(conf.CertPath())
+ if err != nil {
+ logger().Fatalf("Could not check/create path: %s", err.Error())
+ }
+
+ for _, domain := range c.GlobalStringSlice("domains") {
+ logger().Printf("Trying to revoke certificate for domain %s", domain)
+
+ certPath := path.Join(conf.CertPath(), domain+".crt")
+ certBytes, err := ioutil.ReadFile(certPath)
+ if err != nil {
+ logger().Fatalf("Error while loading the certificate for domain %s\n\t%s", domain, err.Error())
+ }
+
+ err = client.RevokeCertificate(certBytes)
+ if err != nil {
+ logger().Fatalf("Error while revoking the certificate for domain %s\n\t%s", domain, err.Error())
+ } else {
+ logger().Print("Certificate was revoked.")
+ }
+ }
+
+ return nil
+}
+
+func renew(c *cli.Context) error {
+ conf, _, client := setup(c)
+
+ if len(c.GlobalStringSlice("domains")) <= 0 {
+ logger().Fatal("Please specify at least one domain.")
+ }
+
+ domain := c.GlobalStringSlice("domains")[0]
+
+ // load the cert resource from files.
+ // We store the certificate, private key and metadata in different files
+ // as web servers would not be able to work with a combined file.
+ certPath := path.Join(conf.CertPath(), domain+".crt")
+ privPath := path.Join(conf.CertPath(), domain+".key")
+ metaPath := path.Join(conf.CertPath(), domain+".json")
+
+ certBytes, err := ioutil.ReadFile(certPath)
+ if err != nil {
+ logger().Fatalf("Error while loading the certificate for domain %s\n\t%s", domain, err.Error())
+ }
+
+ if c.IsSet("days") {
+ expTime, err := acme.GetPEMCertExpiration(certBytes)
+ if err != nil {
+ logger().Printf("Could not get certificate expiration for domain %s", domain)
+ }
+
+ if int(expTime.Sub(time.Now()).Hours()/24.0) > c.Int("days") {
+ return nil
+ }
+ }
+
+ metaBytes, err := ioutil.ReadFile(metaPath)
+ if err != nil {
+ logger().Fatalf("Error while loading the meta data for domain %s\n\t%s", domain, err.Error())
+ }
+
+ var certRes acme.CertificateResource
+ err = json.Unmarshal(metaBytes, &certRes)
+ if err != nil {
+ logger().Fatalf("Error while unmarshalling the meta data for domain %s\n\t%s", domain, err.Error())
+ }
+
+ if c.Bool("reuse-key") {
+ keyBytes, err := ioutil.ReadFile(privPath)
+ if err != nil {
+ logger().Fatalf("Error while loading the private key for domain %s\n\t%s", domain, err.Error())
+ }
+ certRes.PrivateKey = keyBytes
+ }
+
+ certRes.Certificate = certBytes
+
+ newCert, err := client.RenewCertificate(certRes, !c.Bool("no-bundle"))
+ if err != nil {
+ logger().Fatalf("%s", err.Error())
+ }
+
+ saveCertRes(newCert, conf)
+
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/configuration.go b/vendor/github.com/xenolf/lego/configuration.go
new file mode 100644
index 000000000..f92c1fe96
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/configuration.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/urfave/cli"
+ "github.com/xenolf/lego/acme"
+)
+
+// Configuration type from CLI and config files.
+type Configuration struct {
+ context *cli.Context
+}
+
+// NewConfiguration creates a new configuration from CLI data.
+func NewConfiguration(c *cli.Context) *Configuration {
+ return &Configuration{context: c}
+}
+
+// KeyType the type from which private keys should be generated
+func (c *Configuration) KeyType() (acme.KeyType, error) {
+ switch strings.ToUpper(c.context.GlobalString("key-type")) {
+ case "RSA2048":
+ return acme.RSA2048, nil
+ case "RSA4096":
+ return acme.RSA4096, nil
+ case "RSA8192":
+ return acme.RSA8192, nil
+ case "EC256":
+ return acme.EC256, nil
+ case "EC384":
+ return acme.EC384, nil
+ }
+
+ return "", fmt.Errorf("Unsupported KeyType: %s", c.context.GlobalString("key-type"))
+}
+
+// ExcludedSolvers is a list of solvers that are to be excluded.
+func (c *Configuration) ExcludedSolvers() (cc []acme.Challenge) {
+ for _, s := range c.context.GlobalStringSlice("exclude") {
+ cc = append(cc, acme.Challenge(s))
+ }
+ return
+}
+
+// ServerPath returns the OS dependent path to the data for a specific CA
+func (c *Configuration) ServerPath() string {
+ srv, _ := url.Parse(c.context.GlobalString("server"))
+ srvStr := strings.Replace(srv.Host, ":", "_", -1)
+ return strings.Replace(srvStr, "/", string(os.PathSeparator), -1)
+}
+
+// CertPath gets the path for certificates.
+func (c *Configuration) CertPath() string {
+ return path.Join(c.context.GlobalString("path"), "certificates")
+}
+
+// AccountsPath returns the OS dependent path to the
+// local accounts for a specific CA
+func (c *Configuration) AccountsPath() string {
+ return path.Join(c.context.GlobalString("path"), "accounts", c.ServerPath())
+}
+
+// AccountPath returns the OS dependent path to a particular account
+func (c *Configuration) AccountPath(acc string) string {
+ return path.Join(c.AccountsPath(), acc)
+}
+
+// AccountKeysPath returns the OS dependent path to the keys of a particular account
+func (c *Configuration) AccountKeysPath(acc string) string {
+ return path.Join(c.AccountPath(acc), "keys")
+}
diff --git a/vendor/github.com/xenolf/lego/crypto.go b/vendor/github.com/xenolf/lego/crypto.go
new file mode 100644
index 000000000..8b23e2fc1
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/crypto.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "os"
+)
+
+func generatePrivateKey(file string) (crypto.PrivateKey, error) {
+
+ privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ keyBytes, err := x509.MarshalECPrivateKey(privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ pemKey := pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
+
+ certOut, err := os.Create(file)
+ if err != nil {
+ return nil, err
+ }
+
+ pem.Encode(certOut, &pemKey)
+ certOut.Close()
+
+ return privateKey, nil
+}
+
+func loadPrivateKey(file string) (crypto.PrivateKey, error) {
+ keyBytes, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+
+ keyBlock, _ := pem.Decode(keyBytes)
+
+ switch keyBlock.Type {
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(keyBlock.Bytes)
+ }
+
+ return nil, errors.New("Unknown private key type.")
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go
new file mode 100644
index 000000000..84952238d
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go
@@ -0,0 +1,223 @@
+// Package cloudflare implements a DNS provider for solving the DNS-01
+// challenge using cloudflare DNS.
+package cloudflare
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// CloudFlareAPIURL represents the API endpoint to call.
+// TODO: Unexport?
+const CloudFlareAPIURL = "https://api.cloudflare.com/client/v4"
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface
+type DNSProvider struct {
+ authEmail string
+ authKey string
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for cloudflare.
+// Credentials must be passed in the environment variables: CLOUDFLARE_EMAIL
+// and CLOUDFLARE_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ email := os.Getenv("CLOUDFLARE_EMAIL")
+ key := os.Getenv("CLOUDFLARE_API_KEY")
+ return NewDNSProviderCredentials(email, key)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for cloudflare.
+func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) {
+ if email == "" || key == "" {
+ return nil, fmt.Errorf("CloudFlare credentials missing")
+ }
+
+ return &DNSProvider{
+ authEmail: email,
+ authKey: key,
+ }, nil
+}
+
+// Timeout returns the timeout and interval to use when checking for DNS
+// propagation. Adjusting here to cope with spikes in propagation times.
+func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 120 * time.Second, 2 * time.Second
+}
+
+// Present creates a TXT record to fulfil the dns-01 challenge
+func (c *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ zoneID, err := c.getHostedZoneID(fqdn)
+ if err != nil {
+ return err
+ }
+
+ rec := cloudFlareRecord{
+ Type: "TXT",
+ Name: acme.UnFqdn(fqdn),
+ Content: value,
+ TTL: 120,
+ }
+
+ body, err := json.Marshal(rec)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.makeRequest("POST", fmt.Sprintf("/zones/%s/dns_records", zoneID), bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ record, err := c.findTxtRecord(fqdn)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.makeRequest("DELETE", fmt.Sprintf("/zones/%s/dns_records/%s", record.ZoneID, record.ID), nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) {
+ // HostedZone represents a CloudFlare DNS zone
+ type HostedZone struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ }
+
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return "", err
+ }
+
+ result, err := c.makeRequest("GET", "/zones?name="+acme.UnFqdn(authZone), nil)
+ if err != nil {
+ return "", err
+ }
+
+ var hostedZone []HostedZone
+ err = json.Unmarshal(result, &hostedZone)
+ if err != nil {
+ return "", err
+ }
+
+ if len(hostedZone) != 1 {
+ return "", fmt.Errorf("Zone %s not found in CloudFlare for domain %s", authZone, fqdn)
+ }
+
+ return hostedZone[0].ID, nil
+}
+
+func (c *DNSProvider) findTxtRecord(fqdn string) (*cloudFlareRecord, error) {
+ zoneID, err := c.getHostedZoneID(fqdn)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err := c.makeRequest(
+ "GET",
+ fmt.Sprintf("/zones/%s/dns_records?per_page=1000&type=TXT&name=%s", zoneID, acme.UnFqdn(fqdn)),
+ nil,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ var records []cloudFlareRecord
+ err = json.Unmarshal(result, &records)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rec := range records {
+ if rec.Name == acme.UnFqdn(fqdn) {
+ return &rec, nil
+ }
+ }
+
+ return nil, fmt.Errorf("No existing record found for %s", fqdn)
+}
+
+func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) {
+ // APIError contains error details for failed requests
+ type APIError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+ ErrorChain []APIError `json:"error_chain,omitempty"`
+ }
+
+ // APIResponse represents a response from CloudFlare API
+ type APIResponse struct {
+ Success bool `json:"success"`
+ Errors []*APIError `json:"errors"`
+ Result json.RawMessage `json:"result"`
+ }
+
+ req, err := http.NewRequest(method, fmt.Sprintf("%s%s", CloudFlareAPIURL, uri), body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("X-Auth-Email", c.authEmail)
+ req.Header.Set("X-Auth-Key", c.authKey)
+ //req.Header.Set("User-Agent", userAgent())
+
+ client := http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("Error querying Cloudflare API -> %v", err)
+ }
+
+ defer resp.Body.Close()
+
+ var r APIResponse
+ err = json.NewDecoder(resp.Body).Decode(&r)
+ if err != nil {
+ return nil, err
+ }
+
+ if !r.Success {
+ if len(r.Errors) > 0 {
+ errStr := ""
+ for _, apiErr := range r.Errors {
+ errStr += fmt.Sprintf("\t Error: %d: %s", apiErr.Code, apiErr.Message)
+ for _, chainErr := range apiErr.ErrorChain {
+ errStr += fmt.Sprintf("<- %d: %s", chainErr.Code, chainErr.Message)
+ }
+ }
+ return nil, fmt.Errorf("Cloudflare API Error \n%s", errStr)
+ }
+ return nil, fmt.Errorf("Cloudflare API error")
+ }
+
+ return r.Result, nil
+}
+
+// cloudFlareRecord represents a CloudFlare DNS record
+type cloudFlareRecord struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Content string `json:"content"`
+ ID string `json:"id,omitempty"`
+ TTL int `json:"ttl,omitempty"`
+ ZoneID string `json:"zone_id,omitempty"`
+}
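
The provider above authenticates with CLOUDFLARE_EMAIL and CLOUDFLARE_API_KEY (or explicit values via NewDNSProviderCredentials), looks up the hosted zone for the challenge FQDN, and creates and later deletes a short-TTL TXT record. A sketch of wiring it into a client directly, mirroring what the CLI's --dns cloudflare path does; the helper name and credentials are placeholders and client creation is elided:

    package example

    import (
        "log"

        "github.com/xenolf/lego/acme"
        "github.com/xenolf/lego/providers/dns/cloudflare"
    )

    // useCloudflareDNS is a hypothetical helper mirroring what the CLI does for
    // "--dns cloudflare": it wires the provider into an existing *acme.Client.
    func useCloudflareDNS(client *acme.Client, email, apiKey string) {
        provider, err := cloudflare.NewDNSProviderCredentials(email, apiKey)
        if err != nil {
            log.Fatal(err)
        }

        client.SetChallengeProvider(acme.DNS01, provider)

        // Only solve DNS-01, just as the CLI does when --dns is given.
        client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01})
    }
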
diff --git a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go
new file mode 100644
index 000000000..19b5a40b9
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare_test.go
@@ -0,0 +1,80 @@
+package cloudflare
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ cflareLiveTest bool
+ cflareEmail string
+ cflareAPIKey string
+ cflareDomain string
+)
+
+func init() {
+ cflareEmail = os.Getenv("CLOUDFLARE_EMAIL")
+ cflareAPIKey = os.Getenv("CLOUDFLARE_API_KEY")
+ cflareDomain = os.Getenv("CLOUDFLARE_DOMAIN")
+ if len(cflareEmail) > 0 && len(cflareAPIKey) > 0 && len(cflareDomain) > 0 {
+ cflareLiveTest = true
+ }
+}
+
+func restoreCloudFlareEnv() {
+ os.Setenv("CLOUDFLARE_EMAIL", cflareEmail)
+ os.Setenv("CLOUDFLARE_API_KEY", cflareAPIKey)
+}
+
+func TestNewDNSProviderValid(t *testing.T) {
+ os.Setenv("CLOUDFLARE_EMAIL", "")
+ os.Setenv("CLOUDFLARE_API_KEY", "")
+ _, err := NewDNSProviderCredentials("123", "123")
+ assert.NoError(t, err)
+ restoreCloudFlareEnv()
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ os.Setenv("CLOUDFLARE_EMAIL", "test@example.com")
+ os.Setenv("CLOUDFLARE_API_KEY", "123")
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+ restoreCloudFlareEnv()
+}
+
+func TestNewDNSProviderMissingCredErr(t *testing.T) {
+ os.Setenv("CLOUDFLARE_EMAIL", "")
+ os.Setenv("CLOUDFLARE_API_KEY", "")
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "CloudFlare credentials missing")
+ restoreCloudFlareEnv()
+}
+
+func TestCloudFlarePresent(t *testing.T) {
+ if !cflareLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProviderCredentials(cflareEmail, cflareAPIKey)
+ assert.NoError(t, err)
+
+ err = provider.Present(cflareDomain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestCloudFlareCleanUp(t *testing.T) {
+ if !cflareLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 2)
+
+ provider, err := NewDNSProviderCredentials(cflareEmail, cflareAPIKey)
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(cflareDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go
new file mode 100644
index 000000000..da261b39a
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go
@@ -0,0 +1,166 @@
+// Package digitalocean implements a DNS provider for solving the DNS-01
+// challenge using DigitalOcean DNS.
+package digitalocean
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface
+// that uses DigitalOcean's REST API to manage TXT records for a domain.
+type DNSProvider struct {
+ apiAuthToken string
+ recordIDs map[string]int
+ recordIDsMu sync.Mutex
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for DigitalOcean.
+// Credentials must be passed in the environment variable: DO_AUTH_TOKEN.
+func NewDNSProvider() (*DNSProvider, error) {
+ apiAuthToken := os.Getenv("DO_AUTH_TOKEN")
+ return NewDNSProviderCredentials(apiAuthToken)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for DigitalOcean.
+func NewDNSProviderCredentials(apiAuthToken string) (*DNSProvider, error) {
+ if apiAuthToken == "" {
+ return nil, fmt.Errorf("DigitalOcean credentials missing")
+ }
+ return &DNSProvider{
+ apiAuthToken: apiAuthToken,
+ recordIDs: make(map[string]int),
+ }, nil
+}
+
+// Present creates a TXT record using the specified parameters
+func (d *DNSProvider) Present(domain, token, keyAuth string) error {
+ // txtRecordRequest represents the request body to DO's API to make a TXT record
+ type txtRecordRequest struct {
+ RecordType string `json:"type"`
+ Name string `json:"name"`
+ Data string `json:"data"`
+ }
+
+ // txtRecordResponse represents a response from DO's API after making a TXT record
+ type txtRecordResponse struct {
+ DomainRecord struct {
+ ID int `json:"id"`
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Data string `json:"data"`
+ } `json:"domain_record"`
+ }
+
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err)
+ }
+
+ authZone = acme.UnFqdn(authZone)
+
+ reqURL := fmt.Sprintf("%s/v2/domains/%s/records", digitalOceanBaseURL, authZone)
+ reqData := txtRecordRequest{RecordType: "TXT", Name: fqdn, Data: value}
+ body, err := json.Marshal(reqData)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("POST", reqURL, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken))
+
+ client := http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ var errInfo digitalOceanAPIError
+ json.NewDecoder(resp.Body).Decode(&errInfo)
+ return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message)
+ }
+
+ // Everything looks good; but we'll need the ID later to delete the record
+ var respData txtRecordResponse
+ err = json.NewDecoder(resp.Body).Decode(&respData)
+ if err != nil {
+ return err
+ }
+ d.recordIDsMu.Lock()
+ d.recordIDs[fqdn] = respData.DomainRecord.ID
+ d.recordIDsMu.Unlock()
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ // get the record's unique ID from when we created it
+ d.recordIDsMu.Lock()
+ recordID, ok := d.recordIDs[fqdn]
+ d.recordIDsMu.Unlock()
+ if !ok {
+ return fmt.Errorf("unknown record ID for '%s'", fqdn)
+ }
+
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err)
+ }
+
+ authZone = acme.UnFqdn(authZone)
+
+ reqURL := fmt.Sprintf("%s/v2/domains/%s/records/%d", digitalOceanBaseURL, authZone, recordID)
+ req, err := http.NewRequest("DELETE", reqURL, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken))
+
+ client := http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ var errInfo digitalOceanAPIError
+ json.NewDecoder(resp.Body).Decode(&errInfo)
+ return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message)
+ }
+
+ // Delete record ID from map
+ d.recordIDsMu.Lock()
+ delete(d.recordIDs, fqdn)
+ d.recordIDsMu.Unlock()
+
+ return nil
+}
+
+type digitalOceanAPIError struct {
+ ID string `json:"id"`
+ Message string `json:"message"`
+}
+
+var digitalOceanBaseURL = "https://api.digitalocean.com"
diff --git a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go
new file mode 100644
index 000000000..7498508ba
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean_test.go
@@ -0,0 +1,117 @@
+package digitalocean
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+var fakeDigitalOceanAuth = "asdf1234"
+
+func TestDigitalOceanPresent(t *testing.T) {
+ var requestReceived bool
+
+ mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ requestReceived = true
+
+ if got, want := r.Method, "POST"; got != want {
+ t.Errorf("Expected method to be '%s' but got '%s'", want, got)
+ }
+ if got, want := r.URL.Path, "/v2/domains/example.com/records"; got != want {
+ t.Errorf("Expected path to be '%s' but got '%s'", want, got)
+ }
+ if got, want := r.Header.Get("Content-Type"), "application/json"; got != want {
+ t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got)
+ }
+ if got, want := r.Header.Get("Authorization"), "Bearer asdf1234"; got != want {
+ t.Errorf("Expected Authorization to be '%s' but got '%s'", want, got)
+ }
+
+ reqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatalf("Error reading request body: %v", err)
+ }
+ if got, want := string(reqBody), `{"type":"TXT","name":"_acme-challenge.example.com.","data":"w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI"}`; got != want {
+ t.Errorf("Expected body data to be: `%s` but got `%s`", want, got)
+ }
+
+ w.WriteHeader(http.StatusCreated)
+ fmt.Fprintf(w, `{
+ "domain_record": {
+ "id": 1234567,
+ "type": "TXT",
+ "name": "_acme-challenge",
+ "data": "w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI",
+ "priority": null,
+ "port": null,
+ "weight": null
+ }
+ }`)
+ }))
+ defer mock.Close()
+ digitalOceanBaseURL = mock.URL
+
+ doprov, err := NewDNSProviderCredentials(fakeDigitalOceanAuth)
+ if doprov == nil {
+ t.Fatal("Expected non-nil DigitalOcean provider, but was nil")
+ }
+ if err != nil {
+ t.Fatalf("Expected no error creating provider, but got: %v", err)
+ }
+
+ err = doprov.Present("example.com", "", "foobar")
+ if err != nil {
+ t.Fatalf("Expected no error creating TXT record, but got: %v", err)
+ }
+ if !requestReceived {
+ t.Error("Expected request to be received by mock backend, but it wasn't")
+ }
+}
+
+func TestDigitalOceanCleanUp(t *testing.T) {
+ var requestReceived bool
+
+ mock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ requestReceived = true
+
+ if got, want := r.Method, "DELETE"; got != want {
+ t.Errorf("Expected method to be '%s' but got '%s'", want, got)
+ }
+ if got, want := r.URL.Path, "/v2/domains/example.com/records/1234567"; got != want {
+ t.Errorf("Expected path to be '%s' but got '%s'", want, got)
+ }
+ // NOTE: Even though the body is empty, DigitalOcean API docs still show setting this Content-Type...
+ if got, want := r.Header.Get("Content-Type"), "application/json"; got != want {
+ t.Errorf("Expected Content-Type to be '%s' but got '%s'", want, got)
+ }
+ if got, want := r.Header.Get("Authorization"), "Bearer asdf1234"; got != want {
+ t.Errorf("Expected Authorization to be '%s' but got '%s'", want, got)
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+ }))
+ defer mock.Close()
+ digitalOceanBaseURL = mock.URL
+
+ doprov, err := NewDNSProviderCredentials(fakeDigitalOceanAuth)
+ if doprov == nil {
+ t.Fatal("Expected non-nil DigitalOcean provider, but was nil")
+ }
+ if err != nil {
+ t.Fatalf("Expected no error creating provider, but got: %v", err)
+ }
+
+ doprov.recordIDsMu.Lock()
+ doprov.recordIDs["_acme-challenge.example.com."] = 1234567
+ doprov.recordIDsMu.Unlock()
+
+ err = doprov.CleanUp("example.com", "", "")
+ if err != nil {
+ t.Fatalf("Expected no error removing TXT record, but got: %v", err)
+ }
+ if !requestReceived {
+ t.Error("Expected request to be received by mock backend, but it wasn't")
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go
new file mode 100644
index 000000000..c903a35ce
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go
@@ -0,0 +1,141 @@
+// Package dnsimple implements a DNS provider for solving the DNS-01 challenge
+// using DNSimple DNS.
+package dnsimple
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/weppos/dnsimple-go/dnsimple"
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface.
+type DNSProvider struct {
+ client *dnsimple.Client
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for dnsimple.
+// Credentials must be passed in the environment variables: DNSIMPLE_EMAIL
+// and DNSIMPLE_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ email := os.Getenv("DNSIMPLE_EMAIL")
+ key := os.Getenv("DNSIMPLE_API_KEY")
+ return NewDNSProviderCredentials(email, key)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for dnsimple.
+func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) {
+ if email == "" || key == "" {
+ return nil, fmt.Errorf("DNSimple credentials missing")
+ }
+
+ return &DNSProvider{
+ client: dnsimple.NewClient(key, email),
+ }, nil
+}
+
+// Present creates a TXT record to fulfil the dns-01 challenge.
+func (c *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+
+ zoneID, zoneName, err := c.getHostedZone(domain)
+ if err != nil {
+ return err
+ }
+
+ recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl)
+ _, _, err = c.client.Domains.CreateRecord(zoneID, *recordAttributes)
+ if err != nil {
+ return fmt.Errorf("DNSimple API call failed: %v", err)
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters.
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ records, err := c.findTxtRecords(domain, fqdn)
+ if err != nil {
+ return err
+ }
+
+ for _, rec := range records {
+ _, err := c.client.Domains.DeleteRecord(rec.DomainId, rec.Id)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *DNSProvider) getHostedZone(domain string) (string, string, error) {
+ zones, _, err := c.client.Domains.List()
+ if err != nil {
+ return "", "", fmt.Errorf("DNSimple API call failed: %v", err)
+ }
+
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return "", "", err
+ }
+
+ var hostedZone dnsimple.Domain
+ for _, zone := range zones {
+ if zone.Name == acme.UnFqdn(authZone) {
+ hostedZone = zone
+ }
+ }
+
+ if hostedZone.Id == 0 {
+ return "", "", fmt.Errorf("Zone %s not found in DNSimple for domain %s", authZone, domain)
+ }
+
+ return fmt.Sprintf("%v", hostedZone.Id), hostedZone.Name, nil
+}
+
+func (c *DNSProvider) findTxtRecords(domain, fqdn string) ([]dnsimple.Record, error) {
+ zoneID, zoneName, err := c.getHostedZone(domain)
+ if err != nil {
+ return nil, err
+ }
+
+ var records []dnsimple.Record
+ result, _, err := c.client.Domains.ListRecords(zoneID, "", "TXT")
+ if err != nil {
+ return records, fmt.Errorf("DNSimple API call has failed: %v", err)
+ }
+
+ recordName := c.extractRecordName(fqdn, zoneName)
+ for _, record := range result {
+ if record.Name == recordName {
+ records = append(records, record)
+ }
+ }
+
+ return records, nil
+}
+
+func (c *DNSProvider) newTxtRecord(zone, fqdn, value string, ttl int) *dnsimple.Record {
+ name := c.extractRecordName(fqdn, zone)
+
+ return &dnsimple.Record{
+ Type: "TXT",
+ Name: name,
+ Content: value,
+ TTL: ttl,
+ }
+}
+
+func (c *DNSProvider) extractRecordName(fqdn, domain string) string {
+ name := acme.UnFqdn(fqdn)
+ if idx := strings.Index(name, "."+domain); idx != -1 {
+ return name[:idx]
+ }
+ return name
+}
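
extractRecordName trims the zone suffix so DNSimple receives a record name relative to the zone. A small self-contained sketch of that trimming (the helper below re-implements the logic locally rather than importing the package):

package main

import (
	"fmt"
	"strings"
)

// trimZone re-implements the same trimming locally: drop the trailing dot,
// then cut everything from the "."+zone suffix onwards.
func trimZone(fqdn, zone string) string {
	name := strings.TrimSuffix(fqdn, ".")
	if idx := strings.Index(name, "."+zone); idx != -1 {
		return name[:idx]
	}
	return name
}

func main() {
	// "_acme-challenge.www.example.com." in zone "example.com"
	// becomes "_acme-challenge.www".
	fmt.Println(trimZone("_acme-challenge.www.example.com.", "example.com"))
}
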
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go
new file mode 100644
index 000000000..4926b3df9
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple_test.go
@@ -0,0 +1,79 @@
+package dnsimple
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ dnsimpleLiveTest bool
+ dnsimpleEmail string
+ dnsimpleAPIKey string
+ dnsimpleDomain string
+)
+
+func init() {
+ dnsimpleEmail = os.Getenv("DNSIMPLE_EMAIL")
+ dnsimpleAPIKey = os.Getenv("DNSIMPLE_API_KEY")
+ dnsimpleDomain = os.Getenv("DNSIMPLE_DOMAIN")
+ if len(dnsimpleEmail) > 0 && len(dnsimpleAPIKey) > 0 && len(dnsimpleDomain) > 0 {
+ dnsimpleLiveTest = true
+ }
+}
+
+func restoreDNSimpleEnv() {
+ os.Setenv("DNSIMPLE_EMAIL", dnsimpleEmail)
+ os.Setenv("DNSIMPLE_API_KEY", dnsimpleAPIKey)
+}
+
+func TestNewDNSProviderValid(t *testing.T) {
+ os.Setenv("DNSIMPLE_EMAIL", "")
+ os.Setenv("DNSIMPLE_API_KEY", "")
+ _, err := NewDNSProviderCredentials("example@example.com", "123")
+ assert.NoError(t, err)
+ restoreDNSimpleEnv()
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ os.Setenv("DNSIMPLE_EMAIL", "example@example.com")
+ os.Setenv("DNSIMPLE_API_KEY", "123")
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+ restoreDNSimpleEnv()
+}
+
+func TestNewDNSProviderMissingCredErr(t *testing.T) {
+ os.Setenv("DNSIMPLE_EMAIL", "")
+ os.Setenv("DNSIMPLE_API_KEY", "")
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "DNSimple credentials missing")
+ restoreDNSimpleEnv()
+}
+
+func TestLiveDNSimplePresent(t *testing.T) {
+ if !dnsimpleLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProviderCredentials(dnsimpleEmail, dnsimpleAPIKey)
+ assert.NoError(t, err)
+
+ err = provider.Present(dnsimpleDomain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestLiveDNSimpleCleanUp(t *testing.T) {
+ if !dnsimpleLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 1)
+
+ provider, err := NewDNSProviderCredentials(dnsimpleEmail, dnsimpleAPIKey)
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(dnsimpleDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go
new file mode 100644
index 000000000..c4363a4eb
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go
@@ -0,0 +1,248 @@
+package dnsmadeeasy
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/tls"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses
+// DNSMadeEasy's DNS API to manage TXT records for a domain.
+type DNSProvider struct {
+ baseURL string
+ apiKey string
+ apiSecret string
+}
+
+// Domain holds the DNSMadeEasy API representation of a Domain
+type Domain struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+}
+
+// Record holds the DNSMadeEasy API representation of a Domain Record
+type Record struct {
+ ID int `json:"id"`
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Value string `json:"value"`
+ TTL int `json:"ttl"`
+ SourceID int `json:"sourceId"`
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for DNSMadeEasy DNS.
+// Credentials must be passed in the environment variables: DNSMADEEASY_API_KEY
+// and DNSMADEEASY_API_SECRET.
+func NewDNSProvider() (*DNSProvider, error) {
+ dnsmadeeasyAPIKey := os.Getenv("DNSMADEEASY_API_KEY")
+ dnsmadeeasyAPISecret := os.Getenv("DNSMADEEASY_API_SECRET")
+ dnsmadeeasySandbox := os.Getenv("DNSMADEEASY_SANDBOX")
+
+ var baseURL string
+
+ sandbox, _ := strconv.ParseBool(dnsmadeeasySandbox)
+ if sandbox {
+ baseURL = "https://api.sandbox.dnsmadeeasy.com/V2.0"
+ } else {
+ baseURL = "https://api.dnsmadeeasy.com/V2.0"
+ }
+
+ return NewDNSProviderCredentials(baseURL, dnsmadeeasyAPIKey, dnsmadeeasyAPISecret)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for DNSMadeEasy.
+func NewDNSProviderCredentials(baseURL, apiKey, apiSecret string) (*DNSProvider, error) {
+ if baseURL == "" || apiKey == "" || apiSecret == "" {
+ return nil, fmt.Errorf("DNS Made Easy credentials missing")
+ }
+
+ return &DNSProvider{
+ baseURL: baseURL,
+ apiKey: apiKey,
+ apiSecret: apiSecret,
+ }, nil
+}
+
+// Present creates a TXT record using the specified parameters
+func (d *DNSProvider) Present(domainName, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domainName, keyAuth)
+
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ // fetch the domain details
+ domain, err := d.getDomain(authZone)
+ if err != nil {
+ return err
+ }
+
+ // create the TXT record
+ name := strings.Replace(fqdn, "."+authZone, "", 1)
+ record := &Record{Type: "TXT", Name: name, Value: value, TTL: ttl}
+
+ err = d.createRecord(domain, record)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT records matching the specified parameters
+func (d *DNSProvider) CleanUp(domainName, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domainName, keyAuth)
+
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ // fetch the domain details
+ domain, err := d.getDomain(authZone)
+ if err != nil {
+ return err
+ }
+
+ // find matching records
+ name := strings.Replace(fqdn, "."+authZone, "", 1)
+ records, err := d.getRecords(domain, name, "TXT")
+ if err != nil {
+ return err
+ }
+
+ // delete records
+ for _, record := range *records {
+ err = d.deleteRecord(record)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DNSProvider) getDomain(authZone string) (*Domain, error) {
+ domainName := authZone[0 : len(authZone)-1]
+ resource := fmt.Sprintf("%s%s", "/dns/managed/name?domainname=", domainName)
+
+ resp, err := d.sendRequest("GET", resource, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ domain := &Domain{}
+ err = json.NewDecoder(resp.Body).Decode(&domain)
+ if err != nil {
+ return nil, err
+ }
+
+ return domain, nil
+}
+
+func (d *DNSProvider) getRecords(domain *Domain, recordName, recordType string) (*[]Record, error) {
+ resource := fmt.Sprintf("%s/%d/%s%s%s%s", "/dns/managed", domain.ID, "records?recordName=", recordName, "&type=", recordType)
+
+ resp, err := d.sendRequest("GET", resource, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ type recordsResponse struct {
+ Records *[]Record `json:"data"`
+ }
+
+ records := &recordsResponse{}
+ err = json.NewDecoder(resp.Body).Decode(&records)
+ if err != nil {
+ return nil, err
+ }
+
+ return records.Records, nil
+}
+
+func (d *DNSProvider) createRecord(domain *Domain, record *Record) error {
+ url := fmt.Sprintf("%s/%d/%s", "/dns/managed", domain.ID, "records")
+
+ resp, err := d.sendRequest("POST", url, record)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (d *DNSProvider) deleteRecord(record Record) error {
+ resource := fmt.Sprintf("%s/%d/%s/%d", "/dns/managed", record.SourceID, "records", record.ID)
+
+ resp, err := d.sendRequest("DELETE", resource, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*http.Response, error) {
+ url := fmt.Sprintf("%s%s", d.baseURL, resource)
+
+ body, err := json.Marshal(payload)
+ if err != nil {
+ return nil, err
+ }
+
+ timestamp := time.Now().UTC().Format(time.RFC1123)
+ signature := computeHMAC(timestamp, d.apiSecret)
+
+ req, err := http.NewRequest(method, url, bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("x-dnsme-apiKey", d.apiKey)
+ req.Header.Set("x-dnsme-requestDate", timestamp)
+ req.Header.Set("x-dnsme-hmac", signature)
+ req.Header.Set("accept", "application/json")
+ req.Header.Set("content-type", "application/json")
+
+ // Note: TLS certificate verification is disabled for requests to the
+ // DNS Made Easy API endpoint.
+ transport := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ client := &http.Client{
+ Transport: transport,
+ Timeout: 10 * time.Second,
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode > 299 {
+ return nil, fmt.Errorf("DNSMadeEasy API request failed with HTTP status code %d", resp.StatusCode)
+ }
+
+ return resp, nil
+}
+
+func computeHMAC(message string, secret string) string {
+ key := []byte(secret)
+ h := hmac.New(sha1.New, key)
+ h.Write([]byte(message))
+ return hex.EncodeToString(h.Sum(nil))
+}
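
The three x-dnsme-* headers set in sendRequest carry the whole authentication scheme: the request date is signed with the API secret using HMAC-SHA1, exactly as computeHMAC does. A standalone sketch of that signing step (the secret is a made-up placeholder):

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"time"
)

func main() {
	apiSecret := "example-secret" // placeholder, not a real credential

	// The value sent as x-dnsme-requestDate ...
	timestamp := time.Now().UTC().Format(time.RFC1123)

	// ... is HMAC-SHA1 signed with the API secret and hex encoded, as
	// computeHMAC does, to produce the x-dnsme-hmac header.
	h := hmac.New(sha1.New, []byte(apiSecret))
	h.Write([]byte(timestamp))
	signature := hex.EncodeToString(h.Sum(nil))

	fmt.Println("x-dnsme-requestDate:", timestamp)
	fmt.Println("x-dnsme-hmac:", signature)
}
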
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go
new file mode 100644
index 000000000..e860ecb69
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy_test.go
@@ -0,0 +1,37 @@
+package dnsmadeeasy
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ testLive bool
+ testAPIKey string
+ testAPISecret string
+ testDomain string
+)
+
+func init() {
+ testAPIKey = os.Getenv("DNSMADEEASY_API_KEY")
+ testAPISecret = os.Getenv("DNSMADEEASY_API_SECRET")
+ testDomain = os.Getenv("DNSMADEEASY_DOMAIN")
+ os.Setenv("DNSMADEEASY_SANDBOX", "true")
+ testLive = len(testAPIKey) > 0 && len(testAPISecret) > 0 && len(testDomain) > 0
+}
+
+func TestPresentAndCleanup(t *testing.T) {
+ if !testLive {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.Present(testDomain, "", "123d==")
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(testDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go
new file mode 100644
index 000000000..384bc850c
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go
@@ -0,0 +1,274 @@
+// Package dyn implements a DNS provider for solving the DNS-01 challenge
+// using Dyn Managed DNS.
+package dyn
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+var dynBaseURL = "https://api.dynect.net/REST"
+
+type dynResponse struct {
+ // One of 'success', 'failure', or 'incomplete'
+ Status string `json:"status"`
+
+ // The structure containing the actual results of the request
+ Data json.RawMessage `json:"data"`
+
+ // The ID of the job that was created in response to a request.
+ JobID int `json:"job_id"`
+
+ // A list of zero or more messages
+ Messages json.RawMessage `json:"msgs"`
+}
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses
+// Dyn's Managed DNS API to manage TXT records for a domain.
+type DNSProvider struct {
+ customerName string
+ userName string
+ password string
+ token string
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for Dyn DNS.
+// Credentials must be passed in the environment variables: DYN_CUSTOMER_NAME,
+// DYN_USER_NAME and DYN_PASSWORD.
+func NewDNSProvider() (*DNSProvider, error) {
+ customerName := os.Getenv("DYN_CUSTOMER_NAME")
+ userName := os.Getenv("DYN_USER_NAME")
+ password := os.Getenv("DYN_PASSWORD")
+ return NewDNSProviderCredentials(customerName, userName, password)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for Dyn DNS.
+func NewDNSProviderCredentials(customerName, userName, password string) (*DNSProvider, error) {
+ if customerName == "" || userName == "" || password == "" {
+ return nil, fmt.Errorf("DynDNS credentials missing")
+ }
+
+ return &DNSProvider{
+ customerName: customerName,
+ userName: userName,
+ password: password,
+ }, nil
+}
+
+func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*dynResponse, error) {
+ url := fmt.Sprintf("%s/%s", dynBaseURL, resource)
+
+ body, err := json.Marshal(payload)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest(method, url, bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ if len(d.token) > 0 {
+ req.Header.Set("Auth-Token", d.token)
+ }
+
+ client := &http.Client{Timeout: 10 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("Dyn API request failed with HTTP status code %d", resp.StatusCode)
+ } else if resp.StatusCode == 307 {
+ // TODO add support for HTTP 307 response and long running jobs
+ return nil, fmt.Errorf("Dyn API request returned HTTP 307. This is currently unsupported")
+ }
+
+ var dynRes dynResponse
+ err = json.NewDecoder(resp.Body).Decode(&dynRes)
+ if err != nil {
+ return nil, err
+ }
+
+ if dynRes.Status == "failure" {
+ // TODO add better error handling
+ return nil, fmt.Errorf("Dyn API request failed: %s", dynRes.Messages)
+ }
+
+ return &dynRes, nil
+}
+
+// login starts a new Dyn API session. It authenticates using customerName,
+// userName and password, and receives a token to be used for subsequent requests.
+func (d *DNSProvider) login() error {
+ type creds struct {
+ Customer string `json:"customer_name"`
+ User string `json:"user_name"`
+ Pass string `json:"password"`
+ }
+
+ type session struct {
+ Token string `json:"token"`
+ Version string `json:"version"`
+ }
+
+ payload := &creds{Customer: d.customerName, User: d.userName, Pass: d.password}
+ dynRes, err := d.sendRequest("POST", "Session", payload)
+ if err != nil {
+ return err
+ }
+
+ var s session
+ err = json.Unmarshal(dynRes.Data, &s)
+ if err != nil {
+ return err
+ }
+
+ d.token = s.Token
+
+ return nil
+}
+
+// logout destroys the Dyn API session.
+func (d *DNSProvider) logout() error {
+ if len(d.token) == 0 {
+ // nothing to do
+ return nil
+ }
+
+ url := fmt.Sprintf("%s/Session", dynBaseURL)
+ req, err := http.NewRequest("DELETE", url, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Auth-Token", d.token)
+
+ client := &http.Client{Timeout: 10 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("Dyn API request failed to delete session with HTTP status code %d", resp.StatusCode)
+ }
+
+ d.token = ""
+
+ return nil
+}
+
+// Present creates a TXT record using the specified parameters
+func (d *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ err = d.login()
+ if err != nil {
+ return err
+ }
+
+ data := map[string]interface{}{
+ "rdata": map[string]string{
+ "txtdata": value,
+ },
+ "ttl": strconv.Itoa(ttl),
+ }
+
+ resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn)
+ _, err = d.sendRequest("POST", resource, data)
+ if err != nil {
+ return err
+ }
+
+ err = d.publish(authZone, "Added TXT record for ACME dns-01 challenge using lego client")
+ if err != nil {
+ return err
+ }
+
+ err = d.logout()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *DNSProvider) publish(zone, notes string) error {
+ type publish struct {
+ Publish bool `json:"publish"`
+ Notes string `json:"notes"`
+ }
+
+ pub := &publish{Publish: true, Notes: notes}
+ resource := fmt.Sprintf("Zone/%s/", zone)
+ _, err := d.sendRequest("PUT", resource, pub)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return err
+ }
+
+ err = d.login()
+ if err != nil {
+ return err
+ }
+
+ resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn)
+ url := fmt.Sprintf("%s/%s", dynBaseURL, resource)
+ req, err := http.NewRequest("DELETE", url, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Auth-Token", d.token)
+
+ client := &http.Client{Timeout: 10 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("Dyn API request failed to delete TXT record HTTP status code %d", resp.StatusCode)
+ }
+
+ err = d.publish(authZone, "Removed TXT record for ACME dns-01 challenge using lego client")
+ if err != nil {
+ return err
+ }
+
+ err = d.logout()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
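
Present and CleanUp above share the same session lifecycle: login for an Auth-Token, mutate the TXT record, publish the zone, then logout. In practice the provider is registered with the acme client rather than called directly, but a hypothetical sketch of driving it by hand (the domain and key authorization are placeholders) looks like:

package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/dyn"
)

func main() {
	// Reads DYN_CUSTOMER_NAME, DYN_USER_NAME and DYN_PASSWORD from the environment.
	provider, err := dyn.NewDNSProvider()
	if err != nil {
		log.Fatal(err)
	}

	// Present runs login -> add TXT record -> publish zone -> logout.
	if err := provider.Present("example.com", "", "keyAuth"); err != nil {
		log.Fatal(err)
	}

	// CleanUp repeats the cycle, deleting the record instead of adding it.
	if err := provider.CleanUp("example.com", "", "keyAuth"); err != nil {
		log.Fatal(err)
	}
}
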
diff --git a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go
new file mode 100644
index 000000000..0d28d5d0e
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn_test.go
@@ -0,0 +1,53 @@
+package dyn
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ dynLiveTest bool
+ dynCustomerName string
+ dynUserName string
+ dynPassword string
+ dynDomain string
+)
+
+func init() {
+ dynCustomerName = os.Getenv("DYN_CUSTOMER_NAME")
+ dynUserName = os.Getenv("DYN_USER_NAME")
+ dynPassword = os.Getenv("DYN_PASSWORD")
+ dynDomain = os.Getenv("DYN_DOMAIN")
+ if len(dynCustomerName) > 0 && len(dynUserName) > 0 && len(dynPassword) > 0 && len(dynDomain) > 0 {
+ dynLiveTest = true
+ }
+}
+
+func TestLiveDynPresent(t *testing.T) {
+ if !dynLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.Present(dynDomain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestLiveDynCleanUp(t *testing.T) {
+ if !dynLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 1)
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(dynDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go
new file mode 100644
index 000000000..422b02a21
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go
@@ -0,0 +1,472 @@
+// Package gandi implements a DNS provider for solving the DNS-01
+// challenge using Gandi DNS.
+package gandi
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// Gandi API reference: http://doc.rpc.gandi.net/index.html
+// Gandi API domain examples: http://doc.rpc.gandi.net/domain/faq.html
+
+var (
+ // endpoint is the Gandi XML-RPC endpoint used by Present and
+ // CleanUp. It is overridden during tests.
+ endpoint = "https://rpc.gandi.net/xmlrpc/"
+ // findZoneByFqdn determines the DNS zone of an fqdn. It is overridden
+ // during tests.
+ findZoneByFqdn = acme.FindZoneByFqdn
+)
+
+// inProgressInfo contains information about an in-progress challenge
+type inProgressInfo struct {
+ zoneID int // zoneID of gandi zone to restore in CleanUp
+ newZoneID int // zoneID of temporary gandi zone containing TXT record
+ authZone string // the domain name registered at gandi with trailing "."
+}
+
+// DNSProvider is an implementation of the
+// acme.ChallengeProviderTimeout interface that uses Gandi's XML-RPC
+// API to manage TXT records for a domain.
+type DNSProvider struct {
+ apiKey string
+ inProgressFQDNs map[string]inProgressInfo
+ inProgressAuthZones map[string]struct{}
+ inProgressMu sync.Mutex
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for Gandi.
+// Credentials must be passed in the environment variable: GANDI_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ apiKey := os.Getenv("GANDI_API_KEY")
+ return NewDNSProviderCredentials(apiKey)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for Gandi.
+func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) {
+ if apiKey == "" {
+ return nil, fmt.Errorf("No Gandi API Key given")
+ }
+ return &DNSProvider{
+ apiKey: apiKey,
+ inProgressFQDNs: make(map[string]inProgressInfo),
+ inProgressAuthZones: make(map[string]struct{}),
+ }, nil
+}
+
+// Present creates a TXT record using the specified parameters. It
+// does this by creating and activating a new temporary Gandi DNS
+// zone. This new zone contains the TXT record.
+func (d *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+ if ttl < 300 {
+ ttl = 300 // 300 is gandi minimum value for ttl
+ }
+ // find authZone and Gandi zone_id for fqdn
+ authZone, err := findZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return fmt.Errorf("Gandi DNS: findZoneByFqdn failure: %v", err)
+ }
+ zoneID, err := d.getZoneID(authZone)
+ if err != nil {
+ return err
+ }
+ // determine name of TXT record
+ if !strings.HasSuffix(
+ strings.ToLower(fqdn), strings.ToLower("."+authZone)) {
+ return fmt.Errorf(
+ "Gandi DNS: unexpected authZone %s for fqdn %s", authZone, fqdn)
+ }
+ name := fqdn[:len(fqdn)-len("."+authZone)]
+ // acquire lock and check there is not a challenge already in
+ // progress for this value of authZone
+ d.inProgressMu.Lock()
+ defer d.inProgressMu.Unlock()
+ if _, ok := d.inProgressAuthZones[authZone]; ok {
+ return fmt.Errorf(
+ "Gandi DNS: challenge already in progress for authZone %s",
+ authZone)
+ }
+ // perform API actions to create and activate new gandi zone
+ // containing the required TXT record
+ newZoneName := fmt.Sprintf(
+ "%s [ACME Challenge %s]",
+ acme.UnFqdn(authZone), time.Now().Format(time.RFC822Z))
+ newZoneID, err := d.cloneZone(zoneID, newZoneName)
+ if err != nil {
+ return err
+ }
+ newZoneVersion, err := d.newZoneVersion(newZoneID)
+ if err != nil {
+ return err
+ }
+ err = d.addTXTRecord(newZoneID, newZoneVersion, name, value, ttl)
+ if err != nil {
+ return err
+ }
+ err = d.setZoneVersion(newZoneID, newZoneVersion)
+ if err != nil {
+ return err
+ }
+ err = d.setZone(authZone, newZoneID)
+ if err != nil {
+ return err
+ }
+ // save data necessary for CleanUp
+ d.inProgressFQDNs[fqdn] = inProgressInfo{
+ zoneID: zoneID,
+ newZoneID: newZoneID,
+ authZone: authZone,
+ }
+ d.inProgressAuthZones[authZone] = struct{}{}
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified
+// parameters. It does this by restoring the old Gandi DNS zone and
+// removing the temporary one created by Present.
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+ // acquire lock and retrieve zoneID, newZoneID and authZone
+ d.inProgressMu.Lock()
+ defer d.inProgressMu.Unlock()
+ if _, ok := d.inProgressFQDNs[fqdn]; !ok {
+ // if there is no cleanup information then just return
+ return nil
+ }
+ zoneID := d.inProgressFQDNs[fqdn].zoneID
+ newZoneID := d.inProgressFQDNs[fqdn].newZoneID
+ authZone := d.inProgressFQDNs[fqdn].authZone
+ delete(d.inProgressFQDNs, fqdn)
+ delete(d.inProgressAuthZones, authZone)
+ // perform API actions to restore old gandi zone for authZone
+ err := d.setZone(authZone, zoneID)
+ if err != nil {
+ return err
+ }
+ err = d.deleteZone(newZoneID)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Timeout returns the values (40*time.Minute, 60*time.Second) which
+// are used by the acme package as timeout and check interval values
+// when checking for DNS record propagation with Gandi.
+func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 40 * time.Minute, 60 * time.Second
+}
+
+// types for XML-RPC method calls and parameters
+
+type param interface {
+ param()
+}
+type paramString struct {
+ XMLName xml.Name `xml:"param"`
+ Value string `xml:"value>string"`
+}
+type paramInt struct {
+ XMLName xml.Name `xml:"param"`
+ Value int `xml:"value>int"`
+}
+
+type structMember interface {
+ structMember()
+}
+type structMemberString struct {
+ Name string `xml:"name"`
+ Value string `xml:"value>string"`
+}
+type structMemberInt struct {
+ Name string `xml:"name"`
+ Value int `xml:"value>int"`
+}
+type paramStruct struct {
+ XMLName xml.Name `xml:"param"`
+ StructMembers []structMember `xml:"value>struct>member"`
+}
+
+func (p paramString) param() {}
+func (p paramInt) param() {}
+func (m structMemberString) structMember() {}
+func (m structMemberInt) structMember() {}
+func (p paramStruct) param() {}
+
+type methodCall struct {
+ XMLName xml.Name `xml:"methodCall"`
+ MethodName string `xml:"methodName"`
+ Params []param `xml:"params"`
+}
+
+// types for XML-RPC responses
+
+type response interface {
+ faultCode() int
+ faultString() string
+}
+
+type responseFault struct {
+ FaultCode int `xml:"fault>value>struct>member>value>int"`
+ FaultString string `xml:"fault>value>struct>member>value>string"`
+}
+
+func (r responseFault) faultCode() int { return r.FaultCode }
+func (r responseFault) faultString() string { return r.FaultString }
+
+type responseStruct struct {
+ responseFault
+ StructMembers []struct {
+ Name string `xml:"name"`
+ ValueInt int `xml:"value>int"`
+ } `xml:"params>param>value>struct>member"`
+}
+
+type responseInt struct {
+ responseFault
+ Value int `xml:"params>param>value>int"`
+}
+
+type responseBool struct {
+ responseFault
+ Value bool `xml:"params>param>value>boolean"`
+}
+
+// POSTing/Marshalling/Unmarshalling
+
+type rpcError struct {
+ faultCode int
+ faultString string
+}
+
+func (e rpcError) Error() string {
+ return fmt.Sprintf(
+ "Gandi DNS: RPC Error: (%d) %s", e.faultCode, e.faultString)
+}
+
+func httpPost(url string, bodyType string, body io.Reader) ([]byte, error) {
+ client := http.Client{Timeout: 60 * time.Second}
+ resp, err := client.Post(url, bodyType, body)
+ if err != nil {
+ return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err)
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err)
+ }
+ return b, nil
+}
+
+// rpcCall makes an XML-RPC call to Gandi's RPC endpoint by
+// marshalling the data given in the call argument to XML and sending
+// that via HTTP Post to Gandi. The response is then unmarshalled into
+// the resp argument.
+func rpcCall(call *methodCall, resp response) error {
+ // marshal
+ b, err := xml.MarshalIndent(call, "", " ")
+ if err != nil {
+ return fmt.Errorf("Gandi DNS: Marshal Error: %v", err)
+ }
+ // post
+ b = append([]byte(`<?xml version="1.0"?>`+"\n"), b...)
+ respBody, err := httpPost(endpoint, "text/xml", bytes.NewReader(b))
+ if err != nil {
+ return err
+ }
+ // unmarshal
+ err = xml.Unmarshal(respBody, resp)
+ if err != nil {
+ return fmt.Errorf("Gandi DNS: Unmarshal Error: %v", err)
+ }
+ if resp.faultCode() != 0 {
+ return rpcError{
+ faultCode: resp.faultCode(), faultString: resp.faultString()}
+ }
+ return nil
+}
+
+// functions to perform API actions
+
+func (d *DNSProvider) getZoneID(domain string) (int, error) {
+ resp := &responseStruct{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.info",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramString{Value: domain},
+ },
+ }, resp)
+ if err != nil {
+ return 0, err
+ }
+ var zoneID int
+ for _, member := range resp.StructMembers {
+ if member.Name == "zone_id" {
+ zoneID = member.ValueInt
+ }
+ }
+ if zoneID == 0 {
+ return 0, fmt.Errorf(
+ "Gandi DNS: Could not determine zone_id for %s", domain)
+ }
+ return zoneID, nil
+}
+
+func (d *DNSProvider) cloneZone(zoneID int, name string) (int, error) {
+ resp := &responseStruct{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.clone",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramInt{Value: zoneID},
+ paramInt{Value: 0},
+ paramStruct{
+ StructMembers: []structMember{
+ structMemberString{
+ Name: "name",
+ Value: name,
+ }},
+ },
+ },
+ }, resp)
+ if err != nil {
+ return 0, err
+ }
+ var newZoneID int
+ for _, member := range resp.StructMembers {
+ if member.Name == "id" {
+ newZoneID = member.ValueInt
+ }
+ }
+ if newZoneID == 0 {
+ return 0, fmt.Errorf("Gandi DNS: Could not determine cloned zone_id")
+ }
+ return newZoneID, nil
+}
+
+func (d *DNSProvider) newZoneVersion(zoneID int) (int, error) {
+ resp := &responseInt{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.version.new",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramInt{Value: zoneID},
+ },
+ }, resp)
+ if err != nil {
+ return 0, err
+ }
+ if resp.Value == 0 {
+ return 0, fmt.Errorf("Gandi DNS: Could not create new zone version")
+ }
+ return resp.Value, nil
+}
+
+func (d *DNSProvider) addTXTRecord(zoneID int, version int, name string, value string, ttl int) error {
+ resp := &responseStruct{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.record.add",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramInt{Value: zoneID},
+ paramInt{Value: version},
+ paramStruct{
+ StructMembers: []structMember{
+ structMemberString{
+ Name: "type",
+ Value: "TXT",
+ }, structMemberString{
+ Name: "name",
+ Value: name,
+ }, structMemberString{
+ Name: "value",
+ Value: value,
+ }, structMemberInt{
+ Name: "ttl",
+ Value: ttl,
+ }},
+ },
+ },
+ }, resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (d *DNSProvider) setZoneVersion(zoneID int, version int) error {
+ resp := &responseBool{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.version.set",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramInt{Value: zoneID},
+ paramInt{Value: version},
+ },
+ }, resp)
+ if err != nil {
+ return err
+ }
+ if !resp.Value {
+ return fmt.Errorf("Gandi DNS: could not set zone version")
+ }
+ return nil
+}
+
+func (d *DNSProvider) setZone(domain string, zoneID int) error {
+ resp := &responseStruct{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.set",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramString{Value: domain},
+ paramInt{Value: zoneID},
+ },
+ }, resp)
+ if err != nil {
+ return err
+ }
+ var respZoneID int
+ for _, member := range resp.StructMembers {
+ if member.Name == "zone_id" {
+ respZoneID = member.ValueInt
+ }
+ }
+ if respZoneID != zoneID {
+ return fmt.Errorf(
+ "Gandi DNS: Could not set new zone_id for %s", domain)
+ }
+ return nil
+}
+
+func (d *DNSProvider) deleteZone(zoneID int) error {
+ resp := &responseBool{}
+ err := rpcCall(&methodCall{
+ MethodName: "domain.zone.delete",
+ Params: []param{
+ paramString{Value: d.apiKey},
+ paramInt{Value: zoneID},
+ },
+ }, resp)
+ if err != nil {
+ return err
+ }
+ if !resp.Value {
+ return fmt.Errorf("Gandi DNS: could not delete zone_id")
+ }
+ return nil
+}
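
rpcCall above marshals a methodCall value to XML and POSTs it to the Gandi endpoint; the request fixtures in the test file below were captured from that output. A small sketch of how a domain.info call marshals (the XML-RPC types are re-declared locally so the snippet stands alone, and the API key is a placeholder):

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

// Local copies of the minimal XML-RPC types used above.
type paramString struct {
	XMLName xml.Name `xml:"param"`
	Value   string   `xml:"value>string"`
}

type methodCall struct {
	XMLName    xml.Name      `xml:"methodCall"`
	MethodName string        `xml:"methodName"`
	Params     []paramString `xml:"params"`
}

func main() {
	call := &methodCall{
		MethodName: "domain.info",
		Params: []paramString{
			{Value: "123412341234123412341234"}, // placeholder API key
			{Value: "example.com."},
		},
	}
	b, err := xml.MarshalIndent(call, "", " ")
	if err != nil {
		log.Fatal(err)
	}
	// Prepend the XML declaration, as rpcCall does, before POSTing to the endpoint.
	fmt.Println(`<?xml version="1.0"?>` + "\n" + string(b))
}
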
diff --git a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go
new file mode 100644
index 000000000..15919e2eb
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi_test.go
@@ -0,0 +1,939 @@
+package gandi
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// stagingServer is the Let's Encrypt staging server used by the live test
+const stagingServer = "https://acme-staging.api.letsencrypt.org/directory"
+
+// user implements acme.User and is used by the live test
+type user struct {
+ Email string
+ Registration *acme.RegistrationResource
+ key crypto.PrivateKey
+}
+
+func (u *user) GetEmail() string {
+ return u.Email
+}
+func (u *user) GetRegistration() *acme.RegistrationResource {
+ return u.Registration
+}
+func (u *user) GetPrivateKey() crypto.PrivateKey {
+ return u.key
+}
+
+// TestDNSProvider runs Present and CleanUp against a fake Gandi RPC
+// Server, whose responses are predetermined for particular requests.
+func TestDNSProvider(t *testing.T) {
+ fakeAPIKey := "123412341234123412341234"
+ fakeKeyAuth := "XXXX"
+ provider, err := NewDNSProviderCredentials(fakeAPIKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ regexpDate, err := regexp.Compile(`\[ACME Challenge [^\]:]*:[^\]]*\]`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // start fake RPC server
+ fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Content-Type") != "text/xml" {
+ t.Fatalf("Content-Type: text/xml header not found")
+ }
+ req, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req = regexpDate.ReplaceAllLiteral(
+ req, []byte(`[ACME Challenge 01 Jan 16 00:00 +0000]`))
+ resp, ok := serverResponses[string(req)]
+ if !ok {
+ t.Fatalf("Server response for request not found")
+ }
+ _, err = io.Copy(w, strings.NewReader(resp))
+ if err != nil {
+ t.Fatal(err)
+ }
+ }))
+ defer fakeServer.Close()
+ // define function to override findZoneByFqdn with
+ fakeFindZoneByFqdn := func(fqdn string, nameserver []string) (string, error) {
+ return "example.com.", nil
+ }
+ // override gandi endpoint and findZoneByFqdn function
+ savedEndpoint, savedFindZoneByFqdn := endpoint, findZoneByFqdn
+ defer func() {
+ endpoint, findZoneByFqdn = savedEndpoint, savedFindZoneByFqdn
+ }()
+ endpoint, findZoneByFqdn = fakeServer.URL+"/", fakeFindZoneByFqdn
+ // run Present
+ err = provider.Present("abc.def.example.com", "", fakeKeyAuth)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // run CleanUp
+ err = provider.CleanUp("abc.def.example.com", "", fakeKeyAuth)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestDNSProviderLive performs a live test to obtain a certificate
+// using the Let's Encrypt staging server. It runs provided that both
+// the environment variables GANDI_API_KEY and GANDI_TEST_DOMAIN are
+// set. Otherwise the test is skipped.
+//
+// To complete this test, go test must be run with the -timeout=40m
+// flag, since the default timeout of 10m is insufficient.
+func TestDNSProviderLive(t *testing.T) {
+ apiKey := os.Getenv("GANDI_API_KEY")
+ domain := os.Getenv("GANDI_TEST_DOMAIN")
+ if apiKey == "" || domain == "" {
+ t.Skip("skipping live test")
+ }
+ // create a user.
+ const rsaKeySize = 2048
+ privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize)
+ if err != nil {
+ t.Fatal(err)
+ }
+ myUser := user{
+ Email: "test@example.com",
+ key: privateKey,
+ }
+ // create a client using staging server
+ client, err := acme.NewClient(stagingServer, &myUser, acme.RSA2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ provider, err := NewDNSProviderCredentials(apiKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = client.SetChallengeProvider(acme.DNS01, provider)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01})
+ // register and agree tos
+ reg, err := client.Register()
+ if err != nil {
+ t.Fatal(err)
+ }
+ myUser.Registration = reg
+ err = client.AgreeToTOS()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // complete the challenge
+ bundle := false
+ _, failures := client.ObtainCertificate([]string{domain}, bundle, nil)
+ if len(failures) > 0 {
+ t.Fatal(failures)
+ }
+}
+
+// serverResponses is the XML-RPC Request->Response map used by the
+// fake RPC server. It was generated by recording a real RPC session
+// which resulted in the successful issuance of a certificate, and then
+// anonymizing the RPC data.
+var serverResponses = map[string]string{
+ // Present Request->Response 1 (getZoneID)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.info</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <string>example.com.</string>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>date_updated</name>
+<value><dateTime.iso8601>20160216T16:14:23</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_delete</name>
+<value><dateTime.iso8601>20170331T16:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>is_premium</name>
+<value><boolean>0</boolean></value>
+</member>
+<member>
+<name>date_hold_begin</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_registry_end</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>authinfo_expiration_date</name>
+<value><dateTime.iso8601>20161211T21:31:20</dateTime.iso8601></value>
+</member>
+<member>
+<name>contacts</name>
+<value><struct>
+<member>
+<name>owner</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>admin</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>bill</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>tech</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>reseller</name>
+<value><nil/></value></member>
+</struct></value>
+</member>
+<member>
+<name>nameservers</name>
+<value><array><data>
+<value><string>a.dns.gandi.net</string></value>
+<value><string>b.dns.gandi.net</string></value>
+<value><string>c.dns.gandi.net</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_restore_end</name>
+<value><dateTime.iso8601>20170501T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>id</name>
+<value><int>2222222</int></value>
+</member>
+<member>
+<name>authinfo</name>
+<value><string>ABCDABCDAB</string></value>
+</member>
+<member>
+<name>status</name>
+<value><array><data>
+<value><string>clientTransferProhibited</string></value>
+<value><string>serverTransferProhibited</string></value>
+</data></array></value>
+</member>
+<member>
+<name>tags</name>
+<value><array><data>
+</data></array></value>
+</member>
+<member>
+<name>date_hold_end</name>
+<value><dateTime.iso8601>20170401T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>services</name>
+<value><array><data>
+<value><string>gandidns</string></value>
+<value><string>gandimail</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_pending_delete_end</name>
+<value><dateTime.iso8601>20170506T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>zone_id</name>
+<value><int>1234567</int></value>
+</member>
+<member>
+<name>date_renew_begin</name>
+<value><dateTime.iso8601>20120101T00:00:00</dateTime.iso8601></value>
+</member>
+<member>
+<name>fqdn</name>
+<value><string>example.com</string></value>
+</member>
+<member>
+<name>autorenew</name>
+<value><nil/></value></member>
+<member>
+<name>date_registry_creation</name>
+<value><dateTime.iso8601>20150215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>tld</name>
+<value><string>org</string></value>
+</member>
+<member>
+<name>date_created</name>
+<value><dateTime.iso8601>20150215T03:04:06</dateTime.iso8601></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // Present Request->Response 2 (cloneZone)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.clone</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>1234567</int>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>0</int>
+ </value>
+ </param>
+ <param>
+ <value>
+ <struct>
+ <member>
+ <name>name</name>
+ <value>
+ <string>example.com [ACME Challenge 01 Jan 16 00:00 +0000]</string>
+ </value>
+ </member>
+ </struct>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>name</name>
+<value><string>example.com [ACME Challenge 01 Jan 16 00:00 +0000]</string></value>
+</member>
+<member>
+<name>versions</name>
+<value><array><data>
+<value><int>1</int></value>
+</data></array></value>
+</member>
+<member>
+<name>date_updated</name>
+<value><dateTime.iso8601>20160216T16:24:29</dateTime.iso8601></value>
+</member>
+<member>
+<name>id</name>
+<value><int>7654321</int></value>
+</member>
+<member>
+<name>owner</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>version</name>
+<value><int>1</int></value>
+</member>
+<member>
+<name>domains</name>
+<value><int>0</int></value>
+</member>
+<member>
+<name>public</name>
+<value><boolean>0</boolean></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // Present Request->Response 3 (newZoneVersion)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.version.new</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>7654321</int>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><int>2</int></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // Present Request->Response 4 (addTXTRecord)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.record.add</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>7654321</int>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>2</int>
+ </value>
+ </param>
+ <param>
+ <value>
+ <struct>
+ <member>
+ <name>type</name>
+ <value>
+ <string>TXT</string>
+ </value>
+ </member>
+ <member>
+ <name>name</name>
+ <value>
+ <string>_acme-challenge.abc.def</string>
+ </value>
+ </member>
+ <member>
+ <name>value</name>
+ <value>
+ <string>ezRpBPY8wH8djMLYjX2uCKPwiKDkFZ1SFMJ6ZXGlHrQ</string>
+ </value>
+ </member>
+ <member>
+ <name>ttl</name>
+ <value>
+ <int>300</int>
+ </value>
+ </member>
+ </struct>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>name</name>
+<value><string>_acme-challenge.abc.def</string></value>
+</member>
+<member>
+<name>type</name>
+<value><string>TXT</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>3333333333</int></value>
+</member>
+<member>
+<name>value</name>
+<value><string>"ezRpBPY8wH8djMLYjX2uCKPwiKDkFZ1SFMJ6ZXGlHrQ"</string></value>
+</member>
+<member>
+<name>ttl</name>
+<value><int>300</int></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // Present Request->Response 5 (setZoneVersion)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.version.set</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>7654321</int>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>2</int>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><boolean>1</boolean></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // Present Request->Response 6 (setZone)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.set</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <string>example.com.</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>7654321</int>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>date_updated</name>
+<value><dateTime.iso8601>20160216T16:14:23</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_delete</name>
+<value><dateTime.iso8601>20170331T16:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>is_premium</name>
+<value><boolean>0</boolean></value>
+</member>
+<member>
+<name>date_hold_begin</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_registry_end</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>authinfo_expiration_date</name>
+<value><dateTime.iso8601>20161211T21:31:20</dateTime.iso8601></value>
+</member>
+<member>
+<name>contacts</name>
+<value><struct>
+<member>
+<name>owner</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>admin</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>bill</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>tech</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>reseller</name>
+<value><nil/></value></member>
+</struct></value>
+</member>
+<member>
+<name>nameservers</name>
+<value><array><data>
+<value><string>a.dns.gandi.net</string></value>
+<value><string>b.dns.gandi.net</string></value>
+<value><string>c.dns.gandi.net</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_restore_end</name>
+<value><dateTime.iso8601>20170501T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>id</name>
+<value><int>2222222</int></value>
+</member>
+<member>
+<name>authinfo</name>
+<value><string>ABCDABCDAB</string></value>
+</member>
+<member>
+<name>status</name>
+<value><array><data>
+<value><string>clientTransferProhibited</string></value>
+<value><string>serverTransferProhibited</string></value>
+</data></array></value>
+</member>
+<member>
+<name>tags</name>
+<value><array><data>
+</data></array></value>
+</member>
+<member>
+<name>date_hold_end</name>
+<value><dateTime.iso8601>20170401T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>services</name>
+<value><array><data>
+<value><string>gandidns</string></value>
+<value><string>gandimail</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_pending_delete_end</name>
+<value><dateTime.iso8601>20170506T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>zone_id</name>
+<value><int>7654321</int></value>
+</member>
+<member>
+<name>date_renew_begin</name>
+<value><dateTime.iso8601>20120101T00:00:00</dateTime.iso8601></value>
+</member>
+<member>
+<name>fqdn</name>
+<value><string>example.com</string></value>
+</member>
+<member>
+<name>autorenew</name>
+<value><nil/></value></member>
+<member>
+<name>date_registry_creation</name>
+<value><dateTime.iso8601>20150215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>tld</name>
+<value><string>org</string></value>
+</member>
+<member>
+<name>date_created</name>
+<value><dateTime.iso8601>20150215T03:04:06</dateTime.iso8601></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // CleanUp Request->Response 1 (setZone)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.set</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <string>example.com.</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>1234567</int>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>date_updated</name>
+<value><dateTime.iso8601>20160216T16:24:38</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_delete</name>
+<value><dateTime.iso8601>20170331T16:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>is_premium</name>
+<value><boolean>0</boolean></value>
+</member>
+<member>
+<name>date_hold_begin</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>date_registry_end</name>
+<value><dateTime.iso8601>20170215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>authinfo_expiration_date</name>
+<value><dateTime.iso8601>20161211T21:31:20</dateTime.iso8601></value>
+</member>
+<member>
+<name>contacts</name>
+<value><struct>
+<member>
+<name>owner</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>admin</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>bill</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>tech</name>
+<value><struct>
+<member>
+<name>handle</name>
+<value><string>LEGO-GANDI</string></value>
+</member>
+<member>
+<name>id</name>
+<value><int>111111</int></value>
+</member>
+</struct></value>
+</member>
+<member>
+<name>reseller</name>
+<value><nil/></value></member>
+</struct></value>
+</member>
+<member>
+<name>nameservers</name>
+<value><array><data>
+<value><string>a.dns.gandi.net</string></value>
+<value><string>b.dns.gandi.net</string></value>
+<value><string>c.dns.gandi.net</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_restore_end</name>
+<value><dateTime.iso8601>20170501T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>id</name>
+<value><int>2222222</int></value>
+</member>
+<member>
+<name>authinfo</name>
+<value><string>ABCDABCDAB</string></value>
+</member>
+<member>
+<name>status</name>
+<value><array><data>
+<value><string>clientTransferProhibited</string></value>
+<value><string>serverTransferProhibited</string></value>
+</data></array></value>
+</member>
+<member>
+<name>tags</name>
+<value><array><data>
+</data></array></value>
+</member>
+<member>
+<name>date_hold_end</name>
+<value><dateTime.iso8601>20170401T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>services</name>
+<value><array><data>
+<value><string>gandidns</string></value>
+<value><string>gandimail</string></value>
+</data></array></value>
+</member>
+<member>
+<name>date_pending_delete_end</name>
+<value><dateTime.iso8601>20170506T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>zone_id</name>
+<value><int>1234567</int></value>
+</member>
+<member>
+<name>date_renew_begin</name>
+<value><dateTime.iso8601>20120101T00:00:00</dateTime.iso8601></value>
+</member>
+<member>
+<name>fqdn</name>
+<value><string>example.com</string></value>
+</member>
+<member>
+<name>autorenew</name>
+<value><nil/></value></member>
+<member>
+<name>date_registry_creation</name>
+<value><dateTime.iso8601>20150215T02:04:06</dateTime.iso8601></value>
+</member>
+<member>
+<name>tld</name>
+<value><string>org</string></value>
+</member>
+<member>
+<name>date_created</name>
+<value><dateTime.iso8601>20150215T03:04:06</dateTime.iso8601></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+`,
+ // CleanUp Request->Response 2 (deleteZone)
+ `<?xml version="1.0"?>
+<methodCall>
+ <methodName>domain.zone.delete</methodName>
+ <param>
+ <value>
+ <string>123412341234123412341234</string>
+ </value>
+ </param>
+ <param>
+ <value>
+ <int>7654321</int>
+ </value>
+ </param>
+</methodCall>`: `<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><boolean>1</boolean></value>
+</param>
+</params>
+</methodResponse>
+`,
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go
new file mode 100644
index 000000000..b8d9951c9
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go
@@ -0,0 +1,158 @@
+// Package googlecloud implements a DNS provider for solving the DNS-01
+// challenge using Google Cloud DNS.
+package googlecloud
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/google"
+
+ "google.golang.org/api/dns/v1"
+)
+
+// DNSProvider is an implementation of the DNSProvider interface.
+type DNSProvider struct {
+ project string
+ client *dns.Service
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for Google Cloud
+// DNS. Credentials must be passed in the environment variable: GCE_PROJECT.
+func NewDNSProvider() (*DNSProvider, error) {
+ project := os.Getenv("GCE_PROJECT")
+ return NewDNSProviderCredentials(project)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for Google Cloud DNS.
+func NewDNSProviderCredentials(project string) (*DNSProvider, error) {
+ if project == "" {
+ return nil, fmt.Errorf("Google Cloud project name missing")
+ }
+
+ client, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to get Google Cloud client: %v", err)
+ }
+ svc, err := dns.New(client)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to create Google Cloud DNS service: %v", err)
+ }
+ return &DNSProvider{
+ project: project,
+ client: svc,
+ }, nil
+}
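+
+// Usage sketch (illustrative only; "my-gcp-project" and keyAuth are
+// placeholders, and Application Default Credentials are assumed to be
+// available in the environment):
+//
+//     provider, err := NewDNSProviderCredentials("my-gcp-project")
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     err = provider.Present("example.com", "", keyAuth)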
+
+// Present creates a TXT record to fulfil the dns-01 challenge.
+func (c *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+
+ zone, err := c.getHostedZone(domain)
+ if err != nil {
+ return err
+ }
+
+ rec := &dns.ResourceRecordSet{
+ Name: fqdn,
+ Rrdatas: []string{value},
+ Ttl: int64(ttl),
+ Type: "TXT",
+ }
+ change := &dns.Change{
+ Additions: []*dns.ResourceRecordSet{rec},
+ }
+
+ chg, err := c.client.Changes.Create(c.project, zone, change).Do()
+ if err != nil {
+ return err
+ }
+
+ // wait for change to be acknowledged
+ for chg.Status == "pending" {
+ time.Sleep(time.Second)
+
+ chg, err = c.client.Changes.Get(c.project, zone, chg.Id).Do()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters.
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ zone, err := c.getHostedZone(domain)
+ if err != nil {
+ return err
+ }
+
+ records, err := c.findTxtRecords(zone, fqdn)
+ if err != nil {
+ return err
+ }
+
+ for _, rec := range records {
+ change := &dns.Change{
+ Deletions: []*dns.ResourceRecordSet{rec},
+ }
+ _, err = c.client.Changes.Create(c.project, zone, change).Do()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Timeout customizes the timeout values used by the ACME package for checking
+// DNS record validity.
+func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 180 * time.Second, 5 * time.Second
+}
+
+// getHostedZone returns the name of the managed zone that is authoritative for the given domain.
+func (c *DNSProvider) getHostedZone(domain string) (string, error) {
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return "", err
+ }
+
+ zones, err := c.client.ManagedZones.
+ List(c.project).
+ DnsName(authZone).
+ Do()
+ if err != nil {
+ return "", fmt.Errorf("GoogleCloud API call failed: %v", err)
+ }
+
+ if len(zones.ManagedZones) == 0 {
+ return "", fmt.Errorf("No matching GoogleCloud domain found for domain %s", authZone)
+ }
+
+ return zones.ManagedZones[0].Name, nil
+}
+
+func (c *DNSProvider) findTxtRecords(zone, fqdn string) ([]*dns.ResourceRecordSet, error) {
+
+ recs, err := c.client.ResourceRecordSets.List(c.project, zone).Do()
+ if err != nil {
+ return nil, err
+ }
+
+ found := []*dns.ResourceRecordSet{}
+ for _, r := range recs.Rrsets {
+ if r.Type == "TXT" && r.Name == fqdn {
+ found = append(found, r)
+ }
+ }
+
+ return found, nil
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go
new file mode 100644
index 000000000..d73788163
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud_test.go
@@ -0,0 +1,85 @@
+package googlecloud
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/dns/v1"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ gcloudLiveTest bool
+ gcloudProject string
+ gcloudDomain string
+)
+
+func init() {
+ gcloudProject = os.Getenv("GCE_PROJECT")
+ gcloudDomain = os.Getenv("GCE_DOMAIN")
+ _, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope)
+ if err == nil && len(gcloudProject) > 0 && len(gcloudDomain) > 0 {
+ gcloudLiveTest = true
+ }
+}
+
+func restoreGCloudEnv() {
+ os.Setenv("GCE_PROJECT", gcloudProject)
+}
+
+func TestNewDNSProviderValid(t *testing.T) {
+ if !gcloudLiveTest {
+ t.Skip("skipping live test (requires credentials)")
+ }
+ os.Setenv("GCE_PROJECT", "")
+ _, err := NewDNSProviderCredentials("my-project")
+ assert.NoError(t, err)
+ restoreGCloudEnv()
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ if !gcloudLiveTest {
+ t.Skip("skipping live test (requires credentials)")
+ }
+ os.Setenv("GCE_PROJECT", "my-project")
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+ restoreGCloudEnv()
+}
+
+func TestNewDNSProviderMissingCredErr(t *testing.T) {
+ os.Setenv("GCE_PROJECT", "")
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "Google Cloud project name missing")
+ restoreGCloudEnv()
+}
+
+func TestLiveGoogleCloudPresent(t *testing.T) {
+ if !gcloudLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProviderCredentials(gcloudProject)
+ assert.NoError(t, err)
+
+ err = provider.Present(gcloudDomain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestLiveGoogleCloudCleanUp(t *testing.T) {
+ if !gcloudLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 1)
+
+ provider, err := NewDNSProviderCredentials(gcloudProject)
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(gcloudDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go b/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go
new file mode 100644
index 000000000..a91d2b489
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/linode/linode.go
@@ -0,0 +1,131 @@
+// Package linode implements a DNS provider for solving the DNS-01 challenge
+// using Linode DNS.
+package linode
+
+import (
+ "errors"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/timewasted/linode/dns"
+ "github.com/xenolf/lego/acme"
+)
+
+const (
+ dnsMinTTLSecs = 300
+ dnsUpdateFreqMins = 15
+ dnsUpdateFudgeSecs = 120
+)
+
+type hostedZoneInfo struct {
+ domainId int
+ resourceName string
+}
+
+// DNSProvider implements the acme.ChallengeProvider interface.
+type DNSProvider struct {
+ linode *dns.DNS
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for Linode.
+// Credentials must be passed in the environment variable: LINODE_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ apiKey := os.Getenv("LINODE_API_KEY")
+ return NewDNSProviderCredentials(apiKey)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for Linode.
+func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) {
+ if len(apiKey) == 0 {
+ return nil, errors.New("Linode credentials missing")
+ }
+
+ return &DNSProvider{
+ linode: dns.New(apiKey),
+ }, nil
+}
+
+// Timeout returns the timeout and interval to use when checking for DNS
+// propagation. Adjusting here to cope with spikes in propagation times.
+func (p *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ // Since Linode only updates their zone files every X minutes, we need
+ // to figure out how many minutes we have to wait until we hit the next
+ // interval of X. We then wait another couple of minutes, just to be
+ // safe. Hopefully at some point during all of this, the record will
+ // have propagated throughout Linode's network.
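+ // For example, if the current time is hh:07, minsRemaining is 15-7 = 8,
+ // so the timeout comes out to roughly 8m + 300s (minimum TTL) + 120s
+ // (fudge) = 15 minutes.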
+ minsRemaining := dnsUpdateFreqMins - (time.Now().Minute() % dnsUpdateFreqMins)
+
+ timeout = (time.Duration(minsRemaining) * time.Minute) +
+ (dnsMinTTLSecs * time.Second) +
+ (dnsUpdateFudgeSecs * time.Second)
+ interval = 15 * time.Second
+ return
+}
+
+// Present creates a TXT record using the specified parameters.
+func (p *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ zone, err := p.getHostedZoneInfo(fqdn)
+ if err != nil {
+ return err
+ }
+
+ if _, err = p.linode.CreateDomainResourceTXT(zone.domainId, acme.UnFqdn(fqdn), value, 60); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters.
+func (p *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ zone, err := p.getHostedZoneInfo(fqdn)
+ if err != nil {
+ return err
+ }
+
+ // Get all TXT records for the specified domain.
+ resources, err := p.linode.GetResourcesByType(zone.domainId, "TXT")
+ if err != nil {
+ return err
+ }
+
+ // Remove the specified resource, if it exists.
+ for _, resource := range resources {
+ if resource.Name == zone.resourceName && resource.Target == value {
+ resp, err := p.linode.DeleteDomainResource(resource.DomainID, resource.ResourceID)
+ if err != nil {
+ return err
+ }
+ if resp.ResourceID != resource.ResourceID {
+ return errors.New("Error deleting resource: resource IDs do not match!")
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+func (p *DNSProvider) getHostedZoneInfo(fqdn string) (*hostedZoneInfo, error) {
+ // Lookup the zone that handles the specified FQDN.
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return nil, err
+ }
+ resourceName := strings.TrimSuffix(fqdn, "."+authZone)
+
+ // Query the authority zone.
+ domain, err := p.linode.GetDomain(acme.UnFqdn(authZone))
+ if err != nil {
+ return nil, err
+ }
+
+ return &hostedZoneInfo{
+ domainId: domain.DomainID,
+ resourceName: resourceName,
+ }, nil
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go b/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go
new file mode 100644
index 000000000..d9713a275
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/linode/linode_test.go
@@ -0,0 +1,317 @@
+package linode
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/timewasted/linode"
+ "github.com/timewasted/linode/dns"
+)
+
+type (
+ LinodeResponse struct {
+ Action string `json:"ACTION"`
+ Data interface{} `json:"DATA"`
+ Errors []linode.ResponseError `json:"ERRORARRAY"`
+ }
+ MockResponse struct {
+ Response interface{}
+ Errors []linode.ResponseError
+ }
+ MockResponseMap map[string]MockResponse
+)
+
+var (
+ apiKey string
+ isTestLive bool
+)
+
+func init() {
+ apiKey = os.Getenv("LINODE_API_KEY")
+ isTestLive = len(apiKey) != 0
+}
+
+func restoreEnv() {
+ os.Setenv("LINODE_API_KEY", apiKey)
+}
+
+func newMockServer(t *testing.T, responses MockResponseMap) *httptest.Server {
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Ensure that we support the requested action.
+ action := r.URL.Query().Get("api_action")
+ resp, ok := responses[action]
+ if !ok {
+ msg := fmt.Sprintf("Unsupported mock action: %s", action)
+ require.FailNow(t, msg)
+ }
+
+ // Build the response that the server will return.
+ linodeResponse := LinodeResponse{
+ Action: action,
+ Data: resp.Response,
+ Errors: resp.Errors,
+ }
+ rawResponse, err := json.Marshal(linodeResponse)
+ if err != nil {
+ msg := fmt.Sprintf("Failed to JSON encode response: %v", err)
+ require.FailNow(t, msg)
+ }
+
+ // Send the response.
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write(rawResponse)
+ }))
+
+ time.Sleep(100 * time.Millisecond)
+ return srv
+}
+
+func TestNewDNSProviderWithEnv(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+}
+
+func TestNewDNSProviderWithoutEnv(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "Linode credentials missing")
+}
+
+func TestNewDNSProviderCredentialsWithKey(t *testing.T) {
+ _, err := NewDNSProviderCredentials("testing")
+ assert.NoError(t, err)
+}
+
+func TestNewDNSProviderCredentialsWithoutKey(t *testing.T) {
+ _, err := NewDNSProviderCredentials("")
+ assert.EqualError(t, err, "Linode credentials missing")
+}
+
+func TestDNSProvider_Present(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: domain,
+ DomainID: 1234,
+ },
+ },
+ },
+ "domain.resource.create": MockResponse{
+ Response: dns.ResourceResponse{
+ ResourceID: 1234,
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.Present(domain, "", keyAuth)
+ assert.NoError(t, err)
+}
+
+func TestDNSProvider_PresentNoDomain(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: "foobar.com",
+ DomainID: 1234,
+ },
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.Present(domain, "", keyAuth)
+ assert.EqualError(t, err, "dns: requested domain not found")
+}
+
+func TestDNSProvider_PresentCreateFailed(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: domain,
+ DomainID: 1234,
+ },
+ },
+ },
+ "domain.resource.create": MockResponse{
+ Response: nil,
+ Errors: []linode.ResponseError{
+ linode.ResponseError{
+ Code: 1234,
+ Message: "Failed to create domain resource",
+ },
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.Present(domain, "", keyAuth)
+ assert.EqualError(t, err, "Failed to create domain resource")
+}
+
+func TestDNSProvider_PresentLive(t *testing.T) {
+ if !isTestLive {
+ t.Skip("Skipping live test")
+ }
+}
+
+func TestDNSProvider_CleanUp(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: domain,
+ DomainID: 1234,
+ },
+ },
+ },
+ "domain.resource.list": MockResponse{
+ Response: []dns.Resource{
+ dns.Resource{
+ DomainID: 1234,
+ Name: "_acme-challenge",
+ ResourceID: 1234,
+ Target: "ElbOJKOkFWiZLQeoxf-wb3IpOsQCdvoM0y_wn0TEkxM",
+ Type: "TXT",
+ },
+ },
+ },
+ "domain.resource.delete": MockResponse{
+ Response: dns.ResourceResponse{
+ ResourceID: 1234,
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.CleanUp(domain, "", keyAuth)
+ assert.NoError(t, err)
+}
+
+func TestDNSProvider_CleanUpNoDomain(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: "foobar.com",
+ DomainID: 1234,
+ },
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.CleanUp(domain, "", keyAuth)
+ assert.EqualError(t, err, "dns: requested domain not found")
+}
+
+func TestDNSProvider_CleanUpDeleteFailed(t *testing.T) {
+ os.Setenv("LINODE_API_KEY", "testing")
+ defer restoreEnv()
+ p, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ domain := "example.com"
+ keyAuth := "dGVzdGluZw=="
+ mockResponses := MockResponseMap{
+ "domain.list": MockResponse{
+ Response: []dns.Domain{
+ dns.Domain{
+ Domain: domain,
+ DomainID: 1234,
+ },
+ },
+ },
+ "domain.resource.list": MockResponse{
+ Response: []dns.Resource{
+ dns.Resource{
+ DomainID: 1234,
+ Name: "_acme-challenge",
+ ResourceID: 1234,
+ Target: "ElbOJKOkFWiZLQeoxf-wb3IpOsQCdvoM0y_wn0TEkxM",
+ Type: "TXT",
+ },
+ },
+ },
+ "domain.resource.delete": MockResponse{
+ Response: nil,
+ Errors: []linode.ResponseError{
+ linode.ResponseError{
+ Code: 1234,
+ Message: "Failed to delete domain resource",
+ },
+ },
+ },
+ }
+ mockSrv := newMockServer(t, mockResponses)
+ defer mockSrv.Close()
+ p.linode.ToLinode().SetEndpoint(mockSrv.URL)
+
+ err = p.CleanUp(domain, "", keyAuth)
+ assert.EqualError(t, err, "Failed to delete domain resource")
+}
+
+func TestDNSProvider_CleanUpLive(t *testing.T) {
+ if !isTestLive {
+ t.Skip("Skipping live test")
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go
new file mode 100644
index 000000000..d7eb40935
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go
@@ -0,0 +1,416 @@
+// Package namecheap implements a DNS provider for solving the DNS-01
+// challenge using namecheap DNS.
+package namecheap
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// Notes about namecheap's tool API:
+// 1. Using the API requires registration. Once registered, use your account
+// name and API key to access the API.
+// 2. There is no API to add or modify a single DNS record. Instead you must
+// read the entire list of records, make modifications, and then write the
+// entire updated list of records. (Yuck.)
+// 3. Namecheap's DNS updates can be slow to propagate. I've seen them take
+// as long as an hour.
+// 4. Namecheap requires you to whitelist the IP address from which you call
+// its APIs. It also requires all API calls to include the whitelisted IP
+// address as a form or query string value. This code uses a namecheap
+// service to query the client's IP address.
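+//
+// Because of note 2, Present and CleanUp below follow a read-modify-write
+// pattern, roughly (sketch only, error handling omitted):
+//
+//     hosts, _ := d.getHosts(ch)       // read the full record list
+//     d.addChallengeRecord(ch, &hosts) // modify it in memory
+//     d.setHosts(ch, hosts)            // write the whole list back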
+
+var (
+ debug = false
+ defaultBaseURL = "https://api.namecheap.com/xml.response"
+ getIPURL = "https://dynamicdns.park-your-domain.com/getip"
+ httpClient = http.Client{Timeout: 60 * time.Second}
+)
+
+// DNSProvider is an implementation of the ChallengeProviderTimeout interface
+// that uses Namecheap's tool API to manage TXT records for a domain.
+type DNSProvider struct {
+ baseURL string
+ apiUser string
+ apiKey string
+ clientIP string
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for namecheap.
+// Credentials must be passed in the environment variables: NAMECHEAP_API_USER
+// and NAMECHEAP_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ apiUser := os.Getenv("NAMECHEAP_API_USER")
+ apiKey := os.Getenv("NAMECHEAP_API_KEY")
+ return NewDNSProviderCredentials(apiUser, apiKey)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for namecheap.
+func NewDNSProviderCredentials(apiUser, apiKey string) (*DNSProvider, error) {
+ if apiUser == "" || apiKey == "" {
+ return nil, fmt.Errorf("Namecheap credentials missing")
+ }
+
+ clientIP, err := getClientIP()
+ if err != nil {
+ return nil, err
+ }
+
+ return &DNSProvider{
+ baseURL: defaultBaseURL,
+ apiUser: apiUser,
+ apiKey: apiKey,
+ clientIP: clientIP,
+ }, nil
+}
+
+// Timeout returns the timeout and interval to use when checking for DNS
+// propagation. Namecheap can sometimes take a long time to complete an
+// update, so wait up to 60 minutes for the update to propagate.
+func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 60 * time.Minute, 15 * time.Second
+}
+
+// host describes a DNS record returned by the Namecheap DNS gethosts API.
+// Namecheap uses the term "host" to refer to all DNS records that include
+// a host field (A, AAAA, CNAME, NS, TXT, URL).
+type host struct {
+ Type string `xml:",attr"`
+ Name string `xml:",attr"`
+ Address string `xml:",attr"`
+ MXPref string `xml:",attr"`
+ TTL string `xml:",attr"`
+}
+
+// apierror describes an error record in a namecheap API response.
+type apierror struct {
+ Number int `xml:",attr"`
+ Description string `xml:",innerxml"`
+}
+
+// getClientIP returns the client's public IP address. It uses namecheap's
+// IP discovery service to perform the lookup.
+func getClientIP() (addr string, err error) {
+ resp, err := httpClient.Get(getIPURL)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ clientIP, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ if debug {
+ fmt.Println("Client IP:", string(clientIP))
+ }
+ return string(clientIP), nil
+}
+
+// A challenge represents all the data needed to specify a dns-01 challenge
+// to Let's Encrypt.
+type challenge struct {
+ domain string
+ key string
+ keyFqdn string
+ keyValue string
+ tld string
+ sld string
+ host string
+}
+
+// newChallenge builds a challenge record from a domain name, a challenge
+// authentication key, and a map of available TLDs.
+func newChallenge(domain, keyAuth string, tlds map[string]string) (*challenge, error) {
+ domain = acme.UnFqdn(domain)
+ parts := strings.Split(domain, ".")
+
+ // Find the longest matching TLD.
+ longest := -1
+ for i := len(parts); i > 0; i-- {
+ t := strings.Join(parts[i-1:], ".")
+ if _, found := tlds[t]; found {
+ longest = i - 1
+ }
+ }
+ if longest < 1 {
+ return nil, fmt.Errorf("Invalid domain name '%s'", domain)
+ }
+
+ tld := strings.Join(parts[longest:], ".")
+ sld := parts[longest-1]
+
+ var host string
+ if longest >= 1 {
+ host = strings.Join(parts[:longest-1], ".")
+ }
+
+ key, keyValue, _ := acme.DNS01Record(domain, keyAuth)
+
+ return &challenge{
+ domain: domain,
+ key: "_acme-challenge." + host,
+ keyFqdn: key,
+ keyValue: keyValue,
+ tld: tld,
+ sld: sld,
+ host: host,
+ }, nil
+}
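+
+// For example, with a TLD list that contains "com.au", the domain
+// "www.test.com.au" splits into tld "com.au", sld "test" and host "www",
+// so the challenge TXT record is added under the name "_acme-challenge.www".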
+
+// setGlobalParams adds the namecheap global parameters to the provided url
+// Values record.
+func (d *DNSProvider) setGlobalParams(v *url.Values, cmd string) {
+ v.Set("ApiUser", d.apiUser)
+ v.Set("ApiKey", d.apiKey)
+ v.Set("UserName", d.apiUser)
+ v.Set("ClientIp", d.clientIP)
+ v.Set("Command", cmd)
+}
+
+// getTLDs requests the list of available TLDs from namecheap.
+func (d *DNSProvider) getTLDs() (tlds map[string]string, err error) {
+ values := make(url.Values)
+ d.setGlobalParams(&values, "namecheap.domains.getTldList")
+
+ reqURL, _ := url.Parse(d.baseURL)
+ reqURL.RawQuery = values.Encode()
+
+ resp, err := httpClient.Get(reqURL.String())
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("getTLDs HTTP error %d", resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ type GetTldsResponse struct {
+ XMLName xml.Name `xml:"ApiResponse"`
+ Errors []apierror `xml:"Errors>Error"`
+ Result []struct {
+ Name string `xml:",attr"`
+ } `xml:"CommandResponse>Tlds>Tld"`
+ }
+
+ var gtr GetTldsResponse
+ if err := xml.Unmarshal(body, &gtr); err != nil {
+ return nil, err
+ }
+ if len(gtr.Errors) > 0 {
+ return nil, fmt.Errorf("Namecheap error: %s [%d]",
+ gtr.Errors[0].Description, gtr.Errors[0].Number)
+ }
+
+ tlds = make(map[string]string)
+ for _, t := range gtr.Result {
+ tlds[t.Name] = t.Name
+ }
+ return tlds, nil
+}
+
+// getHosts reads the full list of DNS host records using the Namecheap API.
+func (d *DNSProvider) getHosts(ch *challenge) (hosts []host, err error) {
+ values := make(url.Values)
+ d.setGlobalParams(&values, "namecheap.domains.dns.getHosts")
+ values.Set("SLD", ch.sld)
+ values.Set("TLD", ch.tld)
+
+ reqURL, _ := url.Parse(d.baseURL)
+ reqURL.RawQuery = values.Encode()
+
+ resp, err := httpClient.Get(reqURL.String())
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("getHosts HTTP error %d", resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ type GetHostsResponse struct {
+ XMLName xml.Name `xml:"ApiResponse"`
+ Status string `xml:"Status,attr"`
+ Errors []apierror `xml:"Errors>Error"`
+ Hosts []host `xml:"CommandResponse>DomainDNSGetHostsResult>host"`
+ }
+
+ var ghr GetHostsResponse
+ if err = xml.Unmarshal(body, &ghr); err != nil {
+ return nil, err
+ }
+ if len(ghr.Errors) > 0 {
+ return nil, fmt.Errorf("Namecheap error: %s [%d]",
+ ghr.Errors[0].Description, ghr.Errors[0].Number)
+ }
+
+ return ghr.Hosts, nil
+}
+
+// setHosts writes the full list of DNS host records using the Namecheap API.
+func (d *DNSProvider) setHosts(ch *challenge, hosts []host) error {
+ values := make(url.Values)
+ d.setGlobalParams(&values, "namecheap.domains.dns.setHosts")
+ values.Set("SLD", ch.sld)
+ values.Set("TLD", ch.tld)
+
+ for i, h := range hosts {
+ ind := fmt.Sprintf("%d", i+1)
+ values.Add("HostName"+ind, h.Name)
+ values.Add("RecordType"+ind, h.Type)
+ values.Add("Address"+ind, h.Address)
+ values.Add("MXPref"+ind, h.MXPref)
+ values.Add("TTL"+ind, h.TTL)
+ }
+
+ resp, err := httpClient.PostForm(d.baseURL, values)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return fmt.Errorf("setHosts HTTP error %d", resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ type SetHostsResponse struct {
+ XMLName xml.Name `xml:"ApiResponse"`
+ Status string `xml:"Status,attr"`
+ Errors []apierror `xml:"Errors>Error"`
+ Result struct {
+ IsSuccess string `xml:",attr"`
+ } `xml:"CommandResponse>DomainDNSSetHostsResult"`
+ }
+
+ var shr SetHostsResponse
+ if err := xml.Unmarshal(body, &shr); err != nil {
+ return err
+ }
+ if len(shr.Errors) > 0 {
+ return fmt.Errorf("Namecheap error: %s [%d]",
+ shr.Errors[0].Description, shr.Errors[0].Number)
+ }
+ if shr.Result.IsSuccess != "true" {
+ return fmt.Errorf("Namecheap setHosts failed.")
+ }
+
+ return nil
+}
+
+// addChallengeRecord adds a DNS challenge TXT record to a list of namecheap
+// host records.
+func (d *DNSProvider) addChallengeRecord(ch *challenge, hosts *[]host) {
+ host := host{
+ Name: ch.key,
+ Type: "TXT",
+ Address: ch.keyValue,
+ MXPref: "10",
+ TTL: "120",
+ }
+
+ // If there's already a TXT record with the same name, replace it.
+ for i, h := range *hosts {
+ if h.Name == ch.key && h.Type == "TXT" {
+ (*hosts)[i] = host
+ return
+ }
+ }
+
+ // No record was replaced, so add a new one.
+ *hosts = append(*hosts, host)
+}
+
+// removeChallengeRecord removes a DNS challenge TXT record from a list of
+// namecheap host records. It returns true if a record was removed.
+func (d *DNSProvider) removeChallengeRecord(ch *challenge, hosts *[]host) bool {
+ // Find the challenge TXT record and remove it if found.
+ for i, h := range *hosts {
+ if h.Name == ch.key && h.Type == "TXT" {
+ *hosts = append((*hosts)[:i], (*hosts)[i+1:]...)
+ return true
+ }
+ }
+
+ return false
+}
+
+// Present installs a TXT record for the DNS challenge.
+func (d *DNSProvider) Present(domain, token, keyAuth string) error {
+ tlds, err := d.getTLDs()
+ if err != nil {
+ return err
+ }
+
+ ch, err := newChallenge(domain, keyAuth, tlds)
+ if err != nil {
+ return err
+ }
+
+ hosts, err := d.getHosts(ch)
+ if err != nil {
+ return err
+ }
+
+ d.addChallengeRecord(ch, &hosts)
+
+ if debug {
+ for _, h := range hosts {
+ fmt.Printf(
+ "%-5.5s %-30.30s %-6s %-70.70s\n",
+ h.Type, h.Name, h.TTL, h.Address)
+ }
+ }
+
+ return d.setHosts(ch, hosts)
+}
+
+// CleanUp removes a TXT record used for a previous DNS challenge.
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ tlds, err := d.getTLDs()
+ if err != nil {
+ return err
+ }
+
+ ch, err := newChallenge(domain, keyAuth, tlds)
+ if err != nil {
+ return err
+ }
+
+ hosts, err := d.getHosts(ch)
+ if err != nil {
+ return err
+ }
+
+ if removed := d.removeChallengeRecord(ch, &hosts); !removed {
+ return nil
+ }
+
+ return d.setHosts(ch, hosts)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go
new file mode 100644
index 000000000..0631d4a3e
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap_test.go
@@ -0,0 +1,402 @@
+package namecheap
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+)
+
+var (
+ fakeUser = "foo"
+ fakeKey = "bar"
+ fakeClientIP = "10.0.0.1"
+
+ tlds = map[string]string{
+ "com.au": "com.au",
+ "com": "com",
+ "co.uk": "co.uk",
+ "uk": "uk",
+ "edu": "edu",
+ "co.com": "co.com",
+ "za.com": "za.com",
+ }
+)
+
+func assertEq(t *testing.T, variable, got, want string) {
+ if got != want {
+ t.Errorf("Expected %s to be '%s' but got '%s'", variable, want, got)
+ }
+}
+
+func assertHdr(tc *testcase, t *testing.T, values *url.Values) {
+ ch, _ := newChallenge(tc.domain, "", tlds)
+
+ assertEq(t, "ApiUser", values.Get("ApiUser"), fakeUser)
+ assertEq(t, "ApiKey", values.Get("ApiKey"), fakeKey)
+ assertEq(t, "UserName", values.Get("UserName"), fakeUser)
+ assertEq(t, "ClientIp", values.Get("ClientIp"), fakeClientIP)
+ assertEq(t, "SLD", values.Get("SLD"), ch.sld)
+ assertEq(t, "TLD", values.Get("TLD"), ch.tld)
+}
+
+func mockServer(tc *testcase, t *testing.T, w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+
+ case "GET":
+ values := r.URL.Query()
+ cmd := values.Get("Command")
+ switch cmd {
+ case "namecheap.domains.dns.getHosts":
+ assertHdr(tc, t, &values)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, tc.getHostsResponse)
+ case "namecheap.domains.getTldList":
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, responseGetTlds)
+ default:
+ t.Errorf("Unexpected GET command: %s", cmd)
+ }
+
+ case "POST":
+ r.ParseForm()
+ values := r.Form
+ cmd := values.Get("Command")
+ switch cmd {
+ case "namecheap.domains.dns.setHosts":
+ assertHdr(tc, t, &values)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, tc.setHostsResponse)
+ default:
+ t.Errorf("Unexpected POST command: %s", cmd)
+ }
+
+ default:
+ t.Errorf("Unexpected http method: %s", r.Method)
+
+ }
+}
+
+func testGetHosts(tc *testcase, t *testing.T) {
+ mock := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ mockServer(tc, t, w, r)
+ }))
+ defer mock.Close()
+
+ prov := &DNSProvider{
+ baseURL: mock.URL,
+ apiUser: fakeUser,
+ apiKey: fakeKey,
+ clientIP: fakeClientIP,
+ }
+
+ ch, _ := newChallenge(tc.domain, "", tlds)
+ hosts, err := prov.getHosts(ch)
+ if tc.errString != "" {
+ if err == nil || err.Error() != tc.errString {
+ t.Errorf("Namecheap getHosts case %s expected error", tc.name)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
+ }
+ }
+
+next1:
+ for _, h := range hosts {
+ for _, th := range tc.hosts {
+ if h == th {
+ continue next1
+ }
+ }
+ t.Errorf("getHosts case %s unexpected record [%s:%s:%s]",
+ tc.name, h.Type, h.Name, h.Address)
+ }
+
+next2:
+ for _, th := range tc.hosts {
+ for _, h := range hosts {
+ if h == th {
+ continue next2
+ }
+ }
+ t.Errorf("getHosts case %s missing record [%s:%s:%s]",
+ tc.name, th.Type, th.Name, th.Address)
+ }
+}
+
+func mockDNSProvider(url string) *DNSProvider {
+ return &DNSProvider{
+ baseURL: url,
+ apiUser: fakeUser,
+ apiKey: fakeKey,
+ clientIP: fakeClientIP,
+ }
+}
+
+func testSetHosts(tc *testcase, t *testing.T) {
+ mock := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ mockServer(tc, t, w, r)
+ }))
+ defer mock.Close()
+
+ prov := mockDNSProvider(mock.URL)
+ ch, _ := newChallenge(tc.domain, "", tlds)
+ hosts, err := prov.getHosts(ch)
+ if tc.errString != "" {
+ if err == nil || err.Error() != tc.errString {
+ t.Errorf("Namecheap getHosts case %s expected error", tc.name)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ err = prov.setHosts(ch, hosts)
+ if err != nil {
+ t.Errorf("Namecheap setHosts case %s failed", tc.name)
+ }
+}
+
+func testPresent(tc *testcase, t *testing.T) {
+ mock := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ mockServer(tc, t, w, r)
+ }))
+ defer mock.Close()
+
+ prov := mockDNSProvider(mock.URL)
+ err := prov.Present(tc.domain, "", "dummyKey")
+ if tc.errString != "" {
+ if err == nil || err.Error() != tc.errString {
+ t.Errorf("Namecheap Present case %s expected error", tc.name)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Namecheap Present case %s failed\n%v", tc.name, err)
+ }
+ }
+}
+
+func testCleanUp(tc *testcase, t *testing.T) {
+ mock := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ mockServer(tc, t, w, r)
+ }))
+ defer mock.Close()
+
+ prov := mockDNSProvider(mock.URL)
+ err := prov.CleanUp(tc.domain, "", "dummyKey")
+ if tc.errString != "" {
+ if err == nil || err.Error() != tc.errString {
+ t.Errorf("Namecheap CleanUp case %s expected error", tc.name)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Namecheap CleanUp case %s failed\n%v", tc.name, err)
+ }
+ }
+}
+
+func TestNamecheap(t *testing.T) {
+ for _, tc := range testcases {
+ testGetHosts(&tc, t)
+ testSetHosts(&tc, t)
+ testPresent(&tc, t)
+ testCleanUp(&tc, t)
+ }
+}
+
+func TestNamecheapDomainSplit(t *testing.T) {
+ tests := []struct {
+ domain string
+ valid bool
+ tld string
+ sld string
+ host string
+ }{
+ {"a.b.c.test.co.uk", true, "co.uk", "test", "a.b.c"},
+ {"test.co.uk", true, "co.uk", "test", ""},
+ {"test.com", true, "com", "test", ""},
+ {"test.co.com", true, "co.com", "test", ""},
+ {"www.test.com.au", true, "com.au", "test", "www"},
+ {"www.za.com", true, "za.com", "www", ""},
+ {"", false, "", "", ""},
+ {"a", false, "", "", ""},
+ {"com", false, "", "", ""},
+ {"co.com", false, "", "", ""},
+ {"co.uk", false, "", "", ""},
+ {"test.au", false, "", "", ""},
+ {"za.com", false, "", "", ""},
+ {"www.za", false, "", "", ""},
+ {"www.test.au", false, "", "", ""},
+ {"www.test.unk", false, "", "", ""},
+ }
+
+ for _, test := range tests {
+ valid := true
+ ch, err := newChallenge(test.domain, "", tlds)
+ if err != nil {
+ valid = false
+ }
+
+ if test.valid && !valid {
+ t.Errorf("Expected '%s' to split", test.domain)
+ } else if !test.valid && valid {
+ t.Errorf("Expected '%s' to produce error", test.domain)
+ }
+
+ if test.valid && valid {
+ assertEq(t, "domain", ch.domain, test.domain)
+ assertEq(t, "tld", ch.tld, test.tld)
+ assertEq(t, "sld", ch.sld, test.sld)
+ assertEq(t, "host", ch.host, test.host)
+ }
+ }
+}
+
+type testcase struct {
+ name string
+ domain string
+ hosts []host
+ errString string
+ getHostsResponse string
+ setHostsResponse string
+}
+
+var testcases = []testcase{
+ {
+ "Test:Success:1",
+ "test.example.com",
+ []host{
+ {"A", "home", "10.0.0.1", "10", "1799"},
+ {"A", "www", "10.0.0.2", "10", "1200"},
+ {"AAAA", "a", "::0", "10", "1799"},
+ {"CNAME", "*", "example.com.", "10", "1799"},
+ {"MXE", "example.com", "10.0.0.5", "10", "1800"},
+ {"URL", "xyz", "https://google.com", "10", "1799"},
+ },
+ "",
+ responseGetHostsSuccess1,
+ responseSetHostsSuccess1,
+ },
+ {
+ "Test:Success:2",
+ "example.com",
+ []host{
+ {"A", "@", "10.0.0.2", "10", "1200"},
+ {"A", "www", "10.0.0.3", "10", "60"},
+ },
+ "",
+ responseGetHostsSuccess2,
+ responseSetHostsSuccess2,
+ },
+ {
+ "Test:Error:BadApiKey:1",
+ "test.example.com",
+ nil,
+ "Namecheap error: API Key is invalid or API access has not been enabled [1011102]",
+ responseGetHostsErrorBadAPIKey1,
+ "",
+ },
+}
+
+var responseGetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
+ <Errors />
+ <Warnings />
+ <RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
+ <CommandResponse Type="namecheap.domains.dns.getHosts">
+ <DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
+ <host HostId="217076" Name="www" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217069" Name="home" Type="A" Address="10.0.0.1" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217071" Name="a" Type="AAAA" Address="::0" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217075" Name="*" Type="CNAME" Address="example.com." MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217073" Name="example.com" Type="MXE" Address="10.0.0.5" MXPref="10" TTL="1800" AssociatedAppTitle="MXE" FriendlyName="MXE1" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217077" Name="xyz" Type="URL" Address="https://google.com" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ </DomainDNSGetHostsResult>
+ </CommandResponse>
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>3.338</ExecutionTime>
+</ApiResponse>`
+
+var responseSetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
+ <Errors />
+ <Warnings />
+ <RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
+ <CommandResponse Type="namecheap.domains.dns.setHosts">
+ <DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
+ <Warnings />
+ </DomainDNSSetHostsResult>
+ </CommandResponse>
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>2.347</ExecutionTime>
+</ApiResponse>`
+
+var responseGetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
+ <Errors />
+ <Warnings />
+ <RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
+ <CommandResponse Type="namecheap.domains.dns.getHosts">
+ <DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
+ <host HostId="217076" Name="@" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ <host HostId="217069" Name="www" Type="A" Address="10.0.0.3" MXPref="10" TTL="60" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
+ </DomainDNSGetHostsResult>
+ </CommandResponse>
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>3.338</ExecutionTime>
+</ApiResponse>`
+
+var responseSetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
+ <Errors />
+ <Warnings />
+ <RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
+ <CommandResponse Type="namecheap.domains.dns.setHosts">
+ <DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
+ <Warnings />
+ </DomainDNSSetHostsResult>
+ </CommandResponse>
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>2.347</ExecutionTime>
+</ApiResponse>`
+
+var responseGetHostsErrorBadAPIKey1 = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="ERROR" xmlns="http://api.namecheap.com/xml.response">
+ <Errors>
+ <Error Number="1011102">API Key is invalid or API access has not been enabled</Error>
+ </Errors>
+ <Warnings />
+ <RequestedCommand />
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>0</ExecutionTime>
+</ApiResponse>`
+
+var responseGetTlds = `<?xml version="1.0" encoding="utf-8"?>
+<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
+ <Errors />
+ <Warnings />
+ <RequestedCommand>namecheap.domains.getTldList</RequestedCommand>
+ <CommandResponse Type="namecheap.domains.getTldList">
+ <Tlds>
+ <Tld Name="com" NonRealTime="false" MinRegisterYears="1" MaxRegisterYears="10" MinRenewYears="1" MaxRenewYears="10" RenewalMinDays="0" RenewalMaxDays="4000" ReactivateMaxDays="27" MinTransferYears="1" MaxTransferYears="1" IsApiRegisterable="true" IsApiRenewable="true" IsApiTransferable="true" IsEppRequired="true" IsDisableModContact="false" IsDisableWGAllot="false" IsIncludeInExtendedSearchOnly="false" SequenceNumber="10" Type="GTLD" SubType="" IsSupportsIDN="true" Category="A" SupportsRegistrarLock="true" AddGracePeriodDays="5" WhoisVerification="false" ProviderApiDelete="true" TldState="" SearchGroup="" Registry="">Most recognized top level domain<Categories><TldCategory Name="popular" SequenceNumber="10" /></Categories></Tld>
+ </Tlds>
+ </CommandResponse>
+ <Server>PHX01SBAPI01</Server>
+ <GMTTimeDifference>--5:00</GMTTimeDifference>
+ <ExecutionTime>0.004</ExecutionTime>
+</ApiResponse>`
diff --git a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go
new file mode 100644
index 000000000..290a8d7df
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh.go
@@ -0,0 +1,159 @@
+// Package ovh implements a DNS provider for solving the DNS-01
+// challenge using OVH DNS.
+package ovh
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/ovh/go-ovh/ovh"
+ "github.com/xenolf/lego/acme"
+)
+
+// OVH API reference: https://eu.api.ovh.com/
+// Create a Token: https://eu.api.ovh.com/createToken/
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface
+// that uses OVH's REST API to manage TXT records for a domain.
+type DNSProvider struct {
+ client *ovh.Client
+ recordIDs map[string]int
+ recordIDsMu sync.Mutex
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for OVH.
+// Credentials must be passed in the environment variables:
+// OVH_ENDPOINT : it must be ovh-eu or ovh-ca
+// OVH_APPLICATION_KEY
+// OVH_APPLICATION_SECRET
+// OVH_CONSUMER_KEY
+func NewDNSProvider() (*DNSProvider, error) {
+ apiEndpoint := os.Getenv("OVH_ENDPOINT")
+ applicationKey := os.Getenv("OVH_APPLICATION_KEY")
+ applicationSecret := os.Getenv("OVH_APPLICATION_SECRET")
+ consumerKey := os.Getenv("OVH_CONSUMER_KEY")
+ return NewDNSProviderCredentials(apiEndpoint, applicationKey, applicationSecret, consumerKey)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for OVH.
+func NewDNSProviderCredentials(apiEndpoint, applicationKey, applicationSecret, consumerKey string) (*DNSProvider, error) {
+ if apiEndpoint == "" || applicationKey == "" || applicationSecret == "" || consumerKey == "" {
+ return nil, fmt.Errorf("OVH credentials missing")
+ }
+
+ ovhClient, _ := ovh.NewClient(
+ apiEndpoint,
+ applicationKey,
+ applicationSecret,
+ consumerKey,
+ )
+
+ return &DNSProvider{
+ client: ovhClient,
+ recordIDs: make(map[string]int),
+ }, nil
+}
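+
+// Usage sketch (illustrative only; every argument value is a placeholder):
+//
+//     provider, err := NewDNSProviderCredentials("ovh-eu", appKey, appSecret, consumerKey)
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     err = provider.Present("example.com", "", keyAuth)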
+
+// Present creates a TXT record to fulfil the dns-01 challenge.
+func (d *DNSProvider) Present(domain, token, keyAuth string) error {
+
+ // txtRecordRequest represents the request body sent to the OVH API to create a TXT record
+ type txtRecordRequest struct {
+ FieldType string `json:"fieldType"`
+ SubDomain string `json:"subDomain"`
+ Target string `json:"target"`
+ TTL int `json:"ttl"`
+ }
+
+ // txtRecordResponse represents the OVH API response after creating a TXT record
+ type txtRecordResponse struct {
+ ID int `json:"id"`
+ FieldType string `json:"fieldType"`
+ SubDomain string `json:"subDomain"`
+ Target string `json:"target"`
+ TTL int `json:"ttl"`
+ Zone string `json:"zone"`
+ }
+
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+
+ // Parse domain name
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err)
+ }
+
+ authZone = acme.UnFqdn(authZone)
+ subDomain := d.extractRecordName(fqdn, authZone)
+
+ reqURL := fmt.Sprintf("/domain/zone/%s/record", authZone)
+ reqData := txtRecordRequest{FieldType: "TXT", SubDomain: subDomain, Target: value, TTL: ttl}
+ var respData txtRecordResponse
+
+ // Create TXT record
+ err = d.client.Post(reqURL, reqData, &respData)
+ if err != nil {
+ fmt.Printf("Error calling OVH API to add record: %q\n", err)
+ return err
+ }
+
+ // Apply the change
+ reqURL = fmt.Sprintf("/domain/zone/%s/refresh", authZone)
+ err = d.client.Post(reqURL, nil, nil)
+ if err != nil {
+ fmt.Printf("Error calling OVH API to refresh zone: %q\n", err)
+ return err
+ }
+
+ d.recordIDsMu.Lock()
+ d.recordIDs[fqdn] = respData.ID
+ d.recordIDsMu.Unlock()
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ // get the record's unique ID from when we created it
+ d.recordIDsMu.Lock()
+ recordID, ok := d.recordIDs[fqdn]
+ d.recordIDsMu.Unlock()
+ if !ok {
+ return fmt.Errorf("unknown record ID for '%s'", fqdn)
+ }
+
+ authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers)
+ if err != nil {
+ return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err)
+ }
+
+ authZone = acme.UnFqdn(authZone)
+
+ reqURL := fmt.Sprintf("/domain/zone/%s/record/%d", authZone, recordID)
+
+ err = d.client.Delete(reqURL, nil)
+ if err != nil {
+ fmt.Printf("Error when call OVH api to delete challenge record : %q \n", err)
+ return err
+ }
+
+ // Delete record ID from map
+ d.recordIDsMu.Lock()
+ delete(d.recordIDs, fqdn)
+ d.recordIDsMu.Unlock()
+
+ return nil
+}
+
+func (d *DNSProvider) extractRecordName(fqdn, domain string) string {
+ name := acme.UnFqdn(fqdn)
+ if idx := strings.Index(name, "."+domain); idx != -1 {
+ return name[:idx]
+ }
+ return name
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go
new file mode 100644
index 000000000..47da60e57
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/ovh/ovh_test.go
@@ -0,0 +1,103 @@
+package ovh
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ liveTest bool
+ apiEndpoint string
+ applicationKey string
+ applicationSecret string
+ consumerKey string
+ domain string
+)
+
+func init() {
+ apiEndpoint = os.Getenv("OVH_ENDPOINT")
+ applicationKey = os.Getenv("OVH_APPLICATION_KEY")
+ applicationSecret = os.Getenv("OVH_APPLICATION_SECRET")
+ consumerKey = os.Getenv("OVH_CONSUMER_KEY")
+ liveTest = len(apiEndpoint) > 0 && len(applicationKey) > 0 && len(applicationSecret) > 0 && len(consumerKey) > 0
+}
+
+func restoreEnv() {
+ os.Setenv("OVH_ENDPOINT", apiEndpoint)
+ os.Setenv("OVH_APPLICATION_KEY", applicationKey)
+ os.Setenv("OVH_APPLICATION_SECRET", applicationSecret)
+ os.Setenv("OVH_CONSUMER_KEY", consumerKey)
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ os.Setenv("OVH_ENDPOINT", "ovh-eu")
+ os.Setenv("OVH_APPLICATION_KEY", "1234")
+ os.Setenv("OVH_APPLICATION_SECRET", "5678")
+ os.Setenv("OVH_CONSUMER_KEY", "abcde")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+}
+
+func TestNewDNSProviderMissingCredErr(t *testing.T) {
+ os.Setenv("OVH_ENDPOINT", "")
+ os.Setenv("OVH_APPLICATION_KEY", "1234")
+ os.Setenv("OVH_APPLICATION_SECRET", "5678")
+ os.Setenv("OVH_CONSUMER_KEY", "abcde")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "OVH credentials missing")
+
+ os.Setenv("OVH_ENDPOINT", "ovh-eu")
+ os.Setenv("OVH_APPLICATION_KEY", "")
+ os.Setenv("OVH_APPLICATION_SECRET", "5678")
+ os.Setenv("OVH_CONSUMER_KEY", "abcde")
+ defer restoreEnv()
+ _, err = NewDNSProvider()
+ assert.EqualError(t, err, "OVH credentials missing")
+
+ os.Setenv("OVH_ENDPOINT", "ovh-eu")
+ os.Setenv("OVH_APPLICATION_KEY", "1234")
+ os.Setenv("OVH_APPLICATION_SECRET", "")
+ os.Setenv("OVH_CONSUMER_KEY", "abcde")
+ defer restoreEnv()
+ _, err = NewDNSProvider()
+ assert.EqualError(t, err, "OVH credentials missing")
+
+ os.Setenv("OVH_ENDPOINT", "ovh-eu")
+ os.Setenv("OVH_APPLICATION_KEY", "1234")
+ os.Setenv("OVH_APPLICATION_SECRET", "5678")
+ os.Setenv("OVH_CONSUMER_KEY", "")
+ defer restoreEnv()
+ _, err = NewDNSProvider()
+ assert.EqualError(t, err, "OVH credentials missing")
+}
+
+func TestLivePresent(t *testing.T) {
+ if !liveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.Present(domain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestLiveCleanUp(t *testing.T) {
+ if !liveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 1)
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(domain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md b/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md
new file mode 100644
index 000000000..23abb7669
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/pdns/README.md
@@ -0,0 +1,7 @@
+## PowerDNS provider
+
+Tested and confirmed to work with PowerDNS authoritative server 3.4.8 and 4.0.1. Refer to the [PowerDNS documentation](https://doc.powerdns.com/md/httpapi/README/) for instructions on how to enable the built-in API interface.
+
+PowerDNS Notes:
+- The PowerDNS API does not currently support SSL; therefore, you should take care to ensure that traffic between lego and the PowerDNS API travels over a trusted network, VPN, etc.
+- In order to have the SOA serial automatically increment each time the `_acme-challenge` record is added or modified via the API, set `SOA-API-EDIT` to `INCEPTION-INCREMENT` for the zone in the `domainmetadata` table.
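+
+As an illustration only (the API URL, key and variable values used below are placeholders), the provider can be constructed from the environment like this:
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/xenolf/lego/providers/dns/pdns"
+)
+
+func main() {
+	// PDNS_API_URL points at the PowerDNS built-in API; PDNS_API_KEY is its api-key setting.
+	os.Setenv("PDNS_API_URL", "http://ns1.example.com:8081")
+	os.Setenv("PDNS_API_KEY", "changeme")
+
+	provider, err := pdns.NewDNSProvider()
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = provider // hand the provider to the lego client as its DNS-01 challenge solver
+}
+```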
diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go
new file mode 100644
index 000000000..a4fd22b0c
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns.go
@@ -0,0 +1,343 @@
+// Package pdns implements a DNS provider for solving the DNS-01
+// challenge using PowerDNS nameserver.
+package pdns
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface
+type DNSProvider struct {
+ apiKey string
+ host *url.URL
+ apiVersion int
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for pdns.
+// Credentials must be passed in the environment variables:
+// PDNS_API_URL and PDNS_API_KEY.
+func NewDNSProvider() (*DNSProvider, error) {
+ key := os.Getenv("PDNS_API_KEY")
+ hostUrl, err := url.Parse(os.Getenv("PDNS_API_URL"))
+ if err != nil {
+ return nil, err
+ }
+
+ return NewDNSProviderCredentials(hostUrl, key)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for pdns.
+func NewDNSProviderCredentials(host *url.URL, key string) (*DNSProvider, error) {
+ if key == "" {
+ return nil, fmt.Errorf("PDNS API key missing")
+ }
+
+ if host == nil || host.Host == "" {
+ return nil, fmt.Errorf("PDNS API URL missing")
+ }
+
+ provider := &DNSProvider{
+ host: host,
+ apiKey: key,
+ }
+ provider.getAPIVersion()
+
+ return provider, nil
+}
+
+// Timeout returns the timeout and interval to use when checking for DNS
+// propagation. Adjusting here to cope with spikes in propagation times.
+func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 120 * time.Second, 2 * time.Second
+}
+
+// Present creates a TXT record to fulfil the dns-01 challenge
+func (c *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ zone, err := c.getHostedZone(fqdn)
+ if err != nil {
+ return err
+ }
+
+ name := fqdn
+
+ // pre-v1 API wants non-fqdn
+ if c.apiVersion == 0 {
+ name = acme.UnFqdn(fqdn)
+ }
+
+ rec := pdnsRecord{
+ Content: "\"" + value + "\"",
+ Disabled: false,
+
+ // pre-v1 API
+ Type: "TXT",
+ Name: name,
+ TTL: 120,
+ }
+
+ rrsets := rrSets{
+ RRSets: []rrSet{
+ rrSet{
+ Name: name,
+ ChangeType: "REPLACE",
+ Type: "TXT",
+ Kind: "Master",
+ TTL: 120,
+ Records: []pdnsRecord{rec},
+ },
+ },
+ }
+
+ body, err := json.Marshal(rrsets)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.makeRequest("PATCH", zone.URL, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ zone, err := c.getHostedZone(fqdn)
+ if err != nil {
+ return err
+ }
+
+ set, err := c.findTxtRecord(fqdn)
+ if err != nil {
+ return err
+ }
+
+ rrsets := rrSets{
+ RRSets: []rrSet{
+ rrSet{
+ Name: set.Name,
+ Type: set.Type,
+ ChangeType: "DELETE",
+ },
+ },
+ }
+ body, err := json.Marshal(rrsets)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.makeRequest("PATCH", zone.URL, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *DNSProvider) getHostedZone(fqdn string) (*hostedZone, error) {
+ var zone hostedZone
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return nil, err
+ }
+
+ url := "/servers/localhost/zones"
+ result, err := c.makeRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ zones := []hostedZone{}
+ err = json.Unmarshal(result, &zones)
+ if err != nil {
+ return nil, err
+ }
+
+ url = ""
+ for _, zone := range zones {
+ if acme.UnFqdn(zone.Name) == acme.UnFqdn(authZone) {
+ url = zone.URL
+ }
+ }
+
+ result, err = c.makeRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(result, &zone)
+ if err != nil {
+ return nil, err
+ }
+
+ // convert pre-v1 API result
+ if len(zone.Records) > 0 {
+ zone.RRSets = []rrSet{}
+ for _, record := range zone.Records {
+ set := rrSet{
+ Name: record.Name,
+ Type: record.Type,
+ Records: []pdnsRecord{record},
+ }
+ zone.RRSets = append(zone.RRSets, set)
+ }
+ }
+
+ return &zone, nil
+}
+
+func (c *DNSProvider) findTxtRecord(fqdn string) (*rrSet, error) {
+ zone, err := c.getHostedZone(fqdn)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = c.makeRequest("GET", zone.URL, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, set := range zone.RRSets {
+ if (set.Name == acme.UnFqdn(fqdn) || set.Name == fqdn) && set.Type == "TXT" {
+ return &set, nil
+ }
+ }
+
+ return nil, fmt.Errorf("No existing record found for %s", fqdn)
+}
+
+func (c *DNSProvider) getAPIVersion() {
+ type APIVersion struct {
+ URL string `json:"url"`
+ Version int `json:"version"`
+ }
+
+ result, err := c.makeRequest("GET", "/api", nil)
+ if err != nil {
+ return
+ }
+
+ var versions []APIVersion
+ err = json.Unmarshal(result, &versions)
+ if err != nil {
+ return
+ }
+
+ latestVersion := 0
+ for _, v := range versions {
+ if v.Version > latestVersion {
+ latestVersion = v.Version
+ }
+ }
+ c.apiVersion = latestVersion
+}
+
+func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) {
+ type APIError struct {
+ Error string `json:"error"`
+ }
+ var path = ""
+ if c.host.Path != "/" {
+ path = c.host.Path
+ }
+ if c.apiVersion > 0 {
+ if !strings.HasPrefix(uri, "api/v") {
+ uri = "/api/v" + strconv.Itoa(c.apiVersion) + uri
+ } else {
+ uri = "/" + uri
+ }
+ }
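+ // For illustration: with host "http://ns1.example.com:8081" (a placeholder) and
+ // apiVersion 1, a uri of "/servers/localhost/zones" becomes
+ // "http://ns1.example.com:8081/api/v1/servers/localhost/zones".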
+ url := c.host.Scheme + "://" + c.host.Host + path + uri
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("X-API-Key", c.apiKey)
+
+ client := http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("Error talking to PDNS API -> %v", err)
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 422 && (resp.StatusCode < 200 || resp.StatusCode >= 300) {
+ return nil, fmt.Errorf("Unexpected HTTP status code %d when fetching '%s'", resp.StatusCode, url)
+ }
+
+ var msg json.RawMessage
+ err = json.NewDecoder(resp.Body).Decode(&msg)
+ switch {
+ case err == io.EOF:
+ // empty body
+ return nil, nil
+ case err != nil:
+ // other error
+ return nil, err
+ }
+
+ // check for PowerDNS error message
+ if len(msg) > 0 && msg[0] == '{' {
+ var apiError APIError
+ err = json.Unmarshal(msg, &apiError)
+ if err != nil {
+ return nil, err
+ }
+ if apiError.Error != "" {
+ return nil, fmt.Errorf("Error talking to PDNS API -> %v", apiError.Error)
+ }
+ }
+ return msg, nil
+}
+
+type pdnsRecord struct {
+ Content string `json:"content"`
+ Disabled bool `json:"disabled"`
+
+ // pre-v1 API
+ Name string `json:"name"`
+ Type string `json:"type"`
+ TTL int `json:"ttl,omitempty"`
+}
+
+type hostedZone struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ URL string `json:"url"`
+ RRSets []rrSet `json:"rrsets"`
+
+ // pre-v1 API
+ Records []pdnsRecord `json:"records"`
+}
+
+type rrSet struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Kind string `json:"kind"`
+ ChangeType string `json:"changetype"`
+ Records []pdnsRecord `json:"records"`
+ TTL int `json:"ttl,omitempty"`
+}
+
+type rrSets struct {
+ RRSets []rrSet `json:"rrsets"`
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go
new file mode 100644
index 000000000..70e7670ed
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/pdns/pdns_test.go
@@ -0,0 +1,80 @@
+package pdns
+
+import (
+ "net/url"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ pdnsLiveTest bool
+ pdnsURL *url.URL
+ pdnsURLStr string
+ pdnsAPIKey string
+ pdnsDomain string
+)
+
+func init() {
+ pdnsURLStr = os.Getenv("PDNS_API_URL")
+ pdnsURL, _ = url.Parse(pdnsURLStr)
+ pdnsAPIKey = os.Getenv("PDNS_API_KEY")
+ pdnsDomain = os.Getenv("PDNS_DOMAIN")
+ if len(pdnsURLStr) > 0 && len(pdnsAPIKey) > 0 && len(pdnsDomain) > 0 {
+ pdnsLiveTest = true
+ }
+}
+
+func restorePdnsEnv() {
+ os.Setenv("PDNS_API_URL", pdnsURLStr)
+ os.Setenv("PDNS_API_KEY", pdnsAPIKey)
+}
+
+func TestNewDNSProviderValid(t *testing.T) {
+ os.Setenv("PDNS_API_URL", "")
+ os.Setenv("PDNS_API_KEY", "")
+ tmpURL, _ := url.Parse("http://localhost:8081")
+ _, err := NewDNSProviderCredentials(tmpURL, "123")
+ assert.NoError(t, err)
+ restorePdnsEnv()
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ os.Setenv("PDNS_API_URL", "http://localhost:8081")
+ os.Setenv("PDNS_API_KEY", "123")
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+ restorePdnsEnv()
+}
+
+func TestNewDNSProviderMissingHostErr(t *testing.T) {
+ os.Setenv("PDNS_API_URL", "")
+ os.Setenv("PDNS_API_KEY", "123")
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "PDNS API URL missing")
+ restorePdnsEnv()
+}
+
+func TestNewDNSProviderMissingKeyErr(t *testing.T) {
+ os.Setenv("PDNS_API_URL", pdnsURLStr)
+ os.Setenv("PDNS_API_KEY", "")
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "PDNS API key missing")
+ restorePdnsEnv()
+}
+
+func TestPdnsPresentAndCleanup(t *testing.T) {
+ if !pdnsLiveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProviderCredentials(pdnsURL, pdnsAPIKey)
+ assert.NoError(t, err)
+
+ err = provider.Present(pdnsDomain, "", "123d==")
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(pdnsDomain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go
new file mode 100644
index 000000000..43a95f18c
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go
@@ -0,0 +1,129 @@
+// Package rfc2136 implements a DNS provider for solving the DNS-01 challenge
+// using the rfc2136 dynamic update.
+package rfc2136
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/miekg/dns"
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface that
+// uses dynamic DNS updates (RFC 2136) to create TXT records on a nameserver.
+type DNSProvider struct {
+ nameserver string
+ tsigAlgorithm string
+ tsigKey string
+ tsigSecret string
+}
+
+// NewDNSProvider returns a DNSProvider instance configured for rfc2136
+// dynamic update. Credentials must be passed in the environment variables:
+// RFC2136_NAMESERVER, RFC2136_TSIG_ALGORITHM, RFC2136_TSIG_KEY and
+// RFC2136_TSIG_SECRET. To disable TSIG authentication, leave the TSIG
+// variables unset. RFC2136_NAMESERVER must be a network address in the form
+// "host" or "host:port".
+func NewDNSProvider() (*DNSProvider, error) {
+ nameserver := os.Getenv("RFC2136_NAMESERVER")
+ tsigAlgorithm := os.Getenv("RFC2136_TSIG_ALGORITHM")
+ tsigKey := os.Getenv("RFC2136_TSIG_KEY")
+ tsigSecret := os.Getenv("RFC2136_TSIG_SECRET")
+ return NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a
+// DNSProvider instance configured for rfc2136 dynamic update. To disable TSIG
+// authentication, leave the TSIG parameters as empty strings.
+// nameserver must be a network address in the form "host" or "host:port".
+func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret string) (*DNSProvider, error) {
+ if nameserver == "" {
+ return nil, fmt.Errorf("RFC2136 nameserver missing")
+ }
+
+ // Append the default DNS port if none is specified.
+ if _, _, err := net.SplitHostPort(nameserver); err != nil {
+ if strings.Contains(err.Error(), "missing port") {
+ nameserver = net.JoinHostPort(nameserver, "53")
+ } else {
+ return nil, err
+ }
+ }
+ d := &DNSProvider{
+ nameserver: nameserver,
+ }
+ if tsigAlgorithm == "" {
+ tsigAlgorithm = dns.HmacMD5
+ }
+ d.tsigAlgorithm = tsigAlgorithm
+ if len(tsigKey) > 0 && len(tsigSecret) > 0 {
+ d.tsigKey = tsigKey
+ d.tsigSecret = tsigSecret
+ }
+
+ return d, nil
+}
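+
+// Illustrative example (the nameserver, key name and secret below are
+// placeholders; an empty algorithm string falls back to dns.HmacMD5 as above):
+//
+//	provider, err := NewDNSProviderCredentials(
+//		"ns1.example.com:53",
+//		"",                         // TSIG algorithm
+//		"lego.",                    // TSIG key name
+//		"IwBTJx9wrDp4Y1RyC3H0gA==", // TSIG secret
+//	)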
+
+// Present creates a TXT record using the specified parameters
+func (r *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+ return r.changeRecord("INSERT", fqdn, value, ttl)
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+ return r.changeRecord("REMOVE", fqdn, value, ttl)
+}
+
+func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error {
+ // Find the zone for the given fqdn
+ zone, err := acme.FindZoneByFqdn(fqdn, []string{r.nameserver})
+ if err != nil {
+ return err
+ }
+
+ // Create RR
+ rr := new(dns.TXT)
+ rr.Hdr = dns.RR_Header{Name: fqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: uint32(ttl)}
+ rr.Txt = []string{value}
+ rrs := []dns.RR{rr}
+
+ // Create dynamic update packet
+ m := new(dns.Msg)
+ m.SetUpdate(zone)
+ switch action {
+ case "INSERT":
+ // Always remove old challenge left over from who knows what.
+ m.RemoveRRset(rrs)
+ m.Insert(rrs)
+ case "REMOVE":
+ m.Remove(rrs)
+ default:
+ return fmt.Errorf("Unexpected action: %s", action)
+ }
+
+ // Setup client
+ c := new(dns.Client)
+ c.SingleInflight = true
+ // TSIG authentication / msg signing
+ if len(r.tsigKey) > 0 && len(r.tsigSecret) > 0 {
+ m.SetTsig(dns.Fqdn(r.tsigKey), r.tsigAlgorithm, 300, time.Now().Unix())
+ c.TsigSecret = map[string]string{dns.Fqdn(r.tsigKey): r.tsigSecret}
+ }
+
+ // Send the query
+ reply, _, err := c.Exchange(m, r.nameserver)
+ if err != nil {
+ return fmt.Errorf("DNS update failed: %v", err)
+ }
+ if reply != nil && reply.Rcode != dns.RcodeSuccess {
+ return fmt.Errorf("DNS update failed. Server replied: %s", dns.RcodeToString[reply.Rcode])
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go
new file mode 100644
index 000000000..a2515e995
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136_test.go
@@ -0,0 +1,244 @@
+package rfc2136
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/miekg/dns"
+ "github.com/xenolf/lego/acme"
+)
+
+var (
+ rfc2136TestDomain = "123456789.www.example.com"
+ rfc2136TestKeyAuth = "123d=="
+ rfc2136TestValue = "Now36o-3BmlB623-0c1qCIUmgWVVmDJb88KGl24pqpo"
+ rfc2136TestFqdn = "_acme-challenge.123456789.www.example.com."
+ rfc2136TestZone = "example.com."
+ rfc2136TestTTL = 120
+ rfc2136TestTsigKey = "example.com."
+ rfc2136TestTsigSecret = "IwBTJx9wrDp4Y1RyC3H0gA=="
+)
+
+var reqChan = make(chan *dns.Msg, 10)
+
+func TestRFC2136CanaryLocalTestServer(t *testing.T) {
+ acme.ClearFqdnCache()
+ dns.HandleFunc("example.com.", serverHandlerHello)
+ defer dns.HandleRemove("example.com.")
+
+ server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false)
+ if err != nil {
+ t.Fatalf("Failed to start test server: %v", err)
+ }
+ defer server.Shutdown()
+
+ c := new(dns.Client)
+ m := new(dns.Msg)
+ m.SetQuestion("example.com.", dns.TypeTXT)
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil || len(r.Extra) == 0 {
+ t.Fatalf("Failed to communicate with test server: %v", err)
+ }
+ txt := r.Extra[0].(*dns.TXT).Txt[0]
+ if txt != "Hello world" {
+ t.Error("Expected test server to return 'Hello world' but got: ", txt)
+ }
+}
+
+func TestRFC2136ServerSuccess(t *testing.T) {
+ acme.ClearFqdnCache()
+ dns.HandleFunc(rfc2136TestZone, serverHandlerReturnSuccess)
+ defer dns.HandleRemove(rfc2136TestZone)
+
+ server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false)
+ if err != nil {
+ t.Fatalf("Failed to start test server: %v", err)
+ }
+ defer server.Shutdown()
+
+ provider, err := NewDNSProviderCredentials(addrstr, "", "", "")
+ if err != nil {
+ t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err)
+ }
+ if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err != nil {
+ t.Errorf("Expected Present() to return no error but the error was -> %v", err)
+ }
+}
+
+func TestRFC2136ServerError(t *testing.T) {
+ acme.ClearFqdnCache()
+ dns.HandleFunc(rfc2136TestZone, serverHandlerReturnErr)
+ defer dns.HandleRemove(rfc2136TestZone)
+
+ server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false)
+ if err != nil {
+ t.Fatalf("Failed to start test server: %v", err)
+ }
+ defer server.Shutdown()
+
+ provider, err := NewDNSProviderCredentials(addrstr, "", "", "")
+ if err != nil {
+ t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err)
+ }
+ if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err == nil {
+ t.Errorf("Expected Present() to return an error but it did not.")
+ } else if !strings.Contains(err.Error(), "NOTZONE") {
+ t.Errorf("Expected Present() to return an error with the 'NOTZONE' rcode string but it did not.")
+ }
+}
+
+func TestRFC2136TsigClient(t *testing.T) {
+ acme.ClearFqdnCache()
+ dns.HandleFunc(rfc2136TestZone, serverHandlerReturnSuccess)
+ defer dns.HandleRemove(rfc2136TestZone)
+
+ server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", true)
+ if err != nil {
+ t.Fatalf("Failed to start test server: %v", err)
+ }
+ defer server.Shutdown()
+
+ provider, err := NewDNSProviderCredentials(addrstr, "", rfc2136TestTsigKey, rfc2136TestTsigSecret)
+ if err != nil {
+ t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err)
+ }
+ if err := provider.Present(rfc2136TestDomain, "", rfc2136TestKeyAuth); err != nil {
+ t.Errorf("Expected Present() to return no error but the error was -> %v", err)
+ }
+}
+
+func TestRFC2136ValidUpdatePacket(t *testing.T) {
+ acme.ClearFqdnCache()
+ dns.HandleFunc(rfc2136TestZone, serverHandlerPassBackRequest)
+ defer dns.HandleRemove(rfc2136TestZone)
+
+ server, addrstr, err := runLocalDNSTestServer("127.0.0.1:0", false)
+ if err != nil {
+ t.Fatalf("Failed to start test server: %v", err)
+ }
+ defer server.Shutdown()
+
+ txtRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN TXT %s", rfc2136TestFqdn, rfc2136TestTTL, rfc2136TestValue))
+ rrs := []dns.RR{txtRR}
+ m := new(dns.Msg)
+ m.SetUpdate(rfc2136TestZone)
+ m.RemoveRRset(rrs)
+ m.Insert(rrs)
+ expectstr := m.String()
+ expect, err := m.Pack()
+ if err != nil {
+ t.Fatalf("Error packing expect msg: %v", err)
+ }
+
+ provider, err := NewDNSProviderCredentials(addrstr, "", "", "")
+ if err != nil {
+ t.Fatalf("Expected NewDNSProviderCredentials() to return no error but the error was -> %v", err)
+ }
+
+ if err := provider.Present(rfc2136TestDomain, "", "1234d=="); err != nil {
+ t.Errorf("Expected Present() to return no error but the error was -> %v", err)
+ }
+
+ rcvMsg := <-reqChan
+ rcvMsg.Id = m.Id
+ actual, err := rcvMsg.Pack()
+ if err != nil {
+ t.Fatalf("Error packing actual msg: %v", err)
+ }
+
+ if !bytes.Equal(actual, expect) {
+ tmp := new(dns.Msg)
+ if err := tmp.Unpack(actual); err != nil {
+ t.Fatalf("Error unpacking actual msg: %v", err)
+ }
+ t.Errorf("Expected msg:\n%s", expectstr)
+ t.Errorf("Actual msg:\n%v", tmp)
+ }
+}
+
+func runLocalDNSTestServer(listenAddr string, tsig bool) (*dns.Server, string, error) {
+ pc, err := net.ListenPacket("udp", listenAddr)
+ if err != nil {
+ return nil, "", err
+ }
+ server := &dns.Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
+ if tsig {
+ server.TsigSecret = map[string]string{rfc2136TestTsigKey: rfc2136TestTsigSecret}
+ }
+
+ waitLock := sync.Mutex{}
+ waitLock.Lock()
+ server.NotifyStartedFunc = waitLock.Unlock
+
+ go func() {
+ server.ActivateAndServe()
+ pc.Close()
+ }()
+
+ waitLock.Lock()
+ return server, pc.LocalAddr().String(), nil
+}
+
+func serverHandlerHello(w dns.ResponseWriter, req *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(req)
+ m.Extra = make([]dns.RR, 1)
+ m.Extra[0] = &dns.TXT{
+ Hdr: dns.RR_Header{Name: m.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0},
+ Txt: []string{"Hello world"},
+ }
+ w.WriteMsg(m)
+}
+
+func serverHandlerReturnSuccess(w dns.ResponseWriter, req *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(req)
+ if req.Opcode == dns.OpcodeQuery && req.Question[0].Qtype == dns.TypeSOA && req.Question[0].Qclass == dns.ClassINET {
+ // Return SOA to appease findZoneByFqdn()
+ soaRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN SOA ns1.%s admin.%s 2016022801 28800 7200 2419200 1200", rfc2136TestZone, rfc2136TestTTL, rfc2136TestZone, rfc2136TestZone))
+ m.Answer = []dns.RR{soaRR}
+ }
+
+ if t := req.IsTsig(); t != nil {
+ if w.TsigStatus() == nil {
+ // Validated
+ m.SetTsig(rfc2136TestZone, dns.HmacMD5, 300, time.Now().Unix())
+ }
+ }
+
+ w.WriteMsg(m)
+}
+
+func serverHandlerReturnErr(w dns.ResponseWriter, req *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetRcode(req, dns.RcodeNotZone)
+ w.WriteMsg(m)
+}
+
+func serverHandlerPassBackRequest(w dns.ResponseWriter, req *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(req)
+ if req.Opcode == dns.OpcodeQuery && req.Question[0].Qtype == dns.TypeSOA && req.Question[0].Qclass == dns.ClassINET {
+ // Return SOA to appease findZoneByFqdn()
+ soaRR, _ := dns.NewRR(fmt.Sprintf("%s %d IN SOA ns1.%s admin.%s 2016022801 28800 7200 2419200 1200", rfc2136TestZone, rfc2136TestTTL, rfc2136TestZone, rfc2136TestZone))
+ m.Answer = []dns.RR{soaRR}
+ }
+
+ if t := req.IsTsig(); t != nil {
+ if w.TsigStatus() == nil {
+ // Validated
+ m.SetTsig(rfc2136TestZone, dns.HmacMD5, 300, time.Now().Unix())
+ }
+ }
+
+ w.WriteMsg(m)
+ if req.Opcode != dns.OpcodeQuery || req.Question[0].Qtype != dns.TypeSOA || req.Question[0].Qclass != dns.ClassINET {
+ // Only talk back when it is not the SOA RR.
+ reqChan <- req
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go
new file mode 100644
index 000000000..a5cc9c878
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/route53/fixtures_test.go
@@ -0,0 +1,39 @@
+package route53
+
+var ChangeResourceRecordSetsResponse = `<?xml version="1.0" encoding="UTF-8"?>
+<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+<ChangeInfo>
+ <Id>/change/123456</Id>
+ <Status>PENDING</Status>
+ <SubmittedAt>2016-02-10T01:36:41.958Z</SubmittedAt>
+</ChangeInfo>
+</ChangeResourceRecordSetsResponse>`
+
+var ListHostedZonesByNameResponse = `<?xml version="1.0" encoding="UTF-8"?>
+<ListHostedZonesByNameResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <HostedZones>
+ <HostedZone>
+ <Id>/hostedzone/ABCDEFG</Id>
+ <Name>example.com.</Name>
+ <CallerReference>D2224C5B-684A-DB4A-BB9A-E09E3BAFEA7A</CallerReference>
+ <Config>
+ <Comment>Test comment</Comment>
+ <PrivateZone>false</PrivateZone>
+ </Config>
+ <ResourceRecordSetCount>10</ResourceRecordSetCount>
+ </HostedZone>
+ </HostedZones>
+ <IsTruncated>true</IsTruncated>
+ <NextDNSName>example2.com</NextDNSName>
+ <NextHostedZoneId>ZLT12321321124</NextHostedZoneId>
+ <MaxItems>1</MaxItems>
+</ListHostedZonesByNameResponse>`
+
+var GetChangeResponse = `<?xml version="1.0" encoding="UTF-8"?>
+<GetChangeResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <ChangeInfo>
+ <Id>123456</Id>
+ <Status>INSYNC</Status>
+ <SubmittedAt>2016-02-10T01:36:41.958Z</SubmittedAt>
+ </ChangeInfo>
+</GetChangeResponse>`
diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go
new file mode 100644
index 000000000..f3e53a8e5
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go
@@ -0,0 +1,171 @@
+// Package route53 implements a DNS provider for solving the DNS-01 challenge
+// using AWS Route 53 DNS.
+package route53
+
+import (
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/route53"
+ "github.com/xenolf/lego/acme"
+)
+
+const (
+ maxRetries = 5
+ route53TTL = 10
+)
+
+// DNSProvider implements the acme.ChallengeProvider interface
+type DNSProvider struct {
+ client *route53.Route53
+}
+
+// customRetryer implements the client.Retryer interface by composing the
+// DefaultRetryer. It controls the logic for retrying recoverable request
+// errors (e.g. when rate limits are exceeded).
+type customRetryer struct {
+ client.DefaultRetryer
+}
+
+// RetryRules overwrites the DefaultRetryer's method.
+// It uses a basic exponential backoff algorithm that returns an initial
+// delay of ~400ms with an upper limit of ~30 seconds, which should help
+// avoid causing a high number of consecutive throttling errors.
+// For reference: Route 53 enforces an account-wide(!) 5req/s query limit.
+func (d customRetryer) RetryRules(r *request.Request) time.Duration {
+ retryCount := r.RetryCount
+ if retryCount > 7 {
+ retryCount = 7
+ }
+
+ delay := (1 << uint(retryCount)) * (rand.Intn(50) + 200)
+ return time.Duration(delay) * time.Millisecond
+}
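+
+// To make the figures above concrete: the computed delay is
+// (1 << min(retryCount, 7)) * (200..249) milliseconds, so it roughly doubles on
+// each retry and tops out around 32 seconds once the clamp at 7 kicks in.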
+
+// NewDNSProvider returns a DNSProvider instance configured for the AWS
+// Route 53 service.
+//
+// AWS Credentials are automatically detected in the following locations
+// and prioritized in the following order:
+// 1. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
+// AWS_REGION, [AWS_SESSION_TOKEN]
+// 2. Shared credentials file (defaults to ~/.aws/credentials)
+// 3. Amazon EC2 IAM role
+//
+// See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk
+func NewDNSProvider() (*DNSProvider, error) {
+ r := customRetryer{}
+ r.NumMaxRetries = maxRetries
+ config := request.WithRetryer(aws.NewConfig(), r)
+ client := route53.New(session.New(config))
+
+ return &DNSProvider{client: client}, nil
+}
+
+// Present creates a TXT record using the specified parameters
+func (r *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ value = `"` + value + `"`
+ return r.changeRecord("UPSERT", fqdn, value, route53TTL)
+}
+
+// CleanUp removes the TXT record matching the specified parameters
+func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
+ value = `"` + value + `"`
+ return r.changeRecord("DELETE", fqdn, value, route53TTL)
+}
+
+func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error {
+ hostedZoneID, err := getHostedZoneID(fqdn, r.client)
+ if err != nil {
+ return fmt.Errorf("Failed to determine Route 53 hosted zone ID: %v", err)
+ }
+
+ recordSet := newTXTRecordSet(fqdn, value, ttl)
+ reqParams := &route53.ChangeResourceRecordSetsInput{
+ HostedZoneId: aws.String(hostedZoneID),
+ ChangeBatch: &route53.ChangeBatch{
+ Comment: aws.String("Managed by Lego"),
+ Changes: []*route53.Change{
+ {
+ Action: aws.String(action),
+ ResourceRecordSet: recordSet,
+ },
+ },
+ },
+ }
+
+ resp, err := r.client.ChangeResourceRecordSets(reqParams)
+ if err != nil {
+ return fmt.Errorf("Failed to change Route 53 record set: %v", err)
+ }
+
+ statusID := resp.ChangeInfo.Id
+
+ return acme.WaitFor(120*time.Second, 4*time.Second, func() (bool, error) {
+ reqParams := &route53.GetChangeInput{
+ Id: statusID,
+ }
+ resp, err := r.client.GetChange(reqParams)
+ if err != nil {
+ return false, fmt.Errorf("Failed to query Route 53 change status: %v", err)
+ }
+ if *resp.ChangeInfo.Status == route53.ChangeStatusInsync {
+ return true, nil
+ }
+ return false, nil
+ })
+}
+
+func getHostedZoneID(fqdn string, client *route53.Route53) (string, error) {
+ authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
+ if err != nil {
+ return "", err
+ }
+
+ // .DNSName should not have a trailing dot
+ reqParams := &route53.ListHostedZonesByNameInput{
+ DNSName: aws.String(acme.UnFqdn(authZone)),
+ }
+ resp, err := client.ListHostedZonesByName(reqParams)
+ if err != nil {
+ return "", err
+ }
+
+ var hostedZoneID string
+ for _, hostedZone := range resp.HostedZones {
+ // .Name has a trailing dot
+ if !*hostedZone.Config.PrivateZone && *hostedZone.Name == authZone {
+ hostedZoneID = *hostedZone.Id
+ break
+ }
+ }
+
+ if len(hostedZoneID) == 0 {
+ return "", fmt.Errorf("Zone %s not found in Route 53 for domain %s", authZone, fqdn)
+ }
+
+ if strings.HasPrefix(hostedZoneID, "/hostedzone/") {
+ hostedZoneID = strings.TrimPrefix(hostedZoneID, "/hostedzone/")
+ }
+
+ return hostedZoneID, nil
+}
+
+func newTXTRecordSet(fqdn, value string, ttl int) *route53.ResourceRecordSet {
+ return &route53.ResourceRecordSet{
+ Name: aws.String(fqdn),
+ Type: aws.String("TXT"),
+ TTL: aws.Int64(int64(ttl)),
+ ResourceRecords: []*route53.ResourceRecord{
+ {Value: aws.String(value)},
+ },
+ }
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go
new file mode 100644
index 000000000..64678906a
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_integration_test.go
@@ -0,0 +1,70 @@
+package route53
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+func TestRoute53TTL(t *testing.T) {
+
+ m, err := testGetAndPreCheck()
+ if err != nil {
+ t.Skip(err.Error())
+ }
+
+ provider, err := NewDNSProvider()
+ if err != nil {
+ t.Fatalf("Fatal: %s", err.Error())
+ }
+
+ err = provider.Present(m["route53Domain"], "foo", "bar")
+ if err != nil {
+ t.Fatalf("Fatal: %s", err.Error())
+ }
+ // we need a separate R53 client here as the one in the DNS provider is
+ // unexported.
+ fqdn := "_acme-challenge." + m["route53Domain"] + "."
+ svc := route53.New(session.New())
+ zoneID, err := getHostedZoneID(fqdn, svc)
+ if err != nil {
+ provider.CleanUp(m["route53Domain"], "foo", "bar")
+ t.Fatalf("Fatal: %s", err.Error())
+ }
+ params := &route53.ListResourceRecordSetsInput{
+ HostedZoneId: aws.String(zoneID),
+ }
+ resp, err := svc.ListResourceRecordSets(params)
+ if err != nil {
+ provider.CleanUp(m["route53Domain"], "foo", "bar")
+ t.Fatalf("Fatal: %s", err.Error())
+ }
+
+ for _, v := range resp.ResourceRecordSets {
+ if *v.Name == fqdn && *v.Type == "TXT" && *v.TTL == 10 {
+ provider.CleanUp(m["route53Domain"], "foo", "bar")
+ return
+ }
+ }
+ provider.CleanUp(m["route53Domain"], "foo", "bar")
+ t.Fatalf("Could not find a TXT record for _acme-challenge.%s with a TTL of 10", m["route53Domain"])
+}
+
+func testGetAndPreCheck() (map[string]string, error) {
+ m := map[string]string{
+ "route53Key": os.Getenv("AWS_ACCESS_KEY_ID"),
+ "route53Secret": os.Getenv("AWS_SECRET_ACCESS_KEY"),
+ "route53Region": os.Getenv("AWS_REGION"),
+ "route53Domain": os.Getenv("R53_DOMAIN"),
+ }
+ for _, v := range m {
+ if v == "" {
+ return nil, fmt.Errorf("AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, and R53_DOMAIN are needed to run this test")
+ }
+ }
+ return m, nil
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go
new file mode 100644
index 000000000..ab8739a58
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/route53/route53_test.go
@@ -0,0 +1,87 @@
+package route53
+
+import (
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/route53"
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ route53Secret string
+ route53Key string
+ route53Region string
+)
+
+func init() {
+ route53Key = os.Getenv("AWS_ACCESS_KEY_ID")
+ route53Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
+ route53Region = os.Getenv("AWS_REGION")
+}
+
+func restoreRoute53Env() {
+ os.Setenv("AWS_ACCESS_KEY_ID", route53Key)
+ os.Setenv("AWS_SECRET_ACCESS_KEY", route53Secret)
+ os.Setenv("AWS_REGION", route53Region)
+}
+
+func makeRoute53Provider(ts *httptest.Server) *DNSProvider {
+ config := &aws.Config{
+ Credentials: credentials.NewStaticCredentials("abc", "123", " "),
+ Endpoint: aws.String(ts.URL),
+ Region: aws.String("mock-region"),
+ MaxRetries: aws.Int(1),
+ }
+
+ client := route53.New(session.New(config))
+ return &DNSProvider{client: client}
+}
+
+func TestCredentialsFromEnv(t *testing.T) {
+ os.Setenv("AWS_ACCESS_KEY_ID", "123")
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "123")
+ os.Setenv("AWS_REGION", "us-east-1")
+
+ config := &aws.Config{
+ CredentialsChainVerboseErrors: aws.Bool(true),
+ }
+
+ sess := session.New(config)
+ _, err := sess.Config.Credentials.Get()
+ assert.NoError(t, err, "Expected credentials to be set from environment")
+
+ restoreRoute53Env()
+}
+
+func TestRegionFromEnv(t *testing.T) {
+ os.Setenv("AWS_REGION", "us-east-1")
+
+ sess := session.New(aws.NewConfig())
+ assert.Equal(t, "us-east-1", *sess.Config.Region, "Expected Region to be set from environment")
+
+ restoreRoute53Env()
+}
+
+func TestRoute53Present(t *testing.T) {
+ mockResponses := MockResponseMap{
+ "/2013-04-01/hostedzonesbyname": MockResponse{StatusCode: 200, Body: ListHostedZonesByNameResponse},
+ "/2013-04-01/hostedzone/ABCDEFG/rrset/": MockResponse{StatusCode: 200, Body: ChangeResourceRecordSetsResponse},
+ "/2013-04-01/change/123456": MockResponse{StatusCode: 200, Body: GetChangeResponse},
+ }
+
+ ts := newMockServer(t, mockResponses)
+ defer ts.Close()
+
+ provider := makeRoute53Provider(ts)
+
+ domain := "example.com"
+ keyAuth := "123456d=="
+
+ err := provider.Present(domain, "", keyAuth)
+ assert.NoError(t, err, "Expected Present to return no error")
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go b/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go
new file mode 100644
index 000000000..e448a6858
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/route53/testutil_test.go
@@ -0,0 +1,38 @@
+package route53
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+// MockResponse represents a predefined response used by a mock server
+type MockResponse struct {
+ StatusCode int
+ Body string
+}
+
+// MockResponseMap maps request paths to responses
+type MockResponseMap map[string]MockResponse
+
+func newMockServer(t *testing.T, responses MockResponseMap) *httptest.Server {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ path := r.URL.Path
+ resp, ok := responses[path]
+ if !ok {
+ msg := fmt.Sprintf("Requested path not found in response map: %s", path)
+ require.FailNow(t, msg)
+ }
+
+ w.Header().Set("Content-Type", "application/xml")
+ w.WriteHeader(resp.StatusCode)
+ w.Write([]byte(resp.Body))
+ }))
+
+ time.Sleep(100 * time.Millisecond)
+ return ts
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go
new file mode 100644
index 000000000..53804e270
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go
@@ -0,0 +1,127 @@
+// Package vultr implements a DNS provider for solving the DNS-01 challenge using
+// Vultr DNS.
+// See https://www.vultr.com/api/#dns
+package vultr
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ vultr "github.com/JamesClonk/vultr/lib"
+ "github.com/xenolf/lego/acme"
+)
+
+// DNSProvider is an implementation of the acme.ChallengeProvider interface.
+type DNSProvider struct {
+ client *vultr.Client
+}
+
+// NewDNSProvider returns a DNSProvider instance with a configured Vultr client.
+// Authentication uses the VULTR_API_KEY environment variable.
+func NewDNSProvider() (*DNSProvider, error) {
+ apiKey := os.Getenv("VULTR_API_KEY")
+ return NewDNSProviderCredentials(apiKey)
+}
+
+// NewDNSProviderCredentials uses the supplied credentials to return a DNSProvider
+// instance configured for Vultr.
+func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) {
+ if apiKey == "" {
+ return nil, fmt.Errorf("Vultr credentials missing")
+ }
+
+ c := &DNSProvider{
+ client: vultr.NewClient(apiKey, nil),
+ }
+
+ return c, nil
+}
+
+// Present creates a TXT record to fulfil the DNS-01 challenge.
+func (c *DNSProvider) Present(domain, token, keyAuth string) error {
+ fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
+
+ zoneDomain, err := c.getHostedZone(domain)
+ if err != nil {
+ return err
+ }
+
+ name := c.extractRecordName(fqdn, zoneDomain)
+
+ err = c.client.CreateDnsRecord(zoneDomain, name, "TXT", `"`+value+`"`, 0, ttl)
+ if err != nil {
+ return fmt.Errorf("Vultr API call failed: %v", err)
+ }
+
+ return nil
+}
+
+// CleanUp removes the TXT record matching the specified parameters.
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
+ fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
+
+ zoneDomain, records, err := c.findTxtRecords(domain, fqdn)
+ if err != nil {
+ return err
+ }
+
+ for _, rec := range records {
+ err := c.client.DeleteDnsRecord(zoneDomain, rec.RecordID)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *DNSProvider) getHostedZone(domain string) (string, error) {
+ domains, err := c.client.GetDnsDomains()
+ if err != nil {
+ return "", fmt.Errorf("Vultr API call failed: %v", err)
+ }
+
+ var hostedDomain vultr.DnsDomain
+ for _, d := range domains {
+ if strings.HasSuffix(domain, d.Domain) {
+ if len(d.Domain) > len(hostedDomain.Domain) {
+ hostedDomain = d
+ }
+ }
+ }
+ if hostedDomain.Domain == "" {
+ return "", fmt.Errorf("No matching Vultr domain found for domain %s", domain)
+ }
+
+ return hostedDomain.Domain, nil
+}
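+
+// For example, given the domain "www.sub.example.com" and hosted zones
+// "example.com" and "sub.example.com", the longer matching suffix
+// "sub.example.com" is selected.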
+
+func (c *DNSProvider) findTxtRecords(domain, fqdn string) (string, []vultr.DnsRecord, error) {
+ zoneDomain, err := c.getHostedZone(domain)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var records []vultr.DnsRecord
+ result, err := c.client.GetDnsRecords(zoneDomain)
+ if err != nil {
+ return "", records, fmt.Errorf("Vultr API call has failed: %v", err)
+ }
+
+ recordName := c.extractRecordName(fqdn, zoneDomain)
+ for _, record := range result {
+ if record.Type == "TXT" && record.Name == recordName {
+ records = append(records, record)
+ }
+ }
+
+ return zoneDomain, records, nil
+}
+
+func (c *DNSProvider) extractRecordName(fqdn, domain string) string {
+ name := acme.UnFqdn(fqdn)
+ if idx := strings.Index(name, "."+domain); idx != -1 {
+ return name[:idx]
+ }
+ return name
+}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go
new file mode 100644
index 000000000..7c8cdaf1e
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr_test.go
@@ -0,0 +1,65 @@
+package vultr
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ liveTest bool
+ apiKey string
+ domain string
+)
+
+func init() {
+ apiKey = os.Getenv("VULTR_API_KEY")
+ domain = os.Getenv("VULTR_TEST_DOMAIN")
+ liveTest = len(apiKey) > 0 && len(domain) > 0
+}
+
+func restoreEnv() {
+ os.Setenv("VULTR_API_KEY", apiKey)
+}
+
+func TestNewDNSProviderValidEnv(t *testing.T) {
+ os.Setenv("VULTR_API_KEY", "123")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.NoError(t, err)
+}
+
+func TestNewDNSProviderMissingCredErr(t *testing.T) {
+ os.Setenv("VULTR_API_KEY", "")
+ defer restoreEnv()
+ _, err := NewDNSProvider()
+ assert.EqualError(t, err, "Vultr credentials missing")
+}
+
+func TestLivePresent(t *testing.T) {
+ if !liveTest {
+ t.Skip("skipping live test")
+ }
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.Present(domain, "", "123d==")
+ assert.NoError(t, err)
+}
+
+func TestLiveCleanUp(t *testing.T) {
+ if !liveTest {
+ t.Skip("skipping live test")
+ }
+
+ time.Sleep(time.Second * 1)
+
+ provider, err := NewDNSProvider()
+ assert.NoError(t, err)
+
+ err = provider.CleanUp(domain, "", "123d==")
+ assert.NoError(t, err)
+}
diff --git a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go
new file mode 100644
index 000000000..4bf211f39
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot.go
@@ -0,0 +1,58 @@
+// Package webroot implements an HTTP provider for solving the HTTP-01 challenge using the web server's root path.
+package webroot
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/xenolf/lego/acme"
+)
+
+// HTTPProvider implements ChallengeProvider for the `http-01` challenge
+type HTTPProvider struct {
+ path string
+}
+
+// NewHTTPProvider returns an HTTPProvider instance with a configured webroot path
+func NewHTTPProvider(path string) (*HTTPProvider, error) {
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return nil, fmt.Errorf("Webroot path does not exist")
+ }
+
+ c := &HTTPProvider{
+ path: path,
+ }
+
+ return c, nil
+}
+
+// Present makes the token available at `HTTP01ChallengePath(token)` by creating a file in the given webroot path
+func (w *HTTPProvider) Present(domain, token, keyAuth string) error {
+ var err error
+
+ challengeFilePath := path.Join(w.path, acme.HTTP01ChallengePath(token))
+ err = os.MkdirAll(path.Dir(challengeFilePath), 0755)
+ if err != nil {
+ return fmt.Errorf("Could not create required directories in webroot for HTTP challenge -> %v", err)
+ }
+
+ err = ioutil.WriteFile(challengeFilePath, []byte(keyAuth), 0644)
+ if err != nil {
+ return fmt.Errorf("Could not write file in webroot for HTTP challenge -> %v", err)
+ }
+
+ return nil
+}
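+
+// For illustration: with a webroot of "/var/www/html" (a placeholder path) and a
+// token "abc", Present writes the key authorization to
+// /var/www/html/.well-known/acme-challenge/abc, as laid out by acme.HTTP01ChallengePath.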
+
+// CleanUp removes the file created for the challenge
+func (w *HTTPProvider) CleanUp(domain, token, keyAuth string) error {
+ var err error
+ err = os.Remove(path.Join(w.path, acme.HTTP01ChallengePath(token)))
+ if err != nil {
+ return fmt.Errorf("Could not remove file in webroot after HTTP challenge -> %v", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go
new file mode 100644
index 000000000..99c930ed3
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/providers/http/webroot/webroot_test.go
@@ -0,0 +1,46 @@
+package webroot
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestHTTPProvider(t *testing.T) {
+ webroot := "webroot"
+ domain := "domain"
+ token := "token"
+ keyAuth := "keyAuth"
+ challengeFilePath := webroot + "/.well-known/acme-challenge/" + token
+
+ os.MkdirAll(webroot+"/.well-known/acme-challenge", 0777)
+ defer os.RemoveAll(webroot)
+
+ provider, err := NewHTTPProvider(webroot)
+ if err != nil {
+ t.Errorf("Webroot provider error: got %v, want nil", err)
+ }
+
+ err = provider.Present(domain, token, keyAuth)
+ if err != nil {
+ t.Errorf("Webroot provider present() error: got %v, want nil", err)
+ }
+
+ if _, err := os.Stat(challengeFilePath); os.IsNotExist(err) {
+ t.Error("Challenge file was not created in webroot")
+ }
+
+ data, err := ioutil.ReadFile(challengeFilePath)
+ if err != nil {
+ t.Errorf("Webroot provider ReadFile() error: got %v, want nil", err)
+ }
+ dataStr := string(data)
+ if dataStr != keyAuth {
+ t.Errorf("Challenge file content: got %q, want %q", dataStr, keyAuth)
+ }
+
+ err = provider.CleanUp(domain, token, keyAuth)
+ if err != nil {
+ t.Errorf("Webroot provider CleanUp() error: got %v, want nil", err)
+ }
+}
diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes
new file mode 100644
index 000000000..d2f212e5d
--- /dev/null
+++ b/vendor/golang.org/x/net/.gitattributes
@@ -0,0 +1,10 @@
+# Treat all files in this repo as binary, with no git magic updating
+# line endings. Windows users contributing to Go will need to use a
+# modern version of git and editors capable of LF line endings.
+#
+# We'll prevent accidental CRLF line endings from entering the repo
+# via the git-review gofmt checks.
+#
+# See golang.org/issue/9281
+
+* -text
diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore
new file mode 100644
index 000000000..8339fd61d
--- /dev/null
+++ b/vendor/golang.org/x/net/.gitignore
@@ -0,0 +1,2 @@
+# Add no patterns to .hgignore except for files generated by the build.
+last-change
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md
new file mode 100644
index 000000000..88dff59bc
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README
new file mode 100644
index 000000000..6b13d8e50
--- /dev/null
+++ b/vendor/golang.org/x/net/README
@@ -0,0 +1,3 @@
+This repository holds supplementary Go networking libraries.
+
+To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go
new file mode 100644
index 000000000..15e21b181
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/asm.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// Assemble converts insts into raw instructions suitable for loading
+// into a BPF virtual machine.
+//
+// Currently, no optimization is attempted; the assembled program flow
+// is exactly as provided.
+func Assemble(insts []Instruction) ([]RawInstruction, error) {
+ ret := make([]RawInstruction, len(insts))
+ var err error
+ for i, inst := range insts {
+ ret[i], err = inst.Assemble()
+ if err != nil {
+ return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
+ }
+ }
+ return ret, nil
+}
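+
+// Illustrative example (assuming this package's LoadAbsolute and RetConstant
+// instruction types): assemble a trivial two-instruction program that loads the
+// EtherType field and then accepts the packet.
+//
+//	raw, err := Assemble([]Instruction{
+//		LoadAbsolute{Off: 12, Size: 2},
+//		RetConstant{Val: 0xffff},
+//	})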
+
+// Disassemble attempts to parse raw back into
+// Instructions. Unrecognized RawInstructions are assumed to be an
+// extension not implemented by this package, and are passed through
+// unchanged to the output. The allDecoded value reports whether insts
+// contains no RawInstructions.
+func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
+ insts = make([]Instruction, len(raw))
+ allDecoded = true
+ for i, r := range raw {
+ insts[i] = r.Disassemble()
+ if _, ok := insts[i].(RawInstruction); ok {
+ allDecoded = false
+ }
+ }
+ return insts, allDecoded
+}
diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go
new file mode 100644
index 000000000..2c8bbab7f
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/constants.go
@@ -0,0 +1,215 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Register is a register of the BPF virtual machine.
+type Register uint16
+
+const (
+ // RegA is the accumulator register. RegA is always the
+ // destination register of ALU operations.
+ RegA Register = iota
+ // RegX is the indirection register, used by LoadIndirect
+ // operations.
+ RegX
+)
+
+// An ALUOp is an arithmetic or logic operation.
+type ALUOp uint16
+
+// ALU binary operation types.
+const (
+ ALUOpAdd ALUOp = iota << 4
+ ALUOpSub
+ ALUOpMul
+ ALUOpDiv
+ ALUOpOr
+ ALUOpAnd
+ ALUOpShiftLeft
+ ALUOpShiftRight
+ aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
+ ALUOpMod
+ ALUOpXor
+)
+
+// A JumpTest is a comparison operator used in conditional jumps.
+type JumpTest uint16
+
+// Supported operators for conditional jumps.
+const (
+ // K == A
+ JumpEqual JumpTest = iota
+ // K != A
+ JumpNotEqual
+ // K > A
+ JumpGreaterThan
+ // K < A
+ JumpLessThan
+ // K >= A
+ JumpGreaterOrEqual
+ // K <= A
+ JumpLessOrEqual
+ // K & A != 0
+ JumpBitsSet
+ // K & A == 0
+ JumpBitsNotSet
+)
+
+// An Extension is a function call provided by the kernel that
+// performs advanced operations that are expensive or impossible
+// within the BPF virtual machine.
+//
+// Extensions are only implemented by the Linux kernel.
+//
+// TODO: should we prune this list? Some of these extensions seem
+// either broken or near-impossible to use correctly, whereas others
+// (len, random, ifindex) are quite useful.
+type Extension int
+
+// Extension functions available in the Linux kernel.
+const (
+ // ExtLen returns the length of the packet.
+ ExtLen Extension = 1
+ // ExtProto returns the packet's L3 protocol type.
+ ExtProto = 0
+ // ExtType returns the packet's type (skb->pkt_type in the kernel)
+ //
+ // TODO: better documentation. How nice an API do we want to
+ // provide for these esoteric extensions?
+ ExtType = 4
+ // ExtPayloadOffset returns the offset of the packet payload, or
+ // the first protocol header that the kernel does not know how to
+ // parse.
+ ExtPayloadOffset = 52
+ // ExtInterfaceIndex returns the index of the interface on which
+ // the packet was received.
+ ExtInterfaceIndex = 8
+ // ExtNetlinkAttr returns the netlink attribute of type X at
+ // offset A.
+ ExtNetlinkAttr = 12
+ // ExtNetlinkAttrNested returns the nested netlink attribute of
+ // type X at offset A.
+ ExtNetlinkAttrNested = 16
+ // ExtMark returns the packet's mark value.
+ ExtMark = 20
+ // ExtQueue returns the packet's assigned hardware queue.
+ ExtQueue = 24
+ // ExtLinkLayerType returns the packet's hardware address type
+ // (e.g. Ethernet, Infiniband).
+ ExtLinkLayerType = 28
+	// ExtRXHash returns the packet's receive hash.
+ //
+ // TODO: figure out what this rxhash actually is.
+ ExtRXHash = 32
+ // ExtCPUID returns the ID of the CPU processing the current
+ // packet.
+ ExtCPUID = 36
+ // ExtVLANTag returns the packet's VLAN tag.
+ ExtVLANTag = 44
+ // ExtVLANTagPresent returns non-zero if the packet has a VLAN
+ // tag.
+ //
+ // TODO: I think this might be a lie: it reads bit 0x1000 of the
+ // VLAN header, which changed meaning in recent revisions of the
+ // spec - this extension may now return meaningless information.
+ ExtVLANTagPresent = 48
+ // ExtVLANProto returns 0x8100 if the frame has a VLAN header,
+ // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
+ // other value if no VLAN information is present.
+ ExtVLANProto = 60
+ // ExtRand returns a uniformly random uint32.
+ ExtRand = 56
+)
+
+// The following gives names to various bit patterns used in opcode construction.
+
+const (
+ opMaskCls uint16 = 0x7
+ // opClsLoad masks
+ opMaskLoadDest = 0x01
+ opMaskLoadWidth = 0x18
+ opMaskLoadMode = 0xe0
+ // opClsALU
+ opMaskOperandSrc = 0x08
+ opMaskOperator = 0xf0
+ // opClsJump
+ opMaskJumpConst = 0x0f
+ opMaskJumpCond = 0xf0
+)
+
+const (
+ // +---------------+-----------------+---+---+---+
+ // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 |
+ // +---------------+-----------------+---+---+---+
+ opClsLoadA uint16 = iota
+ // +---------------+-----------------+---+---+---+
+ // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 |
+ // +---------------+-----------------+---+---+---+
+ opClsLoadX
+ // +---+---+---+---+---+---+---+---+
+ // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
+ // +---+---+---+---+---+---+---+---+
+ opClsStoreA
+ // +---+---+---+---+---+---+---+---+
+ // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
+ // +---+---+---+---+---+---+---+---+
+ opClsStoreX
+ // +---------------+-----------------+---+---+---+
+ // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
+ // +---------------+-----------------+---+---+---+
+ opClsALU
+ // +-----------------------------+---+---+---+---+
+ // | TestOperator (4b) | 0 | 1 | 0 | 1 |
+ // +-----------------------------+---+---+---+---+
+ opClsJump
+ // +---+-------------------------+---+---+---+---+
+ // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 |
+ // +---+-------------------------+---+---+---+---+
+ opClsReturn
+ // +---+-------------------------+---+---+---+---+
+ // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 |
+ // +---+-------------------------+---+---+---+---+
+ opClsMisc
+)
+
+const (
+ opAddrModeImmediate uint16 = iota << 5
+ opAddrModeAbsolute
+ opAddrModeIndirect
+ opAddrModeScratch
+ opAddrModePacketLen // actually an extension, not an addressing mode.
+ opAddrModeMemShift
+)
+
+const (
+ opLoadWidth4 uint16 = iota << 3
+ opLoadWidth2
+ opLoadWidth1
+)
+
+// The ALU operator bits are taken from the ALUOp* constants above.
+
+const (
+ opALUSrcConstant uint16 = iota << 3
+ opALUSrcX
+)
+
+const (
+ opJumpAlways = iota << 4
+ opJumpEqual
+ opJumpGT
+ opJumpGE
+ opJumpSet
+)
+
+const (
+ opRetSrcConstant uint16 = iota << 4
+ opRetSrcA
+)
+
+const (
+ opMiscTAX = 0x00
+ opMiscTXA = 0x80
+)
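
As a worked check of the opcode layout above (a sketch, not part of the vendored file): a two-byte absolute load combines the load-A class (0x00), the 2-byte width bit (0x08) and the absolute addressing mode (0x20) into opcode 0x28, which is the decimal 40 that bpf_asm emits for `ldh [42]` in testdata/all_instructions.bpf.

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		// ldh [42]: class loadA | width 2 | absolute mode = 0x00 | 0x08 | 0x20 = 0x28.
		ri, err := bpf.LoadAbsolute{Off: 42, Size: 2}.Assemble()
		if err != nil {
			panic(err)
		}
		fmt.Printf("op=%#x k=%d\n", ri.Op, ri.K) // op=0x28 k=42
	}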
diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go
new file mode 100644
index 000000000..ae62feb53
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package bpf implements marshaling and unmarshaling of programs for the
+Berkeley Packet Filter virtual machine, and provides a Go implementation
+of the virtual machine.
+
+BPF's main use is to specify a packet filter for network taps, so that
+the kernel doesn't have to expensively copy every packet it sees to
+userspace. However, it's been repurposed to other areas where running
+user code in-kernel is needed. For example, Linux's seccomp uses BPF
+to apply security policies to system calls. For simplicity, this
+documentation refers only to packets, but other uses of BPF have their
+own data payloads.
+
+BPF programs run in a restricted virtual machine. It has almost no
+access to kernel functions, and while conditional branches are
+allowed, they can only jump forwards, to guarantee that there are no
+infinite loops.
+
+The virtual machine
+
+The BPF VM is an accumulator machine. Its main register, called
+register A, is an implicit source and destination in all arithmetic
+and logic operations. The machine also has 16 scratch registers for
+temporary storage, and an indirection register (register X) for
+indirect memory access. All registers are 32 bits wide.
+
+Each run of a BPF program is given one packet, which is placed in the
+VM's read-only "main memory". LoadAbsolute and LoadIndirect
+instructions can fetch up to 32 bits at a time into register A for
+examination.
+
+The goal of a BPF program is to produce and return a verdict (uint32),
+which tells the kernel what to do with the packet. In the context of
+packet filtering, the returned value is the number of bytes of the
+packet to forward to userspace, or 0 to ignore the packet. Other
+contexts like seccomp define their own return values.
+
+In order to simplify programs, attempts to read past the end of the
+packet terminate the program execution with a verdict of 0 (ignore
+packet). This means that the vast majority of BPF programs don't need
+to do any explicit bounds checking.
+
+In addition to the bytes of the packet, some BPF programs have access
+to extensions, which are essentially calls to kernel utility
+functions. Currently, the only extensions supported by this package
+are the Linux packet filter extensions.
+
+Examples
+
+This packet filter selects all ARP packets.
+
+ bpf.Assemble([]bpf.Instruction{
+ // Load "EtherType" field from the ethernet header.
+ bpf.LoadAbsolute{Off: 12, Size: 2},
+ // Skip over the next instruction if EtherType is not ARP.
+ bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+ // Verdict is "send up to 4k of the packet to userspace."
+ bpf.RetConstant{Val: 4096},
+ // Verdict is "ignore packet."
+ bpf.RetConstant{Val: 0},
+ })
+
+This packet filter captures a random 1% sample of traffic.
+
+ bpf.Assemble([]bpf.Instruction{
+ // Get a 32-bit random number from the Linux kernel.
+ bpf.LoadExtension{Num: bpf.ExtRand},
+ // 1% dice roll?
+	bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
+ // Capture.
+ bpf.RetConstant{Val: 4096},
+ // Ignore.
+ bpf.RetConstant{Val: 0},
+ })
+
+*/
+package bpf // import "golang.org/x/net/bpf"
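
The examples above only assemble programs; the package also ships a pure-Go interpreter (NewVM and Run, added later in this diff). A small sketch, assuming a hypothetical main package, that runs the ARP filter from the documentation against a synthetic Ethernet header:

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		vm, err := bpf.NewVM([]bpf.Instruction{
			bpf.LoadAbsolute{Off: 12, Size: 2},
			bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
			bpf.RetConstant{Val: 4096},
			bpf.RetConstant{Val: 0},
		})
		if err != nil {
			panic(err)
		}

		// 14-byte Ethernet header with EtherType 0x0806 (ARP) at offset 12.
		frame := make([]byte, 14)
		frame[12], frame[13] = 0x08, 0x06

		n, err := vm.Run(frame)
		if err != nil {
			panic(err)
		}
		fmt.Println(n) // 4096: the frame is accepted
	}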
diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go
new file mode 100644
index 000000000..68ae6f549
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/instructions.go
@@ -0,0 +1,434 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// An Instruction is one instruction executed by the BPF virtual
+// machine.
+type Instruction interface {
+ // Assemble assembles the Instruction into a RawInstruction.
+ Assemble() (RawInstruction, error)
+}
+
+// A RawInstruction is a raw BPF virtual machine instruction.
+type RawInstruction struct {
+ // Operation to execute.
+ Op uint16
+ // For conditional jump instructions, the number of instructions
+ // to skip if the condition is true/false.
+ Jt uint8
+ Jf uint8
+ // Constant parameter. The meaning depends on the Op.
+ K uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
+
+// Disassemble parses ri into an Instruction and returns it. If ri is
+// not recognized by this package, ri itself is returned.
+func (ri RawInstruction) Disassemble() Instruction {
+ switch ri.Op & opMaskCls {
+ case opClsLoadA, opClsLoadX:
+ reg := Register(ri.Op & opMaskLoadDest)
+ sz := 0
+ switch ri.Op & opMaskLoadWidth {
+ case opLoadWidth4:
+ sz = 4
+ case opLoadWidth2:
+ sz = 2
+ case opLoadWidth1:
+ sz = 1
+ default:
+ return ri
+ }
+ switch ri.Op & opMaskLoadMode {
+ case opAddrModeImmediate:
+ if sz != 4 {
+ return ri
+ }
+ return LoadConstant{Dst: reg, Val: ri.K}
+ case opAddrModeScratch:
+ if sz != 4 || ri.K > 15 {
+ return ri
+ }
+ return LoadScratch{Dst: reg, N: int(ri.K)}
+ case opAddrModeAbsolute:
+ return LoadAbsolute{Size: sz, Off: ri.K}
+ case opAddrModeIndirect:
+ return LoadIndirect{Size: sz, Off: ri.K}
+ case opAddrModePacketLen:
+ if sz != 4 {
+ return ri
+ }
+ return LoadExtension{Num: ExtLen}
+ case opAddrModeMemShift:
+ return LoadMemShift{Off: ri.K}
+ default:
+ return ri
+ }
+
+ case opClsStoreA:
+ if ri.Op != opClsStoreA || ri.K > 15 {
+ return ri
+ }
+ return StoreScratch{Src: RegA, N: int(ri.K)}
+
+ case opClsStoreX:
+ if ri.Op != opClsStoreX || ri.K > 15 {
+ return ri
+ }
+ return StoreScratch{Src: RegX, N: int(ri.K)}
+
+ case opClsALU:
+ switch op := ALUOp(ri.Op & opMaskOperator); op {
+ case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
+ if ri.Op&opMaskOperandSrc != 0 {
+ return ALUOpX{Op: op}
+ }
+ return ALUOpConstant{Op: op, Val: ri.K}
+ case aluOpNeg:
+ return NegateA{}
+ default:
+ return ri
+ }
+
+ case opClsJump:
+ if ri.Op&opMaskJumpConst != opClsJump {
+ return ri
+ }
+ switch ri.Op & opMaskJumpCond {
+ case opJumpAlways:
+ return Jump{Skip: ri.K}
+ case opJumpEqual:
+ return JumpIf{
+ Cond: JumpEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpGT:
+ return JumpIf{
+ Cond: JumpGreaterThan,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpGE:
+ return JumpIf{
+ Cond: JumpGreaterOrEqual,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ case opJumpSet:
+ return JumpIf{
+ Cond: JumpBitsSet,
+ Val: ri.K,
+ SkipTrue: ri.Jt,
+ SkipFalse: ri.Jf,
+ }
+ default:
+ return ri
+ }
+
+ case opClsReturn:
+ switch ri.Op {
+ case opClsReturn | opRetSrcA:
+ return RetA{}
+ case opClsReturn | opRetSrcConstant:
+ return RetConstant{Val: ri.K}
+ default:
+ return ri
+ }
+
+ case opClsMisc:
+ switch ri.Op {
+ case opClsMisc | opMiscTAX:
+ return TAX{}
+ case opClsMisc | opMiscTXA:
+ return TXA{}
+ default:
+ return ri
+ }
+
+ default:
+ panic("unreachable") // switch is exhaustive on the bit pattern
+ }
+}
+
+// LoadConstant loads Val into register Dst.
+type LoadConstant struct {
+ Dst Register
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadConstant) Assemble() (RawInstruction, error) {
+ return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
+}
+
+// LoadScratch loads scratch[N] into register Dst.
+type LoadScratch struct {
+ Dst Register
+ N int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadScratch) Assemble() (RawInstruction, error) {
+ if a.N < 0 || a.N > 15 {
+ return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+ }
+ return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
+}
+
+// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
+// register A.
+type LoadAbsolute struct {
+ Off uint32
+ Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadAbsolute) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
+}
+
+// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
+// into register A.
+type LoadIndirect struct {
+ Off uint32
+ Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadIndirect) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
+}
+
+// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]
+// by 4 and stores the result in register X.
+//
+// This instruction is mainly useful to load into X the length of an
+// IPv4 packet header in a single instruction, rather than have to do
+// the arithmetic on the header's first byte by hand.
+type LoadMemShift struct {
+ Off uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadMemShift) Assemble() (RawInstruction, error) {
+ return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
+}
+
+// LoadExtension invokes a linux-specific extension and stores the
+// result in register A.
+type LoadExtension struct {
+ Num Extension
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadExtension) Assemble() (RawInstruction, error) {
+ if a.Num == ExtLen {
+ return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
+ }
+ return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(-0x1000+a.Num))
+}
+
+// StoreScratch stores register Src into scratch[N].
+type StoreScratch struct {
+ Src Register
+ N int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a StoreScratch) Assemble() (RawInstruction, error) {
+ if a.N < 0 || a.N > 15 {
+ return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+ }
+ var op uint16
+ switch a.Src {
+ case RegA:
+ op = opClsStoreA
+ case RegX:
+ op = opClsStoreX
+ default:
+ return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
+ }
+
+ return RawInstruction{
+ Op: op,
+ K: uint32(a.N),
+ }, nil
+}
+
+// ALUOpConstant executes A = A <Op> Val.
+type ALUOpConstant struct {
+ Op ALUOp
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpConstant) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | opALUSrcConstant | uint16(a.Op),
+ K: a.Val,
+ }, nil
+}
+
+// ALUOpX executes A = A <Op> X
+type ALUOpX struct {
+ Op ALUOp
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpX) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | opALUSrcX | uint16(a.Op),
+ }, nil
+}
+
+// NegateA executes A = -A.
+type NegateA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a NegateA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsALU | uint16(aluOpNeg),
+ }, nil
+}
+
+// Jump skips the following Skip instructions in the program.
+type Jump struct {
+ Skip uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a Jump) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsJump | opJumpAlways,
+ K: a.Skip,
+ }, nil
+}
+
+// JumpIf skips the following Skip instructions in the program if A
+// <Cond> Val is true.
+type JumpIf struct {
+ Cond JumpTest
+ Val uint32
+ SkipTrue uint8
+ SkipFalse uint8
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a JumpIf) Assemble() (RawInstruction, error) {
+ var (
+ cond uint16
+ flip bool
+ )
+ switch a.Cond {
+ case JumpEqual:
+ cond = opJumpEqual
+ case JumpNotEqual:
+ cond, flip = opJumpEqual, true
+ case JumpGreaterThan:
+ cond = opJumpGT
+ case JumpLessThan:
+ cond, flip = opJumpGE, true
+ case JumpGreaterOrEqual:
+ cond = opJumpGE
+ case JumpLessOrEqual:
+ cond, flip = opJumpGT, true
+ case JumpBitsSet:
+ cond = opJumpSet
+ case JumpBitsNotSet:
+ cond, flip = opJumpSet, true
+ default:
+ return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
+ }
+ jt, jf := a.SkipTrue, a.SkipFalse
+ if flip {
+ jt, jf = jf, jt
+ }
+ return RawInstruction{
+ Op: opClsJump | cond,
+ Jt: jt,
+ Jf: jf,
+ K: a.Val,
+ }, nil
+}
+
+// RetA exits the BPF program, returning the value of register A.
+type RetA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsReturn | opRetSrcA,
+ }, nil
+}
+
+// RetConstant exits the BPF program, returning a constant value.
+type RetConstant struct {
+ Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetConstant) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsReturn | opRetSrcConstant,
+ K: a.Val,
+ }, nil
+}
+
+// TXA copies the value of register X to register A.
+type TXA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TXA) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsMisc | opMiscTXA,
+ }, nil
+}
+
+// TAX copies the value of register A to register X.
+type TAX struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TAX) Assemble() (RawInstruction, error) {
+ return RawInstruction{
+ Op: opClsMisc | opMiscTAX,
+ }, nil
+}
+
+func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
+ var (
+ cls uint16
+ sz uint16
+ )
+ switch dst {
+ case RegA:
+ cls = opClsLoadA
+ case RegX:
+ cls = opClsLoadX
+ default:
+ return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
+ }
+ switch loadSize {
+ case 1:
+ sz = opLoadWidth1
+ case 2:
+ sz = opLoadWidth2
+ case 4:
+ sz = opLoadWidth4
+ default:
+		return RawInstruction{}, fmt.Errorf("invalid load byte length %d", loadSize)
+ }
+ return RawInstruction{
+ Op: cls | sz | mode,
+ K: k,
+ }, nil
+}
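
One non-obvious detail in JumpIf.Assemble above: classic BPF only has jeq, jgt, jge and jset conditions, so "not equal", "less than", "less or equal" and "bits not set" are encoded as their opposites with the true/false skip counts swapped. A sketch verifying that a less-than jump assembles to a flipped jge, matching the `53 0 7 42` entry bpf_asm produces for `jlt #42,end` in testdata/all_instructions.bpf:

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		// jlt #42 becomes jge #42 with Jt and Jf exchanged.
		ri, err := bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 42, SkipTrue: 7}.Assemble()
		if err != nil {
			panic(err)
		}
		fmt.Println(ri.Op, ri.Jt, ri.Jf, ri.K) // 53 0 7 42
	}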
diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go
new file mode 100644
index 000000000..833d1e175
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/instructions_test.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "io/ioutil"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// This is a direct translation of the program in
+// testdata/all_instructions.txt.
+var allInstructions = []Instruction{
+ LoadConstant{Dst: RegA, Val: 42},
+ LoadConstant{Dst: RegX, Val: 42},
+
+ LoadScratch{Dst: RegA, N: 3},
+ LoadScratch{Dst: RegX, N: 3},
+
+ LoadAbsolute{Off: 42, Size: 1},
+ LoadAbsolute{Off: 42, Size: 2},
+ LoadAbsolute{Off: 42, Size: 4},
+
+ LoadIndirect{Off: 42, Size: 1},
+ LoadIndirect{Off: 42, Size: 2},
+ LoadIndirect{Off: 42, Size: 4},
+
+ LoadMemShift{Off: 42},
+
+ LoadExtension{Num: ExtLen},
+ LoadExtension{Num: ExtProto},
+ LoadExtension{Num: ExtType},
+ LoadExtension{Num: ExtRand},
+
+ StoreScratch{Src: RegA, N: 3},
+ StoreScratch{Src: RegX, N: 3},
+
+ ALUOpConstant{Op: ALUOpAdd, Val: 42},
+ ALUOpConstant{Op: ALUOpSub, Val: 42},
+ ALUOpConstant{Op: ALUOpMul, Val: 42},
+ ALUOpConstant{Op: ALUOpDiv, Val: 42},
+ ALUOpConstant{Op: ALUOpOr, Val: 42},
+ ALUOpConstant{Op: ALUOpAnd, Val: 42},
+ ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},
+ ALUOpConstant{Op: ALUOpShiftRight, Val: 42},
+ ALUOpConstant{Op: ALUOpMod, Val: 42},
+ ALUOpConstant{Op: ALUOpXor, Val: 42},
+
+ ALUOpX{Op: ALUOpAdd},
+ ALUOpX{Op: ALUOpSub},
+ ALUOpX{Op: ALUOpMul},
+ ALUOpX{Op: ALUOpDiv},
+ ALUOpX{Op: ALUOpOr},
+ ALUOpX{Op: ALUOpAnd},
+ ALUOpX{Op: ALUOpShiftLeft},
+ ALUOpX{Op: ALUOpShiftRight},
+ ALUOpX{Op: ALUOpMod},
+ ALUOpX{Op: ALUOpXor},
+
+ NegateA{},
+
+ Jump{Skip: 10},
+ JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},
+ JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},
+ JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},
+ JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},
+ JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},
+ JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},
+ JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
+
+ TAX{},
+ TXA{},
+
+ RetA{},
+ RetConstant{Val: 42},
+}
+var allInstructionsExpected = "testdata/all_instructions.bpf"
+
+// Check that we produce the same output as the canonical bpf_asm
+// linux kernel tool.
+func TestInterop(t *testing.T) {
+ out, err := Assemble(allInstructions)
+ if err != nil {
+ t.Fatalf("assembly of allInstructions program failed: %s", err)
+ }
+ t.Logf("Assembled program is %d instructions long", len(out))
+
+ bs, err := ioutil.ReadFile(allInstructionsExpected)
+ if err != nil {
+ t.Fatalf("reading %s: %s", allInstructionsExpected, err)
+ }
+ // First statement is the number of statements, last statement is
+ // empty. We just ignore both and rely on slice length.
+ stmts := strings.Split(string(bs), ",")
+ if len(stmts)-2 != len(out) {
+ t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions))
+ }
+
+	for i, stmt := range stmts[1 : len(stmts)-1] {
+ nums := strings.Split(stmt, " ")
+ if len(nums) != 4 {
+ t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt)
+ }
+
+ actual := out[i]
+
+ op, err := strconv.ParseUint(nums[0], 10, 16)
+ if err != nil {
+ t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected)
+ }
+ if actual.Op != uint16(op) {
+ t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op)
+ }
+
+ jt, err := strconv.ParseUint(nums[1], 10, 8)
+ if err != nil {
+ t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected)
+ }
+ if actual.Jt != uint8(jt) {
+ t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt)
+ }
+
+ jf, err := strconv.ParseUint(nums[2], 10, 8)
+ if err != nil {
+ t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected)
+ }
+ if actual.Jf != uint8(jf) {
+ t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf)
+ }
+
+ k, err := strconv.ParseUint(nums[3], 10, 32)
+ if err != nil {
+ t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected)
+ }
+ if actual.K != uint32(k) {
+ t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k)
+ }
+ }
+}
+
+// Check that assembly and disassembly match each other.
+//
+// Because we offer "fake" jump conditions that don't appear in the
+// machine code, disassembly won't be a 1:1 match with the original
+// source, although the behavior will be identical. However,
+// reassembling the disassembly should produce an identical program.
+func TestAsmDisasm(t *testing.T) {
+ prog1, err := Assemble(allInstructions)
+ if err != nil {
+ t.Fatalf("assembly of allInstructions program failed: %s", err)
+ }
+ t.Logf("Assembled program is %d instructions long", len(prog1))
+
+ src, allDecoded := Disassemble(prog1)
+ if !allDecoded {
+ t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:")
+ for i, inst := range src {
+ if r, ok := inst.(RawInstruction); ok {
+ t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r)
+ }
+ }
+ }
+
+ prog2, err := Assemble(src)
+ if err != nil {
+ t.Fatalf("assembly of Disassemble(Assemble(allInstructions)) failed: %s", err)
+ }
+
+ if len(prog2) != len(prog1) {
+ t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(prog1), len(prog2))
+ }
+ if !reflect.DeepEqual(prog1, prog2) {
+ t.Errorf("program mutated by disassembly:")
+ for i := range prog2 {
+ if !reflect.DeepEqual(prog1[i], prog2[i]) {
+ t.Logf(" insn %d, s: %#v, p1: %#v, p2: %#v", i+1, allInstructions[i], prog1[i], prog2[i])
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
new file mode 100644
index 000000000..f87144064
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
@@ -0,0 +1 @@
+50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0,
diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
new file mode 100644
index 000000000..304550155
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
@@ -0,0 +1,79 @@
+# This filter is compiled to all_instructions.bpf by the `bpf_asm`
+# tool, which can be found in the linux kernel source tree under
+# tools/net.
+
+# Load immediate
+ld #42
+ldx #42
+
+# Load scratch
+ld M[3]
+ldx M[3]
+
+# Load absolute
+ldb [42]
+ldh [42]
+ld [42]
+
+# Load indirect
+ldb [x + 42]
+ldh [x + 42]
+ld [x + 42]
+
+# Load IPv4 header length
+ldx 4*([42]&0xf)
+
+# Run extension function
+ld #len
+ld #proto
+ld #type
+ld #rand
+
+# Store scratch
+st M[3]
+stx M[3]
+
+# A <op> constant
+add #42
+sub #42
+mul #42
+div #42
+or #42
+and #42
+lsh #42
+rsh #42
+mod #42
+xor #42
+
+# A <op> X
+add x
+sub x
+mul x
+div x
+or x
+and x
+lsh x
+rsh x
+mod x
+xor x
+
+# !A
+neg
+
+# Jumps
+ja end
+jeq #42,prev,end
+jne #42,end
+jlt #42,end
+jle #42,end
+jgt #42,prev,end
+jge #42,prev,end
+jset #42,prev,end
+
+# Register transfers
+tax
+txa
+
+# Returns
+prev: ret a
+end: ret #42
diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go
new file mode 100644
index 000000000..4c656f1e1
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "errors"
+ "fmt"
+)
+
+// A VM is an emulated BPF virtual machine.
+type VM struct {
+ filter []Instruction
+}
+
+// NewVM returns a new VM using the input BPF program.
+func NewVM(filter []Instruction) (*VM, error) {
+ if len(filter) == 0 {
+ return nil, errors.New("one or more Instructions must be specified")
+ }
+
+ for i, ins := range filter {
+ check := len(filter) - (i + 1)
+ switch ins := ins.(type) {
+ // Check for out-of-bounds jumps in instructions
+ case Jump:
+ if check <= int(ins.Skip) {
+ return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
+ }
+ case JumpIf:
+ if check <= int(ins.SkipTrue) {
+ return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
+ }
+ if check <= int(ins.SkipFalse) {
+ return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
+ }
+ // Check for division or modulus by zero
+ case ALUOpConstant:
+ if ins.Val != 0 {
+ break
+ }
+
+ switch ins.Op {
+ case ALUOpDiv, ALUOpMod:
+ return nil, errors.New("cannot divide by zero using ALUOpConstant")
+ }
+ // Check for unknown extensions
+ case LoadExtension:
+ switch ins.Num {
+ case ExtLen:
+ default:
+ return nil, fmt.Errorf("extension %d not implemented", ins.Num)
+ }
+ }
+ }
+
+ // Make sure last instruction is a return instruction
+ switch filter[len(filter)-1].(type) {
+ case RetA, RetConstant:
+ default:
+ return nil, errors.New("BPF program must end with RetA or RetConstant")
+ }
+
+ // Though our VM works using disassembled instructions, we
+ // attempt to assemble the input filter anyway to ensure it is compatible
+ // with an operating system VM.
+ _, err := Assemble(filter)
+
+ return &VM{
+ filter: filter,
+ }, err
+}
+
+// Run runs the VM's BPF program against the input bytes.
+// Run returns the number of bytes accepted by the BPF program, and any errors
+// which occurred while processing the program.
+func (v *VM) Run(in []byte) (int, error) {
+ var (
+ // Registers of the virtual machine
+ regA uint32
+ regX uint32
+ regScratch [16]uint32
+
+ // OK is true if the program should continue processing the next
+ // instruction, or false if not, causing the loop to break
+ ok = true
+ )
+
+ // TODO(mdlayher): implement:
+ // - NegateA:
+ // - would require a change from uint32 registers to int32
+ // registers
+
+ // TODO(mdlayher): add interop tests that check signedness of ALU
+ // operations against kernel implementation, and make sure Go
+ // implementation matches behavior
+
+ for i := 0; i < len(v.filter) && ok; i++ {
+ ins := v.filter[i]
+
+ switch ins := ins.(type) {
+ case ALUOpConstant:
+ regA = aluOpConstant(ins, regA)
+ case ALUOpX:
+ regA, ok = aluOpX(ins, regA, regX)
+ case Jump:
+ i += int(ins.Skip)
+ case JumpIf:
+ jump := jumpIf(ins, regA)
+ i += jump
+ case LoadAbsolute:
+ regA, ok = loadAbsolute(ins, in)
+ case LoadConstant:
+ regA, regX = loadConstant(ins, regA, regX)
+ case LoadExtension:
+ regA = loadExtension(ins, in)
+ case LoadIndirect:
+ regA, ok = loadIndirect(ins, in, regX)
+ case LoadMemShift:
+ regX, ok = loadMemShift(ins, in)
+ case LoadScratch:
+ regA, regX = loadScratch(ins, regScratch, regA, regX)
+ case RetA:
+ return int(regA), nil
+ case RetConstant:
+ return int(ins.Val), nil
+ case StoreScratch:
+ regScratch = storeScratch(ins, regScratch, regA, regX)
+ case TAX:
+ regX = regA
+ case TXA:
+ regA = regX
+ default:
+ return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
+ }
+ }
+
+ return 0, nil
+}
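
A short sketch of the out-of-bounds behavior that Run implements via the ok flag: a load past the end of the input stops the loop, and the final return yields a verdict of 0, as promised in the package documentation. The main wrapper is illustrative only.

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		vm, err := bpf.NewVM([]bpf.Instruction{
			bpf.LoadAbsolute{Off: 100, Size: 4},
			bpf.RetConstant{Val: 4096},
		})
		if err != nil {
			panic(err)
		}

		// Only 16 bytes of input, so the load at offset 100 fails and the
		// program terminates with verdict 0 instead of reaching RetConstant.
		n, _ := vm.Run(make([]byte, 16))
		fmt.Println(n) // 0
	}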
diff --git a/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/golang.org/x/net/bpf/vm_aluop_test.go
new file mode 100644
index 000000000..16678244a
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_aluop_test.go
@@ -0,0 +1,512 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMALUOpAdd(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAdd,
+ Val: 3,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 8, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 3, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpSub(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ bpf.ALUOpX{
+ Op: bpf.ALUOpSub,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpMul(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMul,
+ Val: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 6, 2, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpDiv(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpDiv,
+ Val: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 20, 2, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpDiv,
+ Val: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot divide by zero using ALUOpConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMALUOpDivByZeroALUOpX(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 0 into X
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ // Load byte 1 into A
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Attempt to perform 1/0
+ bpf.ALUOpX{
+ Op: bpf.ALUOpDiv,
+ },
+ // Return 4 bytes if program does not terminate
+ bpf.LoadConstant{
+ Val: 12,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpOr(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpOr,
+ Val: 0x01,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x10, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0xff,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 9, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpAnd(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAnd,
+ Val: 0x0019,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xaa, 0x09,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpShiftLeft(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpShiftLeft,
+ Val: 0x01,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x02,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0xaa,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpShiftRight(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpShiftRight,
+ Val: 0x01,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x04,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x08, 0xff, 0xff,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpMod(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMod,
+ Val: 20,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 30, 0, 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpModByZeroALUOpConstant(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpMod,
+ Val: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot divide by zero using ALUOpConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMALUOpModByZeroALUOpX(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 0 into X
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.TAX{},
+ // Load byte 1 into A
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Attempt to perform 1%0
+ bpf.ALUOpX{
+ Op: bpf.ALUOpMod,
+ },
+ // Return 4 bytes if program does not terminate
+ bpf.LoadConstant{
+ Val: 12,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 3, 4,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpXor(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpXor,
+ Val: 0x0a,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x01,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x00, 0x00, 0x00,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMALUOpUnknown(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.ALUOpConstant{
+ Op: bpf.ALUOpAdd,
+ Val: 1,
+ },
+ // Verify that an unknown operation is a no-op
+ bpf.ALUOpConstant{
+ Op: 100,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 0x02,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go
new file mode 100644
index 000000000..426362361
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go
@@ -0,0 +1,192 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv4"
+)
+
+// A virtualMachine is a BPF virtual machine which can process an
+// input packet against a BPF program and render a verdict.
+type virtualMachine interface {
+ Run(in []byte) (int, error)
+}
+
+// canUseOSVM indicates if the OS BPF VM is available on this platform.
+func canUseOSVM() bool {
+ // OS BPF VM can only be used on platforms where x/net/ipv4 supports
+ // attaching a BPF program to a socket.
+ switch runtime.GOOS {
+ case "linux":
+ return true
+ }
+
+ return false
+}
+
+// All BPF tests against both the Go VM and OS VM are assumed to
+// be used with a UDP socket. As a result, the entire contents
+// of a UDP datagram are sent through the BPF program, but only
+// the body after the UDP header will ever be returned in output.
+
+// testVM sets up a Go BPF VM, and if available, a native OS BPF VM
+// for integration testing.
+func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) {
+ goVM, err := bpf.NewVM(filter)
+ if err != nil {
+ // Some tests expect an error, so this error must be returned
+ // instead of fatally exiting the test
+ return nil, nil, err
+ }
+
+ mvm := &multiVirtualMachine{
+ goVM: goVM,
+
+ t: t,
+ }
+
+ // If available, add the OS VM for tests which verify that both the Go
+ // VM and OS VM have exactly the same output for the same input program
+ // and packet.
+ done := func() {}
+ if canUseOSVM() {
+ osVM, osVMDone := testOSVM(t, filter)
+ done = func() { osVMDone() }
+ mvm.osVM = osVM
+ }
+
+ return mvm, done, nil
+}
+
+// udpHeaderLen is the length of a UDP header.
+const udpHeaderLen = 8
+
+// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM
+// and the native OS VM, if the OS VM is available.
+type multiVirtualMachine struct {
+ goVM virtualMachine
+ osVM virtualMachine
+
+ t *testing.T
+}
+
+func (mvm *multiVirtualMachine) Run(in []byte) (int, error) {
+ if len(in) < udpHeaderLen {
+ mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d",
+ udpHeaderLen, len(in))
+ }
+
+ // All tests have a UDP header as part of input, because the OS VM
+ // packets always will. For the Go VM, this output is trimmed before
+ // being sent back to tests.
+ goOut, goErr := mvm.goVM.Run(in)
+ if goOut >= udpHeaderLen {
+ goOut -= udpHeaderLen
+ }
+
+ // If Go output is larger than the size of the packet, packet filtering
+ // interop tests must trim the output bytes to the length of the packet.
+ // The BPF VM should not do this on its own, as other uses of it do
+ // not trim the output byte count.
+ trim := len(in) - udpHeaderLen
+ if goOut > trim {
+ goOut = trim
+ }
+
+ // When the OS VM is not available, process using the Go VM alone
+ if mvm.osVM == nil {
+ return goOut, goErr
+ }
+
+ // The OS VM will apply its own UDP header, so remove the pseudo header
+ // that the Go VM needs.
+ osOut, err := mvm.osVM.Run(in[udpHeaderLen:])
+ if err != nil {
+ mvm.t.Fatalf("error while running OS VM: %v", err)
+ }
+
+ // Verify both VMs return same number of bytes
+ var mismatch bool
+ if goOut != osOut {
+ mismatch = true
+ mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut)
+ }
+
+ if mismatch {
+ mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match")
+ }
+
+ return goOut, goErr
+}
+
+// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for
+// processing BPF programs.
+type osVirtualMachine struct {
+ l net.PacketConn
+ s net.Conn
+}
+
+// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting
+// packets into a UDP listener with a BPF program attached to it.
+func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) {
+ l, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("failed to open OS VM UDP listener: %v", err)
+ }
+
+ prog, err := bpf.Assemble(filter)
+ if err != nil {
+ t.Fatalf("failed to compile BPF program: %v", err)
+ }
+
+ p := ipv4.NewPacketConn(l)
+ if err = p.SetBPF(prog); err != nil {
+ t.Fatalf("failed to attach BPF program to listener: %v", err)
+ }
+
+ s, err := net.Dial("udp4", l.LocalAddr().String())
+ if err != nil {
+ t.Fatalf("failed to dial connection to listener: %v", err)
+ }
+
+ done := func() {
+ _ = s.Close()
+ _ = l.Close()
+ }
+
+ return &osVirtualMachine{
+ l: l,
+ s: s,
+ }, done
+}
+
+// Run sends the input bytes into the OS's BPF VM and returns its verdict.
+func (vm *osVirtualMachine) Run(in []byte) (int, error) {
+ go func() {
+ _, _ = vm.s.Write(in)
+ }()
+
+ vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond))
+
+ var b [512]byte
+ n, _, err := vm.l.ReadFrom(b[:])
+ if err != nil {
+ // A timeout indicates that BPF filtered out the packet, and thus,
+		// no data should be returned.
+ if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+ return n, nil
+ }
+
+ return n, err
+ }
+
+ return n, nil
+}
diff --git a/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/golang.org/x/net/bpf/vm_extension_test.go
new file mode 100644
index 000000000..7a48c82f3
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_extension_test.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMLoadExtensionNotImplemented(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadExtension{
+ Num: 100,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "extension 100 not implemented" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadExtensionExtLen(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadExtension{
+ Num: bpf.ExtLen,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go
new file mode 100644
index 000000000..516f9462b
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_instructions.go
@@ -0,0 +1,174 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
+ return aluOpCommon(ins.Op, regA, ins.Val)
+}
+
+func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
+ // Guard against division or modulus by zero by terminating
+ // the program, as the OS BPF VM does
+ if regX == 0 {
+ switch ins.Op {
+ case ALUOpDiv, ALUOpMod:
+ return 0, false
+ }
+ }
+
+ return aluOpCommon(ins.Op, regA, regX), true
+}
+
+func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
+ switch op {
+ case ALUOpAdd:
+ return regA + value
+ case ALUOpSub:
+ return regA - value
+ case ALUOpMul:
+ return regA * value
+ case ALUOpDiv:
+ // Division by zero not permitted by NewVM and aluOpX checks
+ return regA / value
+ case ALUOpOr:
+ return regA | value
+ case ALUOpAnd:
+ return regA & value
+ case ALUOpShiftLeft:
+ return regA << value
+ case ALUOpShiftRight:
+ return regA >> value
+ case ALUOpMod:
+ // Modulus by zero not permitted by NewVM and aluOpX checks
+ return regA % value
+ case ALUOpXor:
+ return regA ^ value
+ default:
+ return regA
+ }
+}
+
+func jumpIf(ins JumpIf, value uint32) int {
+ var ok bool
+ inV := uint32(ins.Val)
+
+ switch ins.Cond {
+ case JumpEqual:
+ ok = value == inV
+ case JumpNotEqual:
+ ok = value != inV
+ case JumpGreaterThan:
+ ok = value > inV
+ case JumpLessThan:
+ ok = value < inV
+ case JumpGreaterOrEqual:
+ ok = value >= inV
+ case JumpLessOrEqual:
+ ok = value <= inV
+ case JumpBitsSet:
+ ok = (value & inV) != 0
+ case JumpBitsNotSet:
+ ok = (value & inV) == 0
+ }
+
+ if ok {
+ return int(ins.SkipTrue)
+ }
+
+ return int(ins.SkipFalse)
+}
+
+func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
+ offset := int(ins.Off)
+ size := int(ins.Size)
+
+ return loadCommon(in, offset, size)
+}
+
+func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
+ switch ins.Dst {
+ case RegA:
+ regA = ins.Val
+ case RegX:
+ regX = ins.Val
+ }
+
+ return regA, regX
+}
+
+func loadExtension(ins LoadExtension, in []byte) uint32 {
+ switch ins.Num {
+ case ExtLen:
+ return uint32(len(in))
+ default:
+ panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
+ }
+}
+
+func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
+ offset := int(ins.Off) + int(regX)
+ size := int(ins.Size)
+
+ return loadCommon(in, offset, size)
+}
+
+func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
+ offset := int(ins.Off)
+
+ if !inBounds(len(in), offset, 0) {
+ return 0, false
+ }
+
+ // Mask off high 4 bits and multiply low 4 bits by 4
+ return uint32(in[offset]&0x0f) * 4, true
+}
+
+func inBounds(inLen int, offset int, size int) bool {
+ return offset+size <= inLen
+}
+
+func loadCommon(in []byte, offset int, size int) (uint32, bool) {
+ if !inBounds(len(in), offset, size) {
+ return 0, false
+ }
+
+ switch size {
+ case 1:
+ return uint32(in[offset]), true
+ case 2:
+ return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
+ case 4:
+ return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
+ default:
+ panic(fmt.Sprintf("invalid load size: %d", size))
+ }
+}
+
+func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
+ switch ins.Dst {
+ case RegA:
+ regA = regScratch[ins.N]
+ case RegX:
+ regX = regScratch[ins.N]
+ }
+
+ return regA, regX
+}
+
+func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
+ switch ins.Src {
+ case RegA:
+ regScratch[ins.N] = regA
+ case RegX:
+ regScratch[ins.N] = regX
+ }
+
+ return regScratch
+}
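
As a worked example of loadMemShift above, assuming an input that starts with the usual IPv4 version/IHL octet 0x45: the low nibble (5) times 4 gives the 20-byte header length, which the sketch below surfaces as the VM's verdict via TXA and RetA.

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		vm, err := bpf.NewVM([]bpf.Instruction{
			bpf.LoadMemShift{Off: 0}, // X = (packet[0] & 0x0f) * 4
			bpf.TXA{},                // A = X
			bpf.RetA{},
		})
		if err != nil {
			panic(err)
		}

		n, _ := vm.Run([]byte{0x45, 0x00, 0x00, 0x54})
		fmt.Println(n) // 20
	}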
diff --git a/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/golang.org/x/net/bpf/vm_jump_test.go
new file mode 100644
index 000000000..e0a3a988b
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_jump_test.go
@@ -0,0 +1,380 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMJumpOne(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.Jump{
+ Skip: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.Jump{
+ Skip: 1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfTrueOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ SkipTrue: 2,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfFalseOutOfProgram(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ SkipFalse: 3,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMJumpIfEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 1,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfNotEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpNotEqual,
+ Val: 1,
+ SkipFalse: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfGreaterThan(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpGreaterThan,
+ Val: 0x00010202,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfLessThan(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpLessThan,
+ Val: 0xff010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfGreaterOrEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpGreaterOrEqual,
+ Val: 0x00010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfLessOrEqual(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 4,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpLessOrEqual,
+ Val: 0xff010203,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 12,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 4, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfBitsSet(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpBitsSet,
+ Val: 0x1122,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x02,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMJumpIfBitsNotSet(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.JumpIf{
+ Cond: bpf.JumpBitsNotSet,
+ Val: 0x1221,
+ SkipTrue: 1,
+ },
+ bpf.RetConstant{
+ Val: 0,
+ },
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x02,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
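
The jump tests above drive each JumpTest condition through the package's test harness. As a standalone sketch (not part of the vendored suite), the same SkipTrue mechanics look like this when used with bpf.NewVM directly; the byte value 0x42 and the return sizes are arbitrary choices made only for illustration.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// Accept a packet only when its first byte equals 0x42: SkipTrue
	// skips the "reject" instruction when the comparison holds.
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x42, SkipTrue: 1},
		bpf.RetConstant{Val: 0},    // condition false: drop
		bpf.RetConstant{Val: 4096}, // condition true: accept
	})
	if err != nil {
		panic(err)
	}

	out, err := vm.Run([]byte{0x42, 0x00, 0x01})
	if err != nil {
		panic(err)
	}
	fmt.Println("accepted:", out > 0) // accepted: true
}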
diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go
new file mode 100644
index 000000000..04578b66b
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_load_test.go
@@ -0,0 +1,246 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "net"
+ "testing"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv4"
+)
+
+func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 100,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1, 2, 3,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Size: 5,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid load byte length 0" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadConstantOK(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegX,
+ Val: 9,
+ },
+ bpf.TXA{},
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadIndirectOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadIndirect{
+ Off: 100,
+ Size: 1,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadMemShiftOutOfBounds(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadMemShift{
+ Off: 100,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+const (
+ dhcp4Port = 53
+)
+
+func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) {
+ vm, in, done := testDHCPv4(t)
+ defer done()
+
+ // Append mostly empty UDP header with incorrect DHCPv4 port
+ in = append(in, []byte{
+ 0, 0,
+ 0, dhcp4Port + 1,
+ 0, 0,
+ 0, 0,
+ }...)
+
+ out, err := vm.Run(in)
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 0, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) {
+ vm, in, done := testDHCPv4(t)
+ defer done()
+
+ // Append mostly empty UDP header with correct DHCPv4 port
+ in = append(in, []byte{
+ 0, 0,
+ 0, dhcp4Port,
+ 0, 0,
+ 0, 0,
+ }...)
+
+ out, err := vm.Run(in)
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := len(in)-8, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) {
+ // DHCPv4 test data courtesy of David Anderson:
+ // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load IPv4 packet length
+ bpf.LoadMemShift{Off: 8},
+ // Get UDP dport
+ bpf.LoadIndirect{Off: 8 + 2, Size: 2},
+ // Correct dport?
+ bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1},
+ // Accept
+ bpf.RetConstant{Val: 1500},
+ // Ignore
+ bpf.RetConstant{Val: 0},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+
+ // Minimal requirements to make a valid IPv4 header
+ h := &ipv4.Header{
+ Len: ipv4.HeaderLen,
+ Src: net.IPv4(192, 168, 1, 1),
+ Dst: net.IPv4(192, 168, 1, 2),
+ }
+ hb, err := h.Marshal()
+ if err != nil {
+ t.Fatalf("failed to marshal IPv4 header: %v", err)
+ }
+
+ hb = append([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ }, hb...)
+
+ return vm, hb, done
+}
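
testDHCPv4 relies on LoadMemShift to step over a variable-length IPv4 header before LoadIndirect reads the UDP destination port. As a rough sketch outside the vendored tests, the same instruction sequence can also be assembled into raw BPF with bpf.Assemble for attachment to a socket; the 8-byte offset mirrors the tests' synthetic framing, and the port value 67 is an assumption made only for this example.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	raw, err := bpf.Assemble([]bpf.Instruction{
		// X = 4 * (packet[8] & 0x0f): the IPv4 header length, assuming
		// 8 bytes of framing precede the IP header as in the tests.
		bpf.LoadMemShift{Off: 8},
		// A = UDP destination port, 2 bytes into the UDP header.
		bpf.LoadIndirect{Off: 8 + 2, Size: 2},
		// Keep the datagram only when the port matches.
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 67, SkipFalse: 1},
		bpf.RetConstant{Val: 1500},
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("assembled %d raw BPF instructions\n", len(raw)) // assembled 5 raw BPF instructions
}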
diff --git a/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/golang.org/x/net/bpf/vm_ret_test.go
new file mode 100644
index 000000000..2d86eae3e
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_ret_test.go
@@ -0,0 +1,115 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMRetA(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 9,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetALargerThanInput(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 2,
+ },
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 255,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetConstant(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.RetConstant{
+ Val: 9,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 1, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMRetConstantLargerThanInput(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.RetConstant{
+ Val: 16,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/golang.org/x/net/bpf/vm_scratch_test.go
new file mode 100644
index 000000000..e600e3c28
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_scratch_test.go
@@ -0,0 +1,247 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: -1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 16,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchUnknownSourceRegister(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.StoreScratch{
+ Src: 100,
+ N: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid source register 100" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: -1,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: 16,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadScratch{
+ Dst: 100,
+ N: 0,
+ },
+ bpf.RetA{},
+ })
+ if errStr(err) != "assembling instruction 1: invalid target register 100" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMStoreScratchLoadScratchOneValue(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 255
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ // Copy to X and store in scratch[0]
+ bpf.TAX{},
+ bpf.StoreScratch{
+ Src: bpf.RegX,
+ N: 0,
+ },
+ // Load byte 1
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Overwrite 1 with 255 from scratch[0]
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 0,
+ },
+ // Return 255
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 255, 1, 2,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 3, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
+
+func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ // Load byte 10
+ bpf.LoadAbsolute{
+ Off: 8,
+ Size: 1,
+ },
+ // Store in scratch[0]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 0,
+ },
+ // Load byte 20
+ bpf.LoadAbsolute{
+ Off: 9,
+ Size: 1,
+ },
+ // Store in scratch[1]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 1,
+ },
+ // Load byte 30
+ bpf.LoadAbsolute{
+ Off: 10,
+ Size: 1,
+ },
+ // Store in scratch[2]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 2,
+ },
+ // Load byte 1
+ bpf.LoadAbsolute{
+ Off: 11,
+ Size: 1,
+ },
+ // Store in scratch[3]
+ bpf.StoreScratch{
+ Src: bpf.RegA,
+ N: 3,
+ },
+ // Load in byte 10 to X
+ bpf.LoadScratch{
+ Dst: bpf.RegX,
+ N: 0,
+ },
+ // Copy X -> A
+ bpf.TXA{},
+ // Verify value is 10
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 10,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // Load in byte 20 to A
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 1,
+ },
+ // Verify value is 20
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 20,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // Load in byte 30 to A
+ bpf.LoadScratch{
+ Dst: bpf.RegA,
+ N: 2,
+ },
+ // Verify value is 30
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: 30,
+ SkipTrue: 1,
+ },
+ // Fail test if incorrect
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // Return first two bytes on success
+ bpf.RetConstant{
+ Val: 10,
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to load BPF program: %v", err)
+ }
+ defer done()
+
+ out, err := vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 10, 20, 30, 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+ if want, got := 2, out; want != got {
+ t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
+ want, got)
+ }
+}
diff --git a/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/golang.org/x/net/bpf/vm_test.go
new file mode 100644
index 000000000..6bd4dd5c3
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm_test.go
@@ -0,0 +1,144 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/net/bpf"
+)
+
+var _ bpf.Instruction = unknown{}
+
+type unknown struct{}
+
+func (unknown) Assemble() (bpf.RawInstruction, error) {
+ return bpf.RawInstruction{}, nil
+}
+
+func TestVMUnknownInstruction(t *testing.T) {
+ vm, done, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegA,
+ Val: 100,
+ },
+ // Should terminate the program with an error immediately
+ unknown{},
+ bpf.RetA{},
+ })
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ defer done()
+
+ _, err = vm.Run([]byte{
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00,
+ })
+ if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" {
+ t.Fatalf("unexpected error while running program: %v", err)
+ }
+}
+
+func TestVMNoReturnInstruction(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{
+ bpf.LoadConstant{
+ Dst: bpf.RegA,
+ Val: 1,
+ },
+ })
+ if errStr(err) != "BPF program must end with RetA or RetConstant" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func TestVMNoInputInstructions(t *testing.T) {
+ _, _, err := testVM(t, []bpf.Instruction{})
+ if errStr(err) != "one or more Instructions must be specified" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame
+// as input and checking its EtherType to determine if it should be accepted.
+func ExampleNewVM() {
+ // Offset | Length | Comment
+ // -------------------------
+ // 00 | 06 | Ethernet destination MAC address
+ // 06 | 06 | Ethernet source MAC address
+ // 12 | 02 | Ethernet EtherType
+ const (
+ etOff = 12
+ etLen = 2
+
+ etARP = 0x0806
+ )
+
+	// Set up a VM to filter traffic based on whether its EtherType
+ // matches the ARP EtherType.
+ vm, err := bpf.NewVM([]bpf.Instruction{
+ // Load EtherType value from Ethernet header
+ bpf.LoadAbsolute{
+ Off: etOff,
+ Size: etLen,
+ },
+ // If EtherType is equal to the ARP EtherType, jump to allow
+ // packet to be accepted
+ bpf.JumpIf{
+ Cond: bpf.JumpEqual,
+ Val: etARP,
+ SkipTrue: 1,
+ },
+ // EtherType does not match the ARP EtherType
+ bpf.RetConstant{
+ Val: 0,
+ },
+ // EtherType matches the ARP EtherType, accept up to 1500
+ // bytes of packet
+ bpf.RetConstant{
+ Val: 1500,
+ },
+ })
+ if err != nil {
+ panic(fmt.Sprintf("failed to load BPF program: %v", err))
+ }
+
+ // Create an Ethernet frame with the ARP EtherType for testing
+ frame := []byte{
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
+ 0x08, 0x06,
+ // Payload omitted for brevity
+ }
+
+ // Run our VM's BPF program using the Ethernet frame as input
+ out, err := vm.Run(frame)
+ if err != nil {
+ panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err))
+ }
+
+ // BPF VM can return a byte count greater than the number of input
+ // bytes, so trim the output to match the input byte length
+ if out > len(frame) {
+ out = len(frame)
+ }
+
+ fmt.Printf("out: %d bytes", out)
+
+ // Output:
+ // out: 14 bytes
+}
+
+// errStr returns the string representation of an error, or
+// "<nil>" if it is nil.
+func errStr(err error) string {
+ if err == nil {
+ return "<nil>"
+ }
+
+ return err.Error()
+}
diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg
new file mode 100644
index 000000000..3f8b14b64
--- /dev/null
+++ b/vendor/golang.org/x/net/codereview.cfg
@@ -0,0 +1 @@
+issuerepo: golang/go
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 000000000..134654cf7
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,156 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
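
Following the conventions described in the package comment above, a Context travels as the first parameter and callers derive bounded copies when they need a deadline. The fetch helper below is hypothetical and exists only to sketch that pattern.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// fetch stands in for any context-aware operation: it finishes its
// pretend work unless the Context is done first.
func fetch(ctx context.Context, name string) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err() // Canceled or DeadlineExceeded
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel() // release the timer as soon as the work is finished

	if err := fetch(ctx, "example"); err != nil {
		fmt.Println("fetch aborted:", err) // context deadline exceeded
	}
}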
diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go
new file mode 100644
index 000000000..9554dcf71
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context_test.go
@@ -0,0 +1,577 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// otherContext is a Context that's not one of the types defined in context.go.
+// This lets us test code paths that differ based on the underlying type of the
+// Context.
+type otherContext struct {
+ Context
+}
+
+func TestBackground(t *testing.T) {
+ c := Background()
+ if c == nil {
+ t.Fatalf("Background returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.Background"; got != want {
+ t.Errorf("Background().String() = %q want %q", got, want)
+ }
+}
+
+func TestTODO(t *testing.T) {
+ c := TODO()
+ if c == nil {
+ t.Fatalf("TODO returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.TODO"; got != want {
+ t.Errorf("TODO().String() = %q want %q", got, want)
+ }
+}
+
+func TestWithCancel(t *testing.T) {
+ c1, cancel := WithCancel(Background())
+
+ if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
+ t.Errorf("c1.String() = %q want %q", got, want)
+ }
+
+ o := otherContext{c1}
+ c2, _ := WithCancel(o)
+ contexts := []Context{c1, o, c2}
+
+ for i, c := range contexts {
+ if d := c.Done(); d == nil {
+ t.Errorf("c[%d].Done() == %v want non-nil", i, d)
+ }
+ if e := c.Err(); e != nil {
+ t.Errorf("c[%d].Err() == %v want nil", i, e)
+ }
+
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ }
+
+ cancel()
+ time.Sleep(100 * time.Millisecond) // let cancelation propagate
+
+ for i, c := range contexts {
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
+ }
+ }
+}
+
+func TestParentFinishesChild(t *testing.T) {
+ // Context tree:
+ // parent -> cancelChild
+ // parent -> valueChild -> timerChild
+ parent, cancel := WithCancel(Background())
+ cancelChild, stop := WithCancel(parent)
+ defer stop()
+ valueChild := WithValue(parent, "key", "value")
+ timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
+ defer stop()
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-cancelChild.Done():
+ t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
+ case x := <-timerChild.Done():
+ t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
+ case x := <-valueChild.Done():
+ t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ // The parent's children should contain the two cancelable children.
+ pc := parent.(*cancelCtx)
+ cc := cancelChild.(*cancelCtx)
+ tc := timerChild.(*timerCtx)
+ pc.mu.Lock()
+ if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
+ t.Errorf("bad linkage: pc.children = %v, want %v and %v",
+ pc.children, cc, tc)
+ }
+ pc.mu.Unlock()
+
+ if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+ if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+
+ cancel()
+
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+
+ // parent and children should all be finished.
+ check := func(ctx Context, name string) {
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
+ }
+ if e := ctx.Err(); e != Canceled {
+ t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
+ }
+ }
+ check(parent, "parent")
+ check(cancelChild, "cancelChild")
+ check(valueChild, "valueChild")
+ check(timerChild, "timerChild")
+
+	// A child derived from an already-canceled parent should start out canceled.
+ precanceledChild := WithValue(parent, "key", "value")
+ select {
+ case <-precanceledChild.Done():
+ default:
+ t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
+ }
+ if e := precanceledChild.Err(); e != Canceled {
+ t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
+ }
+}
+
+func TestChildFinishesFirst(t *testing.T) {
+ cancelable, stop := WithCancel(Background())
+ defer stop()
+ for _, parent := range []Context{Background(), cancelable} {
+ child, cancel := WithCancel(parent)
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-child.Done():
+ t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ cc := child.(*cancelCtx)
+ pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
+ if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
+ t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
+ }
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 1 || !pc.children[cc] {
+ t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
+ }
+ pc.mu.Unlock()
+ }
+
+ cancel()
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+ }
+
+ // child should be finished.
+ select {
+ case <-child.Done():
+ default:
+ t.Errorf("<-child.Done() blocked, but shouldn't have")
+ }
+ if e := child.Err(); e != Canceled {
+ t.Errorf("child.Err() == %v want %v", e, Canceled)
+ }
+
+ // parent should not be finished.
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if e := parent.Err(); e != nil {
+ t.Errorf("parent.Err() == %v want nil", e)
+ }
+ }
+}
+
+func testDeadline(c Context, wait time.Duration, t *testing.T) {
+ select {
+ case <-time.After(wait):
+ t.Fatalf("context should have timed out")
+ case <-c.Done():
+ }
+ if e := c.Err(); e != DeadlineExceeded {
+ t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
+ }
+}
+
+func TestDeadline(t *testing.T) {
+ c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 200*time.Millisecond, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ o := otherContext{c}
+ testDeadline(o, 200*time.Millisecond, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ o = otherContext{c}
+ c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
+ testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestTimeout(t *testing.T) {
+ c, _ := WithTimeout(Background(), 100*time.Millisecond)
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 200*time.Millisecond, t)
+
+ c, _ = WithTimeout(Background(), 100*time.Millisecond)
+ o := otherContext{c}
+ testDeadline(o, 200*time.Millisecond, t)
+
+ c, _ = WithTimeout(Background(), 100*time.Millisecond)
+ o = otherContext{c}
+ c, _ = WithTimeout(o, 300*time.Millisecond)
+ testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestCanceledTimeout(t *testing.T) {
+ c, _ := WithTimeout(Background(), 200*time.Millisecond)
+ o := otherContext{c}
+ c, cancel := WithTimeout(o, 400*time.Millisecond)
+ cancel()
+ time.Sleep(100 * time.Millisecond) // let cancelation propagate
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c.Done() blocked, but shouldn't have")
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c.Err() == %v want %v", e, Canceled)
+ }
+}
+
+type key1 int
+type key2 int
+
+var k1 = key1(1)
+var k2 = key2(1) // same int as k1, different type
+var k3 = key2(3) // same type as k2, different int
+
+func TestValues(t *testing.T) {
+ check := func(c Context, nm, v1, v2, v3 string) {
+ if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
+ t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
+ }
+ if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
+ t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
+ }
+ if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
+ t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
+ }
+ }
+
+ c0 := Background()
+ check(c0, "c0", "", "", "")
+
+ c1 := WithValue(Background(), k1, "c1k1")
+ check(c1, "c1", "c1k1", "", "")
+
+ if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
+ t.Errorf("c.String() = %q want %q", got, want)
+ }
+
+ c2 := WithValue(c1, k2, "c2k2")
+ check(c2, "c2", "c1k1", "c2k2", "")
+
+ c3 := WithValue(c2, k3, "c3k3")
+ check(c3, "c2", "c1k1", "c2k2", "c3k3")
+
+ c4 := WithValue(c3, k1, nil)
+ check(c4, "c4", "", "c2k2", "c3k3")
+
+ o0 := otherContext{Background()}
+ check(o0, "o0", "", "", "")
+
+ o1 := otherContext{WithValue(Background(), k1, "c1k1")}
+ check(o1, "o1", "c1k1", "", "")
+
+ o2 := WithValue(o1, k2, "o2k2")
+ check(o2, "o2", "c1k1", "o2k2", "")
+
+ o3 := otherContext{c4}
+ check(o3, "o3", "", "c2k2", "c3k3")
+
+ o4 := WithValue(o3, k3, nil)
+ check(o4, "o4", "", "c2k2", "")
+}
+
+func TestAllocs(t *testing.T) {
+ bg := Background()
+ for _, test := range []struct {
+ desc string
+ f func()
+ limit float64
+ gccgoLimit float64
+ }{
+ {
+ desc: "Background()",
+ f: func() { Background() },
+ limit: 0,
+ gccgoLimit: 0,
+ },
+ {
+ desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
+ f: func() {
+ c := WithValue(bg, k1, nil)
+ c.Value(k1)
+ },
+ limit: 3,
+ gccgoLimit: 3,
+ },
+ {
+ desc: "WithTimeout(bg, 15*time.Millisecond)",
+ f: func() {
+ c, _ := WithTimeout(bg, 15*time.Millisecond)
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 16,
+ },
+ {
+ desc: "WithCancel(bg)",
+ f: func() {
+ c, cancel := WithCancel(bg)
+ cancel()
+ <-c.Done()
+ },
+ limit: 5,
+ gccgoLimit: 8,
+ },
+ {
+ desc: "WithTimeout(bg, 100*time.Millisecond)",
+ f: func() {
+ c, cancel := WithTimeout(bg, 100*time.Millisecond)
+ cancel()
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 25,
+ },
+ } {
+ limit := test.limit
+ if runtime.Compiler == "gccgo" {
+ // gccgo does not yet do escape analysis.
+ // TODO(iant): Remove this when gccgo does do escape analysis.
+ limit = test.gccgoLimit
+ }
+ if n := testing.AllocsPerRun(100, test.f); n > limit {
+ t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
+ }
+ }
+}
+
+func TestSimultaneousCancels(t *testing.T) {
+ root, cancel := WithCancel(Background())
+ m := map[Context]CancelFunc{root: cancel}
+ q := []Context{root}
+ // Create a tree of contexts.
+ for len(q) != 0 && len(m) < 100 {
+ parent := q[0]
+ q = q[1:]
+ for i := 0; i < 4; i++ {
+ ctx, cancel := WithCancel(parent)
+ m[ctx] = cancel
+ q = append(q, ctx)
+ }
+ }
+ // Start all the cancels in a random order.
+ var wg sync.WaitGroup
+ wg.Add(len(m))
+ for _, cancel := range m {
+ go func(cancel CancelFunc) {
+ cancel()
+ wg.Done()
+ }(cancel)
+ }
+ // Wait on all the contexts in a random order.
+ for ctx := range m {
+ select {
+ case <-ctx.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
+ }
+ }
+ // Wait for all the cancel functions to return.
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+ select {
+ case <-done:
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
+ }
+}
+
+func TestInterlockedCancels(t *testing.T) {
+ parent, cancelParent := WithCancel(Background())
+ child, cancelChild := WithCancel(parent)
+ go func() {
+ parent.Done()
+ cancelChild()
+ }()
+ cancelParent()
+ select {
+ case <-child.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
+ }
+}
+
+func TestLayersCancel(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), false)
+}
+
+func TestLayersTimeout(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), true)
+}
+
+func testLayers(t *testing.T, seed int64, testTimeout bool) {
+ rand.Seed(seed)
+ errorf := func(format string, a ...interface{}) {
+ t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+ }
+ const (
+ timeout = 200 * time.Millisecond
+ minLayers = 30
+ )
+ type value int
+ var (
+ vals []*value
+ cancels []CancelFunc
+ numTimers int
+ ctx = Background()
+ )
+ for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
+ switch rand.Intn(3) {
+ case 0:
+ v := new(value)
+ ctx = WithValue(ctx, v, v)
+ vals = append(vals, v)
+ case 1:
+ var cancel CancelFunc
+ ctx, cancel = WithCancel(ctx)
+ cancels = append(cancels, cancel)
+ case 2:
+ var cancel CancelFunc
+ ctx, cancel = WithTimeout(ctx, timeout)
+ cancels = append(cancels, cancel)
+ numTimers++
+ }
+ }
+ checkValues := func(when string) {
+ for _, key := range vals {
+ if val := ctx.Value(key).(*value); key != val {
+ errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
+ }
+ }
+ }
+ select {
+ case <-ctx.Done():
+ errorf("ctx should not be canceled yet")
+ default:
+ }
+ if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
+ t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
+ }
+ t.Log(ctx)
+ checkValues("before cancel")
+ if testTimeout {
+ select {
+ case <-ctx.Done():
+ case <-time.After(timeout + 100*time.Millisecond):
+ errorf("ctx should have timed out")
+ }
+ checkValues("after timeout")
+ } else {
+ cancel := cancels[rand.Intn(len(cancels))]
+ cancel()
+ select {
+ case <-ctx.Done():
+ default:
+ errorf("ctx should be canceled")
+ }
+ checkValues("after cancel")
+ }
+}
+
+func TestCancelRemoves(t *testing.T) {
+ checkChildren := func(when string, ctx Context, want int) {
+ if got := len(ctx.(*cancelCtx).children); got != want {
+ t.Errorf("%s: context has %d children, want %d", when, got, want)
+ }
+ }
+
+ ctx, _ := WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel := WithCancel(ctx)
+ checkChildren("with WithCancel child ", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithCancel child", ctx, 0)
+
+ ctx, _ = WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel = WithTimeout(ctx, 60*time.Minute)
+ checkChildren("with WithTimeout child ", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithTimeout child", ctx, 0)
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 000000000..606cf1f97
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ default:
+ }
+ }
+ return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
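
A minimal usage sketch, not part of the vendored package: derive a Context with a deadline and hand it to Get; passing a nil client falls back to http.DefaultClient. The URL is a placeholder.

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, nil, "http://example.com/")
	if err != nil {
		fmt.Println("request failed:", err) // ctx.Err() if the deadline hit first
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(body))
}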
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
new file mode 100644
index 000000000..9f0f90f1b
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,go1.7
+
+package ctxhttp
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "context"
+)
+
+func TestGo17Context(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ }))
+ ctx := context.Background()
+ resp, err := Get(ctx, http.DefaultClient, ts.URL)
+ if resp == nil || err != nil {
+ t.Fatalf("error received from client: %v %v", err, resp)
+ }
+ resp.Body.Close()
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
new file mode 100644
index 000000000..926870cc2
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // TODO(djd): Respect any existing value of req.Cancel.
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ // Make local copies of test hooks closed over by goroutines below.
+ // Prevents data races in tests.
+ testHookDoReturned := testHookDoReturned
+ testHookDidBodyClose := testHookDidBodyClose
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ close(cancel)
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ close(cancel)
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+ resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
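
The notifyingReader above lets the cancellation goroutine inside Do stop watching ctx.Done once the response body has been closed. A standalone sketch of the same notify-on-close idea follows; the type and names here are illustrative, not taken from the vendored file.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// closeNotifier wraps an io.ReadCloser and closes notify exactly once,
// when the wrapped body is closed.
type closeNotifier struct {
	io.ReadCloser
	notify chan<- struct{}
}

func (r *closeNotifier) Close() error {
	err := r.ReadCloser.Close()
	if r.notify != nil {
		close(r.notify)
		r.notify = nil
	}
	return err
}

func main() {
	done := make(chan struct{})
	body := &closeNotifier{
		ReadCloser: ioutil.NopCloser(strings.NewReader("payload")),
		notify:     done,
	}

	io.Copy(ioutil.Discard, body)
	body.Close()

	select {
	case <-done:
		fmt.Println("close observed") // reached: Close closed the channel
	default:
		fmt.Println("close not observed")
	}
}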
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
new file mode 100644
index 000000000..9159cf022
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!go1.7
+
+package ctxhttp
+
+import (
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// golang.org/issue/14065
+func TestClosesResponseBodyOnCancel(t *testing.T) {
+ defer func() { testHookContextDoneBeforeHeaders = nop }()
+ defer func() { testHookDoReturned = nop }()
+ defer func() { testHookDidBodyClose = nop }()
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
+ defer ts.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // closed when Do enters select case <-ctx.Done()
+ enteredDonePath := make(chan struct{})
+
+ testHookContextDoneBeforeHeaders = func() {
+ close(enteredDonePath)
+ }
+
+ testHookDoReturned = func() {
+ // We now have the result (the Flush'd headers) at least,
+ // so we can cancel the request.
+ cancel()
+
+ // But block the client.Do goroutine from sending
+ // until Do enters into the <-ctx.Done() path, since
+ // otherwise if both channels are readable, select
+ // picks a random one.
+ <-enteredDonePath
+ }
+
+ sawBodyClose := make(chan struct{})
+ testHookDidBodyClose = func() { close(sawBodyClose) }
+
+ tr := &http.Transport{}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+ req, _ := http.NewRequest("GET", ts.URL, nil)
+ _, doErr := Do(ctx, c, req)
+
+ select {
+ case <-sawBodyClose:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for body to close")
+ }
+
+ if doErr != ctx.Err() {
+ t.Errorf("Do error = %v; want %v", doErr, ctx.Err())
+ }
+}
+
+type noteCloseConn struct {
+ net.Conn
+ onceClose sync.Once
+ closefn func()
+}
+
+func (c *noteCloseConn) Close() error {
+ c.onceClose.Do(c.closefn)
+ return c.Conn.Close()
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
new file mode 100644
index 000000000..1e4155180
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package ctxhttp
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ requestDuration = 100 * time.Millisecond
+ requestBody = "ok"
+)
+
+func okHandler(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(requestDuration)
+ io.WriteString(w, requestBody)
+}
+
+func TestNoTimeout(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(okHandler))
+ defer ts.Close()
+
+ ctx := context.Background()
+ res, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(slurp) != requestBody {
+ t.Errorf("body = %q; want %q", slurp, requestBody)
+ }
+}
+
+func TestCancelBeforeHeaders(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ blockServer := make(chan struct{})
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ cancel()
+ <-blockServer
+ io.WriteString(w, requestBody)
+ }))
+ defer ts.Close()
+ defer close(blockServer)
+
+ res, err := Get(ctx, nil, ts.URL)
+ if err == nil {
+ res.Body.Close()
+ t.Fatal("Get returned unexpected nil error")
+ }
+ if err != context.Canceled {
+ t.Errorf("err = %v; want %v", err, context.Canceled)
+ }
+}
+
+func TestCancelAfterHangingRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+ <-w.(http.CloseNotifier).CloseNotify()
+ }))
+ defer ts.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ resp, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatalf("unexpected error in Get: %v", err)
+ }
+
+	// Cancel before reading the body.
+ // Reading Request.Body should fail, since the request was
+ // canceled before anything was written.
+ cancel()
+
+ done := make(chan struct{})
+
+ go func() {
+ b, err := ioutil.ReadAll(resp.Body)
+ if len(b) != 0 || err == nil {
+ t.Errorf(`Read got (%q, %v); want ("", error)`, b, err)
+ }
+ close(done)
+ }()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("Test timed out")
+ case <-done:
+ }
+}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 000000000..f8cda19ad
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
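
A minimal sketch of what this go1.7+ build gives callers of the vendored package: the constructors forward to the standard library and Canceled/DeadlineExceeded are the standard library's own error values, so sentinel comparisons work across both packages. This assumes Go 1.7 or newer; the stdctx alias is illustrative only.

	package main

	import (
		stdctx "context"
		"fmt"

		"golang.org/x/net/context"
	)

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		cancel() // Err is set synchronously before cancel returns

		// Because this file re-exports the standard library sentinels on
		// go1.7+, comparisons succeed against either package's variable.
		fmt.Println(ctx.Err() == context.Canceled) // true
		fmt.Println(ctx.Err() == stdctx.Canceled)  // true
	}
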
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 000000000..5a30acabd
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
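
The pre-1.7 implementation above wires parent and child together through propagateCancel, so canceling a parent closes every descendant's Done channel with the same error. A small sketch of that behavior, assuming the vendored import path (the go1.7+ build behaves identically via the standard library):

	package main

	import (
		"fmt"

		"golang.org/x/net/context"
	)

	func main() {
		parent, cancelParent := context.WithCancel(context.Background())
		child, cancelChild := context.WithCancel(parent)
		defer cancelChild()

		// Canceling the parent cascades: the child's Done channel closes
		// and its Err reports the same cancellation.
		cancelParent()

		<-child.Done()
		fmt.Println(child.Err()) // context canceled
	}
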
diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go
new file mode 100644
index 000000000..a6754dc36
--- /dev/null
+++ b/vendor/golang.org/x/net/context/withtimeout_test.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context_test
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+func ExampleWithTimeout() {
+ // Pass a context with a timeout to tell a blocking function that it
+ // should abandon its work after the timeout elapses.
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ select {
+ case <-time.After(200 * time.Millisecond):
+ fmt.Println("overslept")
+ case <-ctx.Done():
+ fmt.Println(ctx.Err()) // prints "context deadline exceeded"
+ }
+ // Output:
+ // context deadline exceeded
+}
diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go
new file mode 100644
index 000000000..58fef89e0
--- /dev/null
+++ b/vendor/golang.org/x/net/dict/dict.go
@@ -0,0 +1,210 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dict implements the Dictionary Server Protocol
+// as defined in RFC 2229.
+package dict // import "golang.org/x/net/dict"
+
+import (
+ "net/textproto"
+ "strconv"
+ "strings"
+)
+
+// A Client represents a client connection to a dictionary server.
+type Client struct {
+ text *textproto.Conn
+}
+
+// Dial returns a new client connected to a dictionary server at
+// addr on the given network.
+func Dial(network, addr string) (*Client, error) {
+ text, err := textproto.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ _, _, err = text.ReadCodeLine(220)
+ if err != nil {
+ text.Close()
+ return nil, err
+ }
+ return &Client{text: text}, nil
+}
+
+// Close closes the connection to the dictionary server.
+func (c *Client) Close() error {
+ return c.text.Close()
+}
+
+// A Dict represents a dictionary available on the server.
+type Dict struct {
+ Name string // short name of dictionary
+ Desc string // long description
+}
+
+// Dicts returns a list of the dictionaries available on the server.
+func (c *Client) Dicts() ([]Dict, error) {
+ id, err := c.text.Cmd("SHOW DB")
+ if err != nil {
+ return nil, err
+ }
+
+ c.text.StartResponse(id)
+ defer c.text.EndResponse(id)
+
+ _, _, err = c.text.ReadCodeLine(110)
+ if err != nil {
+ return nil, err
+ }
+ lines, err := c.text.ReadDotLines()
+ if err != nil {
+ return nil, err
+ }
+ _, _, err = c.text.ReadCodeLine(250)
+
+ dicts := make([]Dict, len(lines))
+ for i := range dicts {
+ d := &dicts[i]
+ a, _ := fields(lines[i])
+ if len(a) < 2 {
+ return nil, textproto.ProtocolError("invalid dictionary: " + lines[i])
+ }
+ d.Name = a[0]
+ d.Desc = a[1]
+ }
+ return dicts, err
+}
+
+// A Defn represents a definition.
+type Defn struct {
+ Dict Dict // Dict where definition was found
+ Word string // Word being defined
+ Text []byte // Definition text, typically multiple lines
+}
+
+// Define requests the definition of the given word.
+// The argument dict names the dictionary to use,
+// the Name field of a Dict returned by Dicts.
+//
+// The special dictionary name "*" means to look in all the
+// server's dictionaries.
+// The special dictionary name "!" means to look in all the
+// server's dictionaries in turn, stopping after finding the word
+// in one of them.
+func (c *Client) Define(dict, word string) ([]*Defn, error) {
+ id, err := c.text.Cmd("DEFINE %s %q", dict, word)
+ if err != nil {
+ return nil, err
+ }
+
+ c.text.StartResponse(id)
+ defer c.text.EndResponse(id)
+
+ _, line, err := c.text.ReadCodeLine(150)
+ if err != nil {
+ return nil, err
+ }
+ a, _ := fields(line)
+ if len(a) < 1 {
+ return nil, textproto.ProtocolError("malformed response: " + line)
+ }
+ n, err := strconv.Atoi(a[0])
+ if err != nil {
+ return nil, textproto.ProtocolError("invalid definition count: " + a[0])
+ }
+ def := make([]*Defn, n)
+ for i := 0; i < n; i++ {
+ _, line, err = c.text.ReadCodeLine(151)
+ if err != nil {
+ return nil, err
+ }
+ a, _ := fields(line)
+ if len(a) < 3 {
+ // skip it, to keep protocol in sync
+ i--
+ n--
+ def = def[0:n]
+ continue
+ }
+ d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}}
+ d.Text, err = c.text.ReadDotBytes()
+ if err != nil {
+ return nil, err
+ }
+ def[i] = d
+ }
+ _, _, err = c.text.ReadCodeLine(250)
+ return def, err
+}
+
+// fields returns the fields in s.
+// Fields are space-separated unquoted words
+// or words quoted with single or double quotes.
+func fields(s string) ([]string, error) {
+ var v []string
+ i := 0
+ for {
+ for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
+ i++
+ }
+ if i >= len(s) {
+ break
+ }
+ if s[i] == '"' || s[i] == '\'' {
+ q := s[i]
+ // quoted string
+ var j int
+ for j = i + 1; ; j++ {
+ if j >= len(s) {
+ return nil, textproto.ProtocolError("malformed quoted string")
+ }
+ if s[j] == '\\' {
+ j++
+ continue
+ }
+ if s[j] == q {
+ j++
+ break
+ }
+ }
+ v = append(v, unquote(s[i+1:j-1]))
+ i = j
+ } else {
+ // atom
+ var j int
+ for j = i; j < len(s); j++ {
+ if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' {
+ break
+ }
+ }
+ v = append(v, s[i:j])
+ i = j
+ }
+ if i < len(s) {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ return nil, textproto.ProtocolError("quotes not on word boundaries")
+ }
+ }
+ }
+ return v, nil
+}
+
+func unquote(s string) string {
+ if strings.Index(s, "\\") < 0 {
+ return s
+ }
+ b := []byte(s)
+ w := 0
+ for r := 0; r < len(b); r++ {
+ c := b[r]
+ if c == '\\' {
+ r++
+ c = b[r]
+ }
+ b[w] = c
+ w++
+ }
+ return string(b[0:w])
+}
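
A short usage sketch for the client above. The server address and the word queried are assumptions for illustration; any RFC 2229 server on the standard port 2628 should behave the same way.

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/dict"
	)

	func main() {
		// Dial speaks the DICT protocol and waits for the 220 greeting.
		c, err := dict.Dial("tcp", "dict.org:2628") // example server only
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// "!" searches the server's dictionaries in order and stops at the
		// first one that defines the word (see Define's doc comment above).
		defs, err := c.Define("!", "gopher")
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range defs {
			fmt.Printf("%s [%s]:\n%s\n", d.Word, d.Dict.Name, d.Text)
		}
	}
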
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
new file mode 100644
index 000000000..cd0a8ac15
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atom provides integer codes (also known as atoms) for a fixed set of
+// frequently occurring HTML strings: tag names and attribute keys such as "p"
+// and "id".
+//
+// Sharing an atom's name between all elements with the same tag can result in
+// fewer string allocations when tokenizing and parsing HTML. Integer
+// comparisons are also generally faster than string comparisons.
+//
+// The value of an atom's particular code is not guaranteed to stay the same
+// between versions of this package. Neither is any ordering guaranteed:
+// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
+// be dense. The only guarantees are that e.g. looking up "div" will yield
+// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
+package atom // import "golang.org/x/net/html/atom"
+
+// Atom is an integer code for a string. The zero value maps to "".
+type Atom uint32
+
+// String returns the atom's name.
+func (a Atom) String() string {
+ start := uint32(a >> 8)
+ n := uint32(a & 0xff)
+ if start+n > uint32(len(atomText)) {
+ return ""
+ }
+ return atomText[start : start+n]
+}
+
+func (a Atom) string() string {
+ return atomText[a>>8 : a>>8+a&0xff]
+}
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s []byte) uint32 {
+ for i := range s {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+func match(s string, t []byte) bool {
+ for i, c := range t {
+ if s[i] != c {
+ return false
+ }
+ }
+ return true
+}
+
+// Lookup returns the atom whose name is s. It returns zero if there is no
+// such atom. The lookup is case sensitive.
+func Lookup(s []byte) Atom {
+ if len(s) == 0 || len(s) > maxAtomLen {
+ return 0
+ }
+ h := fnv(hash0, s)
+ if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ return 0
+}
+
+// String returns a string whose contents are equal to s. In that sense, it is
+// equivalent to string(s) but may be more efficient.
+func String(s []byte) string {
+ if a := Lookup(s); a != 0 {
+ return a.String()
+ }
+ return string(s)
+}
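
A minimal sketch of the lookup guarantees described in the package comment, assuming the vendored import path and the generated table further below:

	package main

	import (
		"fmt"

		"golang.org/x/net/html/atom"
	)

	func main() {
		// Looking up a known name yields its atom; the lookup is case sensitive.
		fmt.Println(atom.Lookup([]byte("div")) == atom.Div) // true
		fmt.Println(atom.Lookup([]byte("DIV")) == 0)        // true: unknown spelling

		// String recovers the name from the packed offset/length code.
		fmt.Println(atom.Div.String()) // div

		// String(s) is equivalent to string(s) but avoids the allocation
		// when the bytes spell a known atom.
		fmt.Println(atom.String([]byte("href"))) // href
	}
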
diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go
new file mode 100644
index 000000000..6e33704dd
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom_test.go
@@ -0,0 +1,109 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atom
+
+import (
+ "sort"
+ "testing"
+)
+
+func TestKnown(t *testing.T) {
+ for _, s := range testAtomList {
+ if atom := Lookup([]byte(s)); atom.String() != s {
+ t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String())
+ }
+ }
+}
+
+func TestHits(t *testing.T) {
+ for _, a := range table {
+ if a == 0 {
+ continue
+ }
+ got := Lookup([]byte(a.String()))
+ if got != a {
+ t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a))
+ }
+ }
+}
+
+func TestMisses(t *testing.T) {
+ testCases := []string{
+ "",
+ "\x00",
+ "\xff",
+ "A",
+ "DIV",
+ "Div",
+ "dIV",
+ "aa",
+ "a\x00",
+ "ab",
+ "abb",
+ "abbr0",
+ "abbr ",
+ " abbr",
+ " a",
+ "acceptcharset",
+ "acceptCharset",
+ "accept_charset",
+ "h0",
+ "h1h2",
+ "h7",
+ "onClick",
+ "λ",
+ // The following string has the same hash (0xa1d7fab7) as "onmouseover".
+ "\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7",
+ }
+ for _, tc := range testCases {
+ got := Lookup([]byte(tc))
+ if got != 0 {
+ t.Errorf("Lookup(%q): got %d, want 0", tc, got)
+ }
+ }
+}
+
+func TestForeignObject(t *testing.T) {
+ const (
+ afo = Foreignobject
+ afO = ForeignObject
+ sfo = "foreignobject"
+ sfO = "foreignObject"
+ )
+ if got := Lookup([]byte(sfo)); got != afo {
+ t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo)
+ }
+ if got := Lookup([]byte(sfO)); got != afO {
+ t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO)
+ }
+ if got := afo.String(); got != sfo {
+ t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo)
+ }
+ if got := afO.String(); got != sfO {
+ t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO)
+ }
+}
+
+func BenchmarkLookup(b *testing.B) {
+ sortedTable := make([]string, 0, len(table))
+ for _, a := range table {
+ if a != 0 {
+ sortedTable = append(sortedTable, a.String())
+ }
+ }
+ sort.Strings(sortedTable)
+
+ x := make([][]byte, 1000)
+ for i := range x {
+ x[i] = []byte(sortedTable[i%len(sortedTable)])
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, s := range x {
+ Lookup(s)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
new file mode 100644
index 000000000..6bfa86601
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/gen.go
@@ -0,0 +1,648 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates table.go and table_test.go.
+// Invoke as
+//
+// go run gen.go |gofmt >table.go
+// go run gen.go -test |gofmt >table_test.go
+
+import (
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "sort"
+ "strings"
+)
+
+// identifier converts s to a Go exported identifier.
+// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
+func identifier(s string) string {
+ b := make([]byte, 0, len(s))
+ cap := true
+ for _, c := range s {
+ if c == '-' {
+ cap = true
+ continue
+ }
+ if cap && 'a' <= c && c <= 'z' {
+ c -= 'a' - 'A'
+ }
+ cap = false
+ b = append(b, byte(c))
+ }
+ return string(b)
+}
+
+var test = flag.Bool("test", false, "generate table_test.go")
+
+func main() {
+ flag.Parse()
+
+ var all []string
+ all = append(all, elements...)
+ all = append(all, attributes...)
+ all = append(all, eventHandlers...)
+ all = append(all, extra...)
+ sort.Strings(all)
+
+ if *test {
+ fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
+ fmt.Printf("package atom\n\n")
+ fmt.Printf("var testAtomList = []string{\n")
+ for _, s := range all {
+ fmt.Printf("\t%q,\n", s)
+ }
+ fmt.Printf("}\n")
+ return
+ }
+
+ // Remove duplicates (the input lists overlap)
+ // and record the maximum string length.
+ maxLen := 0
+ w := 0
+ for _, s := range all {
+ if w == 0 || all[w-1] != s {
+ if maxLen < len(s) {
+ maxLen = len(s)
+ }
+ all[w] = s
+ w++
+ }
+ }
+ all = all[:w]
+
+ // Find hash that minimizes table size.
+ var best *table
+ for i := 0; i < 1000000; i++ {
+ if best != nil && 1<<(best.k-1) < len(all) {
+ break
+ }
+ h := rand.Uint32()
+ for k := uint(0); k <= 16; k++ {
+ if best != nil && k >= best.k {
+ break
+ }
+ var t table
+ if t.init(h, k, all) {
+ best = &t
+ break
+ }
+ }
+ }
+ if best == nil {
+ fmt.Fprintf(os.Stderr, "failed to construct string table\n")
+ os.Exit(1)
+ }
+
+ // Lay out strings, using overlaps when possible.
+ layout := append([]string{}, all...)
+
+ // Remove strings that are substrings of other strings
+ for changed := true; changed; {
+ changed = false
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i != j && t != "" && strings.Contains(s, t) {
+ changed = true
+ layout[j] = ""
+ }
+ }
+ }
+ }
+
+ // Join strings where one suffix matches another prefix.
+ for {
+ // Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
+ // maximizing overlap length k.
+ besti := -1
+ bestj := -1
+ bestk := 0
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i == j {
+ continue
+ }
+ for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
+ if s[len(s)-k:] == t[:k] {
+ besti = i
+ bestj = j
+ bestk = k
+ }
+ }
+ }
+ }
+ if bestk > 0 {
+ layout[besti] += layout[bestj][bestk:]
+ layout[bestj] = ""
+ continue
+ }
+ break
+ }
+
+ text := strings.Join(layout, "")
+
+ atom := map[string]uint32{}
+ for _, s := range all {
+ off := strings.Index(text, s)
+ if off < 0 {
+ panic("lost string " + s)
+ }
+ atom[s] = uint32(off<<8 | len(s))
+ }
+
+ // Generate the Go code.
+ fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
+ fmt.Printf("package atom\n\nconst (\n")
+ for _, s := range all {
+ fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
+ }
+ fmt.Printf(")\n\n")
+
+ fmt.Printf("const hash0 = %#x\n\n", best.h0)
+ fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
+
+ fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
+ for i, s := range best.tab {
+ if s == "" {
+ continue
+ }
+ fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
+ }
+ fmt.Printf("}\n")
+ datasize := (1 << best.k) * 4
+
+ fmt.Printf("const atomText =\n")
+ textsize := len(text)
+ for len(text) > 60 {
+ fmt.Printf("\t%q +\n", text[:60])
+ text = text[60:]
+ }
+ fmt.Printf("\t%q\n\n", text)
+
+ fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+ h0 uint32
+ k uint
+ mask uint32
+ tab []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+ h := fnv(t.h0, s)
+ h1 = h & t.mask
+ h2 = (h >> 16) & t.mask
+ return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+ t.h0 = h0
+ t.k = k
+ t.tab = make([]string, 1<<k)
+ t.mask = 1<<k - 1
+ for _, s := range x {
+ if !t.insert(s) {
+ return false
+ }
+ }
+ return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+ h1, h2 := t.hash(s)
+ if t.tab[h1] == "" {
+ t.tab[h1] = s
+ return true
+ }
+ if t.tab[h2] == "" {
+ t.tab[h2] = s
+ return true
+ }
+ if t.push(h1, 0) {
+ t.tab[h1] = s
+ return true
+ }
+ if t.push(h2, 0) {
+ t.tab[h2] = s
+ return true
+ }
+ return false
+}
+
+// push attempts to push aside the entry in slot i.
+func (t *table) push(i uint32, depth int) bool {
+ if depth > len(t.tab) {
+ return false
+ }
+ s := t.tab[i]
+ h1, h2 := t.hash(s)
+ j := h1 + h2 - i
+ if t.tab[j] != "" && !t.push(j, depth+1) {
+ return false
+ }
+ t.tab[j] = s
+ return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
+
+var elements = []string{
+ "a",
+ "abbr",
+ "address",
+ "area",
+ "article",
+ "aside",
+ "audio",
+ "b",
+ "base",
+ "bdi",
+ "bdo",
+ "blockquote",
+ "body",
+ "br",
+ "button",
+ "canvas",
+ "caption",
+ "cite",
+ "code",
+ "col",
+ "colgroup",
+ "command",
+ "data",
+ "datalist",
+ "dd",
+ "del",
+ "details",
+ "dfn",
+ "dialog",
+ "div",
+ "dl",
+ "dt",
+ "em",
+ "embed",
+ "fieldset",
+ "figcaption",
+ "figure",
+ "footer",
+ "form",
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6",
+ "head",
+ "header",
+ "hgroup",
+ "hr",
+ "html",
+ "i",
+ "iframe",
+ "img",
+ "input",
+ "ins",
+ "kbd",
+ "keygen",
+ "label",
+ "legend",
+ "li",
+ "link",
+ "map",
+ "mark",
+ "menu",
+ "menuitem",
+ "meta",
+ "meter",
+ "nav",
+ "noscript",
+ "object",
+ "ol",
+ "optgroup",
+ "option",
+ "output",
+ "p",
+ "param",
+ "pre",
+ "progress",
+ "q",
+ "rp",
+ "rt",
+ "ruby",
+ "s",
+ "samp",
+ "script",
+ "section",
+ "select",
+ "small",
+ "source",
+ "span",
+ "strong",
+ "style",
+ "sub",
+ "summary",
+ "sup",
+ "table",
+ "tbody",
+ "td",
+ "template",
+ "textarea",
+ "tfoot",
+ "th",
+ "thead",
+ "time",
+ "title",
+ "tr",
+ "track",
+ "u",
+ "ul",
+ "var",
+ "video",
+ "wbr",
+}
+
+// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
+
+var attributes = []string{
+ "abbr",
+ "accept",
+ "accept-charset",
+ "accesskey",
+ "action",
+ "alt",
+ "async",
+ "autocomplete",
+ "autofocus",
+ "autoplay",
+ "challenge",
+ "charset",
+ "checked",
+ "cite",
+ "class",
+ "cols",
+ "colspan",
+ "command",
+ "content",
+ "contenteditable",
+ "contextmenu",
+ "controls",
+ "coords",
+ "crossorigin",
+ "data",
+ "datetime",
+ "default",
+ "defer",
+ "dir",
+ "dirname",
+ "disabled",
+ "download",
+ "draggable",
+ "dropzone",
+ "enctype",
+ "for",
+ "form",
+ "formaction",
+ "formenctype",
+ "formmethod",
+ "formnovalidate",
+ "formtarget",
+ "headers",
+ "height",
+ "hidden",
+ "high",
+ "href",
+ "hreflang",
+ "http-equiv",
+ "icon",
+ "id",
+ "inputmode",
+ "ismap",
+ "itemid",
+ "itemprop",
+ "itemref",
+ "itemscope",
+ "itemtype",
+ "keytype",
+ "kind",
+ "label",
+ "lang",
+ "list",
+ "loop",
+ "low",
+ "manifest",
+ "max",
+ "maxlength",
+ "media",
+ "mediagroup",
+ "method",
+ "min",
+ "minlength",
+ "multiple",
+ "muted",
+ "name",
+ "novalidate",
+ "open",
+ "optimum",
+ "pattern",
+ "ping",
+ "placeholder",
+ "poster",
+ "preload",
+ "radiogroup",
+ "readonly",
+ "rel",
+ "required",
+ "reversed",
+ "rows",
+ "rowspan",
+ "sandbox",
+ "spellcheck",
+ "scope",
+ "scoped",
+ "seamless",
+ "selected",
+ "shape",
+ "size",
+ "sizes",
+ "sortable",
+ "sorted",
+ "span",
+ "src",
+ "srcdoc",
+ "srclang",
+ "start",
+ "step",
+ "style",
+ "tabindex",
+ "target",
+ "title",
+ "translate",
+ "type",
+ "typemustmatch",
+ "usemap",
+ "value",
+ "width",
+ "wrap",
+}
+
+var eventHandlers = []string{
+ "onabort",
+ "onautocomplete",
+ "onautocompleteerror",
+ "onafterprint",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontextmenu",
+ "oncuechange",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onhashchange",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onlanguagechange",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadstart",
+ "onmessage",
+ "onmousedown",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onpopstate",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onscroll",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onshow",
+ "onsort",
+ "onstalled",
+ "onstorage",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onunload",
+ "onvolumechange",
+ "onwaiting",
+}
+
+// extra are ad-hoc values not covered by any of the lists above.
+var extra = []string{
+ "align",
+ "annotation",
+ "annotation-xml",
+ "applet",
+ "basefont",
+ "bgsound",
+ "big",
+ "blink",
+ "center",
+ "color",
+ "desc",
+ "face",
+ "font",
+ "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
+ "foreignobject",
+ "frame",
+ "frameset",
+ "image",
+ "isindex",
+ "listing",
+ "malignmark",
+ "marquee",
+ "math",
+ "mglyph",
+ "mi",
+ "mn",
+ "mo",
+ "ms",
+ "mtext",
+ "nobr",
+ "noembed",
+ "noframes",
+ "plaintext",
+ "prompt",
+ "public",
+ "spacer",
+ "strike",
+ "svg",
+ "system",
+ "tt",
+ "xmp",
+}
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
new file mode 100644
index 000000000..2605ba310
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -0,0 +1,713 @@
+// generated by go run gen.go; DO NOT EDIT
+
+package atom
+
+const (
+ A Atom = 0x1
+ Abbr Atom = 0x4
+ Accept Atom = 0x2106
+ AcceptCharset Atom = 0x210e
+ Accesskey Atom = 0x3309
+ Action Atom = 0x1f606
+ Address Atom = 0x4f307
+ Align Atom = 0x1105
+ Alt Atom = 0x4503
+ Annotation Atom = 0x1670a
+ AnnotationXml Atom = 0x1670e
+ Applet Atom = 0x2b306
+ Area Atom = 0x2fa04
+ Article Atom = 0x38807
+ Aside Atom = 0x8305
+ Async Atom = 0x7b05
+ Audio Atom = 0xa605
+ Autocomplete Atom = 0x1fc0c
+ Autofocus Atom = 0xb309
+ Autoplay Atom = 0xce08
+ B Atom = 0x101
+ Base Atom = 0xd604
+ Basefont Atom = 0xd608
+ Bdi Atom = 0x1a03
+ Bdo Atom = 0xe703
+ Bgsound Atom = 0x11807
+ Big Atom = 0x12403
+ Blink Atom = 0x12705
+ Blockquote Atom = 0x12c0a
+ Body Atom = 0x2f04
+ Br Atom = 0x202
+ Button Atom = 0x13606
+ Canvas Atom = 0x7f06
+ Caption Atom = 0x1bb07
+ Center Atom = 0x5b506
+ Challenge Atom = 0x21f09
+ Charset Atom = 0x2807
+ Checked Atom = 0x32807
+ Cite Atom = 0x3c804
+ Class Atom = 0x4de05
+ Code Atom = 0x14904
+ Col Atom = 0x15003
+ Colgroup Atom = 0x15008
+ Color Atom = 0x15d05
+ Cols Atom = 0x16204
+ Colspan Atom = 0x16207
+ Command Atom = 0x17507
+ Content Atom = 0x42307
+ Contenteditable Atom = 0x4230f
+ Contextmenu Atom = 0x3310b
+ Controls Atom = 0x18808
+ Coords Atom = 0x19406
+ Crossorigin Atom = 0x19f0b
+ Data Atom = 0x44a04
+ Datalist Atom = 0x44a08
+ Datetime Atom = 0x23c08
+ Dd Atom = 0x26702
+ Default Atom = 0x8607
+ Defer Atom = 0x14b05
+ Del Atom = 0x3ef03
+ Desc Atom = 0x4db04
+ Details Atom = 0x4807
+ Dfn Atom = 0x6103
+ Dialog Atom = 0x1b06
+ Dir Atom = 0x6903
+ Dirname Atom = 0x6907
+ Disabled Atom = 0x10c08
+ Div Atom = 0x11303
+ Dl Atom = 0x11e02
+ Download Atom = 0x40008
+ Draggable Atom = 0x17b09
+ Dropzone Atom = 0x39108
+ Dt Atom = 0x50902
+ Em Atom = 0x6502
+ Embed Atom = 0x6505
+ Enctype Atom = 0x21107
+ Face Atom = 0x5b304
+ Fieldset Atom = 0x1b008
+ Figcaption Atom = 0x1b80a
+ Figure Atom = 0x1cc06
+ Font Atom = 0xda04
+ Footer Atom = 0x8d06
+ For Atom = 0x1d803
+ ForeignObject Atom = 0x1d80d
+ Foreignobject Atom = 0x1e50d
+ Form Atom = 0x1f204
+ Formaction Atom = 0x1f20a
+ Formenctype Atom = 0x20d0b
+ Formmethod Atom = 0x2280a
+ Formnovalidate Atom = 0x2320e
+ Formtarget Atom = 0x2470a
+ Frame Atom = 0x9a05
+ Frameset Atom = 0x9a08
+ H1 Atom = 0x26e02
+ H2 Atom = 0x29402
+ H3 Atom = 0x2a702
+ H4 Atom = 0x2e902
+ H5 Atom = 0x2f302
+ H6 Atom = 0x50b02
+ Head Atom = 0x2d504
+ Header Atom = 0x2d506
+ Headers Atom = 0x2d507
+ Height Atom = 0x25106
+ Hgroup Atom = 0x25906
+ Hidden Atom = 0x26506
+ High Atom = 0x26b04
+ Hr Atom = 0x27002
+ Href Atom = 0x27004
+ Hreflang Atom = 0x27008
+ Html Atom = 0x25504
+ HttpEquiv Atom = 0x2780a
+ I Atom = 0x601
+ Icon Atom = 0x42204
+ Id Atom = 0x8502
+ Iframe Atom = 0x29606
+ Image Atom = 0x29c05
+ Img Atom = 0x2a103
+ Input Atom = 0x3e805
+ Inputmode Atom = 0x3e809
+ Ins Atom = 0x1a803
+ Isindex Atom = 0x2a907
+ Ismap Atom = 0x2b005
+ Itemid Atom = 0x33c06
+ Itemprop Atom = 0x3c908
+ Itemref Atom = 0x5ad07
+ Itemscope Atom = 0x2b909
+ Itemtype Atom = 0x2c308
+ Kbd Atom = 0x1903
+ Keygen Atom = 0x3906
+ Keytype Atom = 0x53707
+ Kind Atom = 0x10904
+ Label Atom = 0xf005
+ Lang Atom = 0x27404
+ Legend Atom = 0x18206
+ Li Atom = 0x1202
+ Link Atom = 0x12804
+ List Atom = 0x44e04
+ Listing Atom = 0x44e07
+ Loop Atom = 0xf404
+ Low Atom = 0x11f03
+ Malignmark Atom = 0x100a
+ Manifest Atom = 0x5f108
+ Map Atom = 0x2b203
+ Mark Atom = 0x1604
+ Marquee Atom = 0x2cb07
+ Math Atom = 0x2d204
+ Max Atom = 0x2e103
+ Maxlength Atom = 0x2e109
+ Media Atom = 0x6e05
+ Mediagroup Atom = 0x6e0a
+ Menu Atom = 0x33804
+ Menuitem Atom = 0x33808
+ Meta Atom = 0x45d04
+ Meter Atom = 0x24205
+ Method Atom = 0x22c06
+ Mglyph Atom = 0x2a206
+ Mi Atom = 0x2eb02
+ Min Atom = 0x2eb03
+ Minlength Atom = 0x2eb09
+ Mn Atom = 0x23502
+ Mo Atom = 0x3ed02
+ Ms Atom = 0x2bc02
+ Mtext Atom = 0x2f505
+ Multiple Atom = 0x30308
+ Muted Atom = 0x30b05
+ Name Atom = 0x6c04
+ Nav Atom = 0x3e03
+ Nobr Atom = 0x5704
+ Noembed Atom = 0x6307
+ Noframes Atom = 0x9808
+ Noscript Atom = 0x3d208
+ Novalidate Atom = 0x2360a
+ Object Atom = 0x1ec06
+ Ol Atom = 0xc902
+ Onabort Atom = 0x13a07
+ Onafterprint Atom = 0x1c00c
+ Onautocomplete Atom = 0x1fa0e
+ Onautocompleteerror Atom = 0x1fa13
+ Onbeforeprint Atom = 0x6040d
+ Onbeforeunload Atom = 0x4e70e
+ Onblur Atom = 0xaa06
+ Oncancel Atom = 0xe908
+ Oncanplay Atom = 0x28509
+ Oncanplaythrough Atom = 0x28510
+ Onchange Atom = 0x3a708
+ Onclick Atom = 0x31007
+ Onclose Atom = 0x31707
+ Oncontextmenu Atom = 0x32f0d
+ Oncuechange Atom = 0x3420b
+ Ondblclick Atom = 0x34d0a
+ Ondrag Atom = 0x35706
+ Ondragend Atom = 0x35709
+ Ondragenter Atom = 0x3600b
+ Ondragleave Atom = 0x36b0b
+ Ondragover Atom = 0x3760a
+ Ondragstart Atom = 0x3800b
+ Ondrop Atom = 0x38f06
+ Ondurationchange Atom = 0x39f10
+ Onemptied Atom = 0x39609
+ Onended Atom = 0x3af07
+ Onerror Atom = 0x3b607
+ Onfocus Atom = 0x3bd07
+ Onhashchange Atom = 0x3da0c
+ Oninput Atom = 0x3e607
+ Oninvalid Atom = 0x3f209
+ Onkeydown Atom = 0x3fb09
+ Onkeypress Atom = 0x4080a
+ Onkeyup Atom = 0x41807
+ Onlanguagechange Atom = 0x43210
+ Onload Atom = 0x44206
+ Onloadeddata Atom = 0x4420c
+ Onloadedmetadata Atom = 0x45510
+ Onloadstart Atom = 0x46b0b
+ Onmessage Atom = 0x47609
+ Onmousedown Atom = 0x47f0b
+ Onmousemove Atom = 0x48a0b
+ Onmouseout Atom = 0x4950a
+ Onmouseover Atom = 0x4a20b
+ Onmouseup Atom = 0x4ad09
+ Onmousewheel Atom = 0x4b60c
+ Onoffline Atom = 0x4c209
+ Ononline Atom = 0x4cb08
+ Onpagehide Atom = 0x4d30a
+ Onpageshow Atom = 0x4fe0a
+ Onpause Atom = 0x50d07
+ Onplay Atom = 0x51706
+ Onplaying Atom = 0x51709
+ Onpopstate Atom = 0x5200a
+ Onprogress Atom = 0x52a0a
+ Onratechange Atom = 0x53e0c
+ Onreset Atom = 0x54a07
+ Onresize Atom = 0x55108
+ Onscroll Atom = 0x55f08
+ Onseeked Atom = 0x56708
+ Onseeking Atom = 0x56f09
+ Onselect Atom = 0x57808
+ Onshow Atom = 0x58206
+ Onsort Atom = 0x58b06
+ Onstalled Atom = 0x59509
+ Onstorage Atom = 0x59e09
+ Onsubmit Atom = 0x5a708
+ Onsuspend Atom = 0x5bb09
+ Ontimeupdate Atom = 0xdb0c
+ Ontoggle Atom = 0x5c408
+ Onunload Atom = 0x5cc08
+ Onvolumechange Atom = 0x5d40e
+ Onwaiting Atom = 0x5e209
+ Open Atom = 0x3cf04
+ Optgroup Atom = 0xf608
+ Optimum Atom = 0x5eb07
+ Option Atom = 0x60006
+ Output Atom = 0x49c06
+ P Atom = 0xc01
+ Param Atom = 0xc05
+ Pattern Atom = 0x5107
+ Ping Atom = 0x7704
+ Placeholder Atom = 0xc30b
+ Plaintext Atom = 0xfd09
+ Poster Atom = 0x15706
+ Pre Atom = 0x25e03
+ Preload Atom = 0x25e07
+ Progress Atom = 0x52c08
+ Prompt Atom = 0x5fa06
+ Public Atom = 0x41e06
+ Q Atom = 0x13101
+ Radiogroup Atom = 0x30a
+ Readonly Atom = 0x2fb08
+ Rel Atom = 0x25f03
+ Required Atom = 0x1d008
+ Reversed Atom = 0x5a08
+ Rows Atom = 0x9204
+ Rowspan Atom = 0x9207
+ Rp Atom = 0x1c602
+ Rt Atom = 0x13f02
+ Ruby Atom = 0xaf04
+ S Atom = 0x2c01
+ Samp Atom = 0x4e04
+ Sandbox Atom = 0xbb07
+ Scope Atom = 0x2bd05
+ Scoped Atom = 0x2bd06
+ Script Atom = 0x3d406
+ Seamless Atom = 0x31c08
+ Section Atom = 0x4e207
+ Select Atom = 0x57a06
+ Selected Atom = 0x57a08
+ Shape Atom = 0x4f905
+ Size Atom = 0x55504
+ Sizes Atom = 0x55505
+ Small Atom = 0x18f05
+ Sortable Atom = 0x58d08
+ Sorted Atom = 0x19906
+ Source Atom = 0x1aa06
+ Spacer Atom = 0x2db06
+ Span Atom = 0x9504
+ Spellcheck Atom = 0x3230a
+ Src Atom = 0x3c303
+ Srcdoc Atom = 0x3c306
+ Srclang Atom = 0x41107
+ Start Atom = 0x38605
+ Step Atom = 0x5f704
+ Strike Atom = 0x53306
+ Strong Atom = 0x55906
+ Style Atom = 0x61105
+ Sub Atom = 0x5a903
+ Summary Atom = 0x61607
+ Sup Atom = 0x61d03
+ Svg Atom = 0x62003
+ System Atom = 0x62306
+ Tabindex Atom = 0x46308
+ Table Atom = 0x42d05
+ Target Atom = 0x24b06
+ Tbody Atom = 0x2e05
+ Td Atom = 0x4702
+ Template Atom = 0x62608
+ Textarea Atom = 0x2f608
+ Tfoot Atom = 0x8c05
+ Th Atom = 0x22e02
+ Thead Atom = 0x2d405
+ Time Atom = 0xdd04
+ Title Atom = 0xa105
+ Tr Atom = 0x10502
+ Track Atom = 0x10505
+ Translate Atom = 0x14009
+ Tt Atom = 0x5302
+ Type Atom = 0x21404
+ Typemustmatch Atom = 0x2140d
+ U Atom = 0xb01
+ Ul Atom = 0x8a02
+ Usemap Atom = 0x51106
+ Value Atom = 0x4005
+ Var Atom = 0x11503
+ Video Atom = 0x28105
+ Wbr Atom = 0x12103
+ Width Atom = 0x50705
+ Wrap Atom = 0x58704
+ Xmp Atom = 0xc103
+)
+
+const hash0 = 0xc17da63e
+
+const maxAtomLen = 19
+
+var table = [1 << 9]Atom{
+ 0x1: 0x48a0b, // onmousemove
+ 0x2: 0x5e209, // onwaiting
+ 0x3: 0x1fa13, // onautocompleteerror
+ 0x4: 0x5fa06, // prompt
+ 0x7: 0x5eb07, // optimum
+ 0x8: 0x1604, // mark
+ 0xa: 0x5ad07, // itemref
+ 0xb: 0x4fe0a, // onpageshow
+ 0xc: 0x57a06, // select
+ 0xd: 0x17b09, // draggable
+ 0xe: 0x3e03, // nav
+ 0xf: 0x17507, // command
+ 0x11: 0xb01, // u
+ 0x14: 0x2d507, // headers
+ 0x15: 0x44a08, // datalist
+ 0x17: 0x4e04, // samp
+ 0x1a: 0x3fb09, // onkeydown
+ 0x1b: 0x55f08, // onscroll
+ 0x1c: 0x15003, // col
+ 0x20: 0x3c908, // itemprop
+ 0x21: 0x2780a, // http-equiv
+ 0x22: 0x61d03, // sup
+ 0x24: 0x1d008, // required
+ 0x2b: 0x25e07, // preload
+ 0x2c: 0x6040d, // onbeforeprint
+ 0x2d: 0x3600b, // ondragenter
+ 0x2e: 0x50902, // dt
+ 0x2f: 0x5a708, // onsubmit
+ 0x30: 0x27002, // hr
+ 0x31: 0x32f0d, // oncontextmenu
+ 0x33: 0x29c05, // image
+ 0x34: 0x50d07, // onpause
+ 0x35: 0x25906, // hgroup
+ 0x36: 0x7704, // ping
+ 0x37: 0x57808, // onselect
+ 0x3a: 0x11303, // div
+ 0x3b: 0x1fa0e, // onautocomplete
+ 0x40: 0x2eb02, // mi
+ 0x41: 0x31c08, // seamless
+ 0x42: 0x2807, // charset
+ 0x43: 0x8502, // id
+ 0x44: 0x5200a, // onpopstate
+ 0x45: 0x3ef03, // del
+ 0x46: 0x2cb07, // marquee
+ 0x47: 0x3309, // accesskey
+ 0x49: 0x8d06, // footer
+ 0x4a: 0x44e04, // list
+ 0x4b: 0x2b005, // ismap
+ 0x51: 0x33804, // menu
+ 0x52: 0x2f04, // body
+ 0x55: 0x9a08, // frameset
+ 0x56: 0x54a07, // onreset
+ 0x57: 0x12705, // blink
+ 0x58: 0xa105, // title
+ 0x59: 0x38807, // article
+ 0x5b: 0x22e02, // th
+ 0x5d: 0x13101, // q
+ 0x5e: 0x3cf04, // open
+ 0x5f: 0x2fa04, // area
+ 0x61: 0x44206, // onload
+ 0x62: 0xda04, // font
+ 0x63: 0xd604, // base
+ 0x64: 0x16207, // colspan
+ 0x65: 0x53707, // keytype
+ 0x66: 0x11e02, // dl
+ 0x68: 0x1b008, // fieldset
+ 0x6a: 0x2eb03, // min
+ 0x6b: 0x11503, // var
+ 0x6f: 0x2d506, // header
+ 0x70: 0x13f02, // rt
+ 0x71: 0x15008, // colgroup
+ 0x72: 0x23502, // mn
+ 0x74: 0x13a07, // onabort
+ 0x75: 0x3906, // keygen
+ 0x76: 0x4c209, // onoffline
+ 0x77: 0x21f09, // challenge
+ 0x78: 0x2b203, // map
+ 0x7a: 0x2e902, // h4
+ 0x7b: 0x3b607, // onerror
+ 0x7c: 0x2e109, // maxlength
+ 0x7d: 0x2f505, // mtext
+ 0x7e: 0xbb07, // sandbox
+ 0x7f: 0x58b06, // onsort
+ 0x80: 0x100a, // malignmark
+ 0x81: 0x45d04, // meta
+ 0x82: 0x7b05, // async
+ 0x83: 0x2a702, // h3
+ 0x84: 0x26702, // dd
+ 0x85: 0x27004, // href
+ 0x86: 0x6e0a, // mediagroup
+ 0x87: 0x19406, // coords
+ 0x88: 0x41107, // srclang
+ 0x89: 0x34d0a, // ondblclick
+ 0x8a: 0x4005, // value
+ 0x8c: 0xe908, // oncancel
+ 0x8e: 0x3230a, // spellcheck
+ 0x8f: 0x9a05, // frame
+ 0x91: 0x12403, // big
+ 0x94: 0x1f606, // action
+ 0x95: 0x6903, // dir
+ 0x97: 0x2fb08, // readonly
+ 0x99: 0x42d05, // table
+ 0x9a: 0x61607, // summary
+ 0x9b: 0x12103, // wbr
+ 0x9c: 0x30a, // radiogroup
+ 0x9d: 0x6c04, // name
+ 0x9f: 0x62306, // system
+ 0xa1: 0x15d05, // color
+ 0xa2: 0x7f06, // canvas
+ 0xa3: 0x25504, // html
+ 0xa5: 0x56f09, // onseeking
+ 0xac: 0x4f905, // shape
+ 0xad: 0x25f03, // rel
+ 0xae: 0x28510, // oncanplaythrough
+ 0xaf: 0x3760a, // ondragover
+ 0xb0: 0x62608, // template
+ 0xb1: 0x1d80d, // foreignObject
+ 0xb3: 0x9204, // rows
+ 0xb6: 0x44e07, // listing
+ 0xb7: 0x49c06, // output
+ 0xb9: 0x3310b, // contextmenu
+ 0xbb: 0x11f03, // low
+ 0xbc: 0x1c602, // rp
+ 0xbd: 0x5bb09, // onsuspend
+ 0xbe: 0x13606, // button
+ 0xbf: 0x4db04, // desc
+ 0xc1: 0x4e207, // section
+ 0xc2: 0x52a0a, // onprogress
+ 0xc3: 0x59e09, // onstorage
+ 0xc4: 0x2d204, // math
+ 0xc5: 0x4503, // alt
+ 0xc7: 0x8a02, // ul
+ 0xc8: 0x5107, // pattern
+ 0xc9: 0x4b60c, // onmousewheel
+ 0xca: 0x35709, // ondragend
+ 0xcb: 0xaf04, // ruby
+ 0xcc: 0xc01, // p
+ 0xcd: 0x31707, // onclose
+ 0xce: 0x24205, // meter
+ 0xcf: 0x11807, // bgsound
+ 0xd2: 0x25106, // height
+ 0xd4: 0x101, // b
+ 0xd5: 0x2c308, // itemtype
+ 0xd8: 0x1bb07, // caption
+ 0xd9: 0x10c08, // disabled
+ 0xdb: 0x33808, // menuitem
+ 0xdc: 0x62003, // svg
+ 0xdd: 0x18f05, // small
+ 0xde: 0x44a04, // data
+ 0xe0: 0x4cb08, // ononline
+ 0xe1: 0x2a206, // mglyph
+ 0xe3: 0x6505, // embed
+ 0xe4: 0x10502, // tr
+ 0xe5: 0x46b0b, // onloadstart
+ 0xe7: 0x3c306, // srcdoc
+ 0xeb: 0x5c408, // ontoggle
+ 0xed: 0xe703, // bdo
+ 0xee: 0x4702, // td
+ 0xef: 0x8305, // aside
+ 0xf0: 0x29402, // h2
+ 0xf1: 0x52c08, // progress
+ 0xf2: 0x12c0a, // blockquote
+ 0xf4: 0xf005, // label
+ 0xf5: 0x601, // i
+ 0xf7: 0x9207, // rowspan
+ 0xfb: 0x51709, // onplaying
+ 0xfd: 0x2a103, // img
+ 0xfe: 0xf608, // optgroup
+ 0xff: 0x42307, // content
+ 0x101: 0x53e0c, // onratechange
+ 0x103: 0x3da0c, // onhashchange
+ 0x104: 0x4807, // details
+ 0x106: 0x40008, // download
+ 0x109: 0x14009, // translate
+ 0x10b: 0x4230f, // contenteditable
+ 0x10d: 0x36b0b, // ondragleave
+ 0x10e: 0x2106, // accept
+ 0x10f: 0x57a08, // selected
+ 0x112: 0x1f20a, // formaction
+ 0x113: 0x5b506, // center
+ 0x115: 0x45510, // onloadedmetadata
+ 0x116: 0x12804, // link
+ 0x117: 0xdd04, // time
+ 0x118: 0x19f0b, // crossorigin
+ 0x119: 0x3bd07, // onfocus
+ 0x11a: 0x58704, // wrap
+ 0x11b: 0x42204, // icon
+ 0x11d: 0x28105, // video
+ 0x11e: 0x4de05, // class
+ 0x121: 0x5d40e, // onvolumechange
+ 0x122: 0xaa06, // onblur
+ 0x123: 0x2b909, // itemscope
+ 0x124: 0x61105, // style
+ 0x127: 0x41e06, // public
+ 0x129: 0x2320e, // formnovalidate
+ 0x12a: 0x58206, // onshow
+ 0x12c: 0x51706, // onplay
+ 0x12d: 0x3c804, // cite
+ 0x12e: 0x2bc02, // ms
+ 0x12f: 0xdb0c, // ontimeupdate
+ 0x130: 0x10904, // kind
+ 0x131: 0x2470a, // formtarget
+ 0x135: 0x3af07, // onended
+ 0x136: 0x26506, // hidden
+ 0x137: 0x2c01, // s
+ 0x139: 0x2280a, // formmethod
+ 0x13a: 0x3e805, // input
+ 0x13c: 0x50b02, // h6
+ 0x13d: 0xc902, // ol
+ 0x13e: 0x3420b, // oncuechange
+ 0x13f: 0x1e50d, // foreignobject
+ 0x143: 0x4e70e, // onbeforeunload
+ 0x144: 0x2bd05, // scope
+ 0x145: 0x39609, // onemptied
+ 0x146: 0x14b05, // defer
+ 0x147: 0xc103, // xmp
+ 0x148: 0x39f10, // ondurationchange
+ 0x149: 0x1903, // kbd
+ 0x14c: 0x47609, // onmessage
+ 0x14d: 0x60006, // option
+ 0x14e: 0x2eb09, // minlength
+ 0x14f: 0x32807, // checked
+ 0x150: 0xce08, // autoplay
+ 0x152: 0x202, // br
+ 0x153: 0x2360a, // novalidate
+ 0x156: 0x6307, // noembed
+ 0x159: 0x31007, // onclick
+ 0x15a: 0x47f0b, // onmousedown
+ 0x15b: 0x3a708, // onchange
+ 0x15e: 0x3f209, // oninvalid
+ 0x15f: 0x2bd06, // scoped
+ 0x160: 0x18808, // controls
+ 0x161: 0x30b05, // muted
+ 0x162: 0x58d08, // sortable
+ 0x163: 0x51106, // usemap
+ 0x164: 0x1b80a, // figcaption
+ 0x165: 0x35706, // ondrag
+ 0x166: 0x26b04, // high
+ 0x168: 0x3c303, // src
+ 0x169: 0x15706, // poster
+ 0x16b: 0x1670e, // annotation-xml
+ 0x16c: 0x5f704, // step
+ 0x16d: 0x4, // abbr
+ 0x16e: 0x1b06, // dialog
+ 0x170: 0x1202, // li
+ 0x172: 0x3ed02, // mo
+ 0x175: 0x1d803, // for
+ 0x176: 0x1a803, // ins
+ 0x178: 0x55504, // size
+ 0x179: 0x43210, // onlanguagechange
+ 0x17a: 0x8607, // default
+ 0x17b: 0x1a03, // bdi
+ 0x17c: 0x4d30a, // onpagehide
+ 0x17d: 0x6907, // dirname
+ 0x17e: 0x21404, // type
+ 0x17f: 0x1f204, // form
+ 0x181: 0x28509, // oncanplay
+ 0x182: 0x6103, // dfn
+ 0x183: 0x46308, // tabindex
+ 0x186: 0x6502, // em
+ 0x187: 0x27404, // lang
+ 0x189: 0x39108, // dropzone
+ 0x18a: 0x4080a, // onkeypress
+ 0x18b: 0x23c08, // datetime
+ 0x18c: 0x16204, // cols
+ 0x18d: 0x1, // a
+ 0x18e: 0x4420c, // onloadeddata
+ 0x190: 0xa605, // audio
+ 0x192: 0x2e05, // tbody
+ 0x193: 0x22c06, // method
+ 0x195: 0xf404, // loop
+ 0x196: 0x29606, // iframe
+ 0x198: 0x2d504, // head
+ 0x19e: 0x5f108, // manifest
+ 0x19f: 0xb309, // autofocus
+ 0x1a0: 0x14904, // code
+ 0x1a1: 0x55906, // strong
+ 0x1a2: 0x30308, // multiple
+ 0x1a3: 0xc05, // param
+ 0x1a6: 0x21107, // enctype
+ 0x1a7: 0x5b304, // face
+ 0x1a8: 0xfd09, // plaintext
+ 0x1a9: 0x26e02, // h1
+ 0x1aa: 0x59509, // onstalled
+ 0x1ad: 0x3d406, // script
+ 0x1ae: 0x2db06, // spacer
+ 0x1af: 0x55108, // onresize
+ 0x1b0: 0x4a20b, // onmouseover
+ 0x1b1: 0x5cc08, // onunload
+ 0x1b2: 0x56708, // onseeked
+ 0x1b4: 0x2140d, // typemustmatch
+ 0x1b5: 0x1cc06, // figure
+ 0x1b6: 0x4950a, // onmouseout
+ 0x1b7: 0x25e03, // pre
+ 0x1b8: 0x50705, // width
+ 0x1b9: 0x19906, // sorted
+ 0x1bb: 0x5704, // nobr
+ 0x1be: 0x5302, // tt
+ 0x1bf: 0x1105, // align
+ 0x1c0: 0x3e607, // oninput
+ 0x1c3: 0x41807, // onkeyup
+ 0x1c6: 0x1c00c, // onafterprint
+ 0x1c7: 0x210e, // accept-charset
+ 0x1c8: 0x33c06, // itemid
+ 0x1c9: 0x3e809, // inputmode
+ 0x1cb: 0x53306, // strike
+ 0x1cc: 0x5a903, // sub
+ 0x1cd: 0x10505, // track
+ 0x1ce: 0x38605, // start
+ 0x1d0: 0xd608, // basefont
+ 0x1d6: 0x1aa06, // source
+ 0x1d7: 0x18206, // legend
+ 0x1d8: 0x2d405, // thead
+ 0x1da: 0x8c05, // tfoot
+ 0x1dd: 0x1ec06, // object
+ 0x1de: 0x6e05, // media
+ 0x1df: 0x1670a, // annotation
+ 0x1e0: 0x20d0b, // formenctype
+ 0x1e2: 0x3d208, // noscript
+ 0x1e4: 0x55505, // sizes
+ 0x1e5: 0x1fc0c, // autocomplete
+ 0x1e6: 0x9504, // span
+ 0x1e7: 0x9808, // noframes
+ 0x1e8: 0x24b06, // target
+ 0x1e9: 0x38f06, // ondrop
+ 0x1ea: 0x2b306, // applet
+ 0x1ec: 0x5a08, // reversed
+ 0x1f0: 0x2a907, // isindex
+ 0x1f3: 0x27008, // hreflang
+ 0x1f5: 0x2f302, // h5
+ 0x1f6: 0x4f307, // address
+ 0x1fa: 0x2e103, // max
+ 0x1fb: 0xc30b, // placeholder
+ 0x1fc: 0x2f608, // textarea
+ 0x1fe: 0x4ad09, // onmouseup
+ 0x1ff: 0x3800b, // ondragstart
+}
+
+const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
+ "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
+ "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
+ "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
+ "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
+ "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
+ "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
+ "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
+ "bjectforeignobjectformactionautocompleteerrorformenctypemust" +
+ "matchallengeformmethodformnovalidatetimeterformtargetheightm" +
+ "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
+ "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
+ "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
+ "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
+ "hangeondblclickondragendondragenterondragleaveondragoverondr" +
+ "agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
+ "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
+ "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
+ "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
+ "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
+ "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
+ "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
+ "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
+ "ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
+ "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
+ "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
+ "mmarysupsvgsystemplate"
diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go
new file mode 100644
index 000000000..0f2ecce4f
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table_test.go
@@ -0,0 +1,351 @@
+// generated by go run gen.go -test; DO NOT EDIT
+
+package atom
+
+var testAtomList = []string{
+ "a",
+ "abbr",
+ "abbr",
+ "accept",
+ "accept-charset",
+ "accesskey",
+ "action",
+ "address",
+ "align",
+ "alt",
+ "annotation",
+ "annotation-xml",
+ "applet",
+ "area",
+ "article",
+ "aside",
+ "async",
+ "audio",
+ "autocomplete",
+ "autofocus",
+ "autoplay",
+ "b",
+ "base",
+ "basefont",
+ "bdi",
+ "bdo",
+ "bgsound",
+ "big",
+ "blink",
+ "blockquote",
+ "body",
+ "br",
+ "button",
+ "canvas",
+ "caption",
+ "center",
+ "challenge",
+ "charset",
+ "checked",
+ "cite",
+ "cite",
+ "class",
+ "code",
+ "col",
+ "colgroup",
+ "color",
+ "cols",
+ "colspan",
+ "command",
+ "command",
+ "content",
+ "contenteditable",
+ "contextmenu",
+ "controls",
+ "coords",
+ "crossorigin",
+ "data",
+ "data",
+ "datalist",
+ "datetime",
+ "dd",
+ "default",
+ "defer",
+ "del",
+ "desc",
+ "details",
+ "dfn",
+ "dialog",
+ "dir",
+ "dirname",
+ "disabled",
+ "div",
+ "dl",
+ "download",
+ "draggable",
+ "dropzone",
+ "dt",
+ "em",
+ "embed",
+ "enctype",
+ "face",
+ "fieldset",
+ "figcaption",
+ "figure",
+ "font",
+ "footer",
+ "for",
+ "foreignObject",
+ "foreignobject",
+ "form",
+ "form",
+ "formaction",
+ "formenctype",
+ "formmethod",
+ "formnovalidate",
+ "formtarget",
+ "frame",
+ "frameset",
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6",
+ "head",
+ "header",
+ "headers",
+ "height",
+ "hgroup",
+ "hidden",
+ "high",
+ "hr",
+ "href",
+ "hreflang",
+ "html",
+ "http-equiv",
+ "i",
+ "icon",
+ "id",
+ "iframe",
+ "image",
+ "img",
+ "input",
+ "inputmode",
+ "ins",
+ "isindex",
+ "ismap",
+ "itemid",
+ "itemprop",
+ "itemref",
+ "itemscope",
+ "itemtype",
+ "kbd",
+ "keygen",
+ "keytype",
+ "kind",
+ "label",
+ "label",
+ "lang",
+ "legend",
+ "li",
+ "link",
+ "list",
+ "listing",
+ "loop",
+ "low",
+ "malignmark",
+ "manifest",
+ "map",
+ "mark",
+ "marquee",
+ "math",
+ "max",
+ "maxlength",
+ "media",
+ "mediagroup",
+ "menu",
+ "menuitem",
+ "meta",
+ "meter",
+ "method",
+ "mglyph",
+ "mi",
+ "min",
+ "minlength",
+ "mn",
+ "mo",
+ "ms",
+ "mtext",
+ "multiple",
+ "muted",
+ "name",
+ "nav",
+ "nobr",
+ "noembed",
+ "noframes",
+ "noscript",
+ "novalidate",
+ "object",
+ "ol",
+ "onabort",
+ "onafterprint",
+ "onautocomplete",
+ "onautocompleteerror",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontextmenu",
+ "oncuechange",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onhashchange",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onlanguagechange",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadstart",
+ "onmessage",
+ "onmousedown",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onpopstate",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onscroll",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onshow",
+ "onsort",
+ "onstalled",
+ "onstorage",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onunload",
+ "onvolumechange",
+ "onwaiting",
+ "open",
+ "optgroup",
+ "optimum",
+ "option",
+ "output",
+ "p",
+ "param",
+ "pattern",
+ "ping",
+ "placeholder",
+ "plaintext",
+ "poster",
+ "pre",
+ "preload",
+ "progress",
+ "prompt",
+ "public",
+ "q",
+ "radiogroup",
+ "readonly",
+ "rel",
+ "required",
+ "reversed",
+ "rows",
+ "rowspan",
+ "rp",
+ "rt",
+ "ruby",
+ "s",
+ "samp",
+ "sandbox",
+ "scope",
+ "scoped",
+ "script",
+ "seamless",
+ "section",
+ "select",
+ "selected",
+ "shape",
+ "size",
+ "sizes",
+ "small",
+ "sortable",
+ "sorted",
+ "source",
+ "spacer",
+ "span",
+ "span",
+ "spellcheck",
+ "src",
+ "srcdoc",
+ "srclang",
+ "start",
+ "step",
+ "strike",
+ "strong",
+ "style",
+ "style",
+ "sub",
+ "summary",
+ "sup",
+ "svg",
+ "system",
+ "tabindex",
+ "table",
+ "target",
+ "tbody",
+ "td",
+ "template",
+ "textarea",
+ "tfoot",
+ "th",
+ "thead",
+ "time",
+ "title",
+ "title",
+ "tr",
+ "track",
+ "translate",
+ "tt",
+ "type",
+ "typemustmatch",
+ "u",
+ "ul",
+ "usemap",
+ "value",
+ "var",
+ "video",
+ "wbr",
+ "width",
+ "wrap",
+ "xmp",
+}
diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go
new file mode 100644
index 000000000..13bed1599
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/charset.go
@@ -0,0 +1,257 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package charset provides common text encodings for HTML documents.
+//
+// The mapping from encoding labels to encodings is defined at
+// https://encoding.spec.whatwg.org/.
+package charset // import "golang.org/x/net/html/charset"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "mime"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/charmap"
+ "golang.org/x/text/encoding/htmlindex"
+ "golang.org/x/text/transform"
+)
+
+// Lookup returns the encoding with the specified label, and its canonical
+// name. It returns nil and the empty string if label is not one of the
+// standard encodings for HTML. Matching is case-insensitive and ignores
+// leading and trailing whitespace. Encoders will use HTML escape sequences for
+// runes that are not supported by the character set.
+func Lookup(label string) (e encoding.Encoding, name string) {
+ e, err := htmlindex.Get(label)
+ if err != nil {
+ return nil, ""
+ }
+ name, _ = htmlindex.Name(e)
+ return &htmlEncoding{e}, name
+}
+
+type htmlEncoding struct{ encoding.Encoding }
+
+func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
+ // HTML requires a non-terminating legacy encoder. We use HTML escapes to
+ // substitute unsupported code points.
+ return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
+}
+
+// DetermineEncoding determines the encoding of an HTML document by examining
+// up to the first 1024 bytes of content and the declared Content-Type.
+//
+// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
+func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
+ if len(content) > 1024 {
+ content = content[:1024]
+ }
+
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ e, name = Lookup(b.enc)
+ return e, name, true
+ }
+ }
+
+ if _, params, err := mime.ParseMediaType(contentType); err == nil {
+ if cs, ok := params["charset"]; ok {
+ if e, name = Lookup(cs); e != nil {
+ return e, name, true
+ }
+ }
+ }
+
+ if len(content) > 0 {
+ e, name = prescan(content)
+ if e != nil {
+ return e, name, false
+ }
+ }
+
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return encoding.Nop, "utf-8", false
+ }
+
+ // TODO: change default depending on user's locale?
+ return charmap.Windows1252, "windows-1252", false
+}
+
+// NewReader returns an io.Reader that converts the content of r to UTF-8.
+// It calls DetermineEncoding to find out what r's encoding is.
+func NewReader(r io.Reader, contentType string) (io.Reader, error) {
+ preview := make([]byte, 1024)
+ n, err := io.ReadFull(r, preview)
+ switch {
+ case err == io.ErrUnexpectedEOF:
+ preview = preview[:n]
+ r = bytes.NewReader(preview)
+ case err != nil:
+ return nil, err
+ default:
+ r = io.MultiReader(bytes.NewReader(preview), r)
+ }
+
+ if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
+ r = transform.NewReader(r, e.NewDecoder())
+ }
+ return r, nil
+}
+
+// NewReaderLabel returns a reader that converts from the specified charset to
+// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
+// returns an error if Lookup returns nil. It is suitable for use as
+// encoding/xml.Decoder's CharsetReader function.
+func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
+ e, _ := Lookup(label)
+ if e == nil {
+ return nil, fmt.Errorf("unsupported charset: %q", label)
+ }
+ return transform.NewReader(input, e.NewDecoder()), nil
+}
+
+func prescan(content []byte) (e encoding.Encoding, name string) {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return nil, ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name = ""
+ e = nil
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ if e == nil {
+ name = fromMetaElement(string(val))
+ if name != "" {
+ e, name = Lookup(name)
+ if e != nil {
+ needPragma = doNeedPragma
+ }
+ }
+ }
+
+ case "charset":
+ e, name = Lookup(string(val))
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ e = encoding.Nop
+ }
+
+ if e != nil {
+ return e, name
+ }
+ }
+ }
+}
+
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
+
+var boms = []struct {
+ bom []byte
+ enc string
+}{
+ {[]byte{0xfe, 0xff}, "utf-16be"},
+ {[]byte{0xff, 0xfe}, "utf-16le"},
+ {[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
+}
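The charset.go file above implements the WHATWG detection order: a byte-order mark wins, then the charset parameter of the declared Content-Type, then a <meta> prescan of the first 1024 bytes, then UTF-8 sniffing, with windows-1252 as the final fallback. A minimal usage sketch of the two exported entry points; the sample HTML body and variable names here are illustrative only.

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"

        "golang.org/x/net/html/charset"
    )

    func main() {
        // A body declared as ISO 8859-15; byte 0xe9 is "é" in that charset.
        raw := "<html><body>caf\xe9</body></html>"

        // DetermineEncoding reports the chosen encoding, its canonical name,
        // and whether the choice is certain (here the Content-Type charset wins).
        _, name, certain := charset.DetermineEncoding([]byte(raw), "text/html; charset=iso-8859-15")
        fmt.Println(name, certain) // iso-8859-15 true

        // NewReader wraps the body so that callers always read UTF-8.
        r, err := charset.NewReader(strings.NewReader(raw), "text/html; charset=iso-8859-15")
        if err != nil {
            panic(err)
        }
        utf8Body, _ := ioutil.ReadAll(r)
        fmt.Printf("%s\n", utf8Body) // café
    }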
diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go
new file mode 100644
index 000000000..e4e7d86bf
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/charset_test.go
@@ -0,0 +1,237 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package charset
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io/ioutil"
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/text/transform"
+)
+
+func transformString(t transform.Transformer, s string) (string, error) {
+ r := transform.NewReader(strings.NewReader(s), t)
+ b, err := ioutil.ReadAll(r)
+ return string(b), err
+}
+
+type testCase struct {
+ utf8, other, otherEncoding string
+}
+
+// testCases for encoding and decoding.
+var testCases = []testCase{
+ {"Résumé", "Résumé", "utf8"},
+ {"Résumé", "R\xe9sum\xe9", "latin1"},
+ {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"},
+ {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"},
+ {"Hello, world", "Hello, world", "ASCII"},
+ {"Gdańsk", "Gda\xf1sk", "ISO-8859-2"},
+ {"Ââ ÄŒÄ ÄÄ‘ ÅŠÅ‹ Õõ Å Å¡ Žž Ã…Ã¥ Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"},
+ {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"},
+ {"latviešu", "latvie\xf0u", "ISO-8859-13"},
+ {"Seònaid", "Se\xf2naid", "ISO-8859-14"},
+ {"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"},
+ {"românește", "rom\xe2ne\xbate", "ISO-8859-16"},
+ {"nutraĵo", "nutra\xbco", "ISO-8859-3"},
+ {"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"},
+ {"руÑÑкий", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"},
+ {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"},
+ {"KaÄŸan", "Ka\xf0an", "ISO-8859-9"},
+ {"Résumé", "R\x8esum\x8e", "macintosh"},
+ {"Gdańsk", "Gda\xf1sk", "windows-1250"},
+ {"руÑÑкий", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"},
+ {"Résumé", "R\xe9sum\xe9", "windows-1252"},
+ {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"},
+ {"KaÄŸan", "Ka\xf0an", "windows-1254"},
+ {"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"},
+ {"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"},
+ {"latviešu", "latvie\xf0u", "windows-1257"},
+ {"Việt", "Vi\xea\xf2t", "windows-1258"},
+ {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"},
+ {"руÑÑкий", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"},
+ {"українÑька", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"},
+ {"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"},
+ {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"},
+ {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"},
+ {"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"},
+ {"㧯", "\x82\x31\x89\x38", "gb18030"},
+ {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"},
+ {"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"},
+ {"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"},
+ {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"},
+ {"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"},
+ {"다ìŒê³¼ ê°™ì€ ì¡°ê±´ì„ ë”°ë¼ì•¼ 합니다: 저작ìží‘œì‹œ", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7Õ´Ï´\xd9: \xc0\xfa\xc0\xdb\xc0\xdaÇ¥\xbd\xc3", "EUC-KR"},
+}
+
+func TestDecode(t *testing.T) {
+ testCases := append(testCases, []testCase{
+ // Replace multi-byte maximum subpart of ill-formed subsequence with
+ // single replacement character (WhatWG requirement).
+ {"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"},
+ }...)
+ for _, tc := range testCases {
+ e, _ := Lookup(tc.otherEncoding)
+ if e == nil {
+ t.Errorf("%s: not found", tc.otherEncoding)
+ continue
+ }
+ s, err := transformString(e.NewDecoder(), tc.other)
+ if err != nil {
+ t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err)
+ continue
+ }
+ if s != tc.utf8 {
+ t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8)
+ }
+ }
+}
+
+func TestEncode(t *testing.T) {
+ testCases := append(testCases, []testCase{
+ // Use Go-style replacement.
+ {"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"},
+ // U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding.
+ {"Gdańsk", "Gda&#324;sk", "ISO-8859-11"},
+ {"\ufffd", "&#65533;", "ISO-8859-11"},
+ {"a\xe1\x80b", "a&#65533;&#65533;b", "ISO-8859-11"},
+ }...)
+ for _, tc := range testCases {
+ e, _ := Lookup(tc.otherEncoding)
+ if e == nil {
+ t.Errorf("%s: not found", tc.otherEncoding)
+ continue
+ }
+ s, err := transformString(e.NewEncoder(), tc.utf8)
+ if err != nil {
+ t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err)
+ continue
+ }
+ if s != tc.other {
+ t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other)
+ }
+ }
+}
+
+var sniffTestCases = []struct {
+ filename, declared, want string
+}{
+ {"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+ {"UTF-16LE-BOM.html", "", "utf-16le"},
+ {"UTF-16BE-BOM.html", "", "utf-16be"},
+ {"meta-content-attribute.html", "text/html", "iso-8859-15"},
+ {"meta-charset-attribute.html", "text/html", "iso-8859-15"},
+ {"No-encoding-declaration.html", "text/html", "utf-8"},
+ {"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"},
+ {"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+ {"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+ {"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"},
+ {"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"},
+}
+
+func TestSniff(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl": // platforms that don't permit direct file system access
+ t.Skipf("not supported on %q", runtime.GOOS)
+ }
+
+ for _, tc := range sniffTestCases {
+ content, err := ioutil.ReadFile("testdata/" + tc.filename)
+ if err != nil {
+ t.Errorf("%s: error reading file: %v", tc.filename, err)
+ continue
+ }
+
+ _, name, _ := DetermineEncoding(content, tc.declared)
+ if name != tc.want {
+ t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want)
+ continue
+ }
+ }
+}
+
+func TestReader(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl": // platforms that don't permit direct file system access
+ t.Skipf("not supported on %q", runtime.GOOS)
+ }
+
+ for _, tc := range sniffTestCases {
+ content, err := ioutil.ReadFile("testdata/" + tc.filename)
+ if err != nil {
+ t.Errorf("%s: error reading file: %v", tc.filename, err)
+ continue
+ }
+
+ r, err := NewReader(bytes.NewReader(content), tc.declared)
+ if err != nil {
+ t.Errorf("%s: error creating reader: %v", tc.filename, err)
+ continue
+ }
+
+ got, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
+ continue
+ }
+
+ e, _ := Lookup(tc.want)
+ want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
+ if err != nil {
+ t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
+ continue
+ }
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("%s: got %q, want %q", tc.filename, got, want)
+ continue
+ }
+ }
+}
+
+var metaTestCases = []struct {
+ meta, want string
+}{
+ {"", ""},
+ {"text/html", ""},
+ {"text/html; charset utf-8", ""},
+ {"text/html; charset=latin-2", "latin-2"},
+ {"text/html; charset; charset = utf-8", "utf-8"},
+ {`charset="big5"`, "big5"},
+ {"charset='shift_jis'", "shift_jis"},
+}
+
+func TestFromMeta(t *testing.T) {
+ for _, tc := range metaTestCases {
+ got := fromMetaElement(tc.meta)
+ if got != tc.want {
+ t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want)
+ }
+ }
+}
+
+func TestXML(t *testing.T) {
+ const s = "<?xml version=\"1.0\" encoding=\"windows-1252\"?><a><Word>r\xe9sum\xe9</Word></a>"
+
+ d := xml.NewDecoder(strings.NewReader(s))
+ d.CharsetReader = NewReaderLabel
+
+ var a struct {
+ Word string
+ }
+ err := d.Decode(&a)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+
+ want := "résumé"
+ if a.Word != want {
+ t.Errorf("got %q, want %q", a.Word, want)
+ }
+}
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
new file mode 100644
index 000000000..9915fa0ee
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <title>HTTP charset</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="The character encoding of a page can be set using the HTTP header charset declaration.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
+</head>
+<body>
+<p class='title'>HTTP charset</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">The character encoding of a page can be set using the HTTP header charset declaration.</p>
+<div class="notes"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-003">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-001<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
new file mode 100644
index 000000000..26e5d8b4e
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <title>HTTP vs UTF-8 BOM</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
+</head>
+<body>
+<p class='title'>HTTP vs UTF-8 BOM</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.</p>
+<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p><p>If the test is unsuccessful, the characters &#x00EF;&#x00BB;&#x00BF; should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-022">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-034<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-034" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
new file mode 100644
index 000000000..2f07e9515
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta charset="iso-8859-1" > <title>HTTP vs meta charset</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.">
+<style type='text/css'>
+.test div { width: 50px; }.test div { width: 90px; }
+</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
+</head>
+<body>
+<p class='title'>HTTP vs meta charset</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.</p>
+<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-037">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-018<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-018" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
new file mode 100644
index 000000000..6853cddec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta http-equiv="content-type" content="text/html;charset=iso-8859-1" > <title>HTTP vs meta content</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.">
+<style type='text/css'>
+.test div { width: 50px; }.test div { width: 90px; }
+</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
+</head>
+<body>
+<p class='title'>HTTP vs meta content</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.</p>
+<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-018">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-016<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-016" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
new file mode 100644
index 000000000..612e26c6c
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <title>No encoding declaration</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
+</head>
+<body>
+<p class='title'>No encoding declaration</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.</p>
+<div class="notes"><p><p>The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-034">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-015<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-015" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README
new file mode 100644
index 000000000..38ef0f9f1
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/README
@@ -0,0 +1,9 @@
+These test cases come from
+http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics
+
+Distributed under both the W3C Test Suite License
+(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)
+and the W3C 3-clause BSD License
+(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).
+To contribute to a W3C Test Suite, see the policies and contribution
+forms (http://www.w3.org/2004/10/27-testcases).
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
new file mode 100644
index 000000000..3abf7a934
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
Binary files differ
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
new file mode 100644
index 000000000..76254c980
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
Binary files differ
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
new file mode 100644
index 000000000..83de43338
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta charset="iso-8859-15"> <title>UTF-8 BOM vs meta charset</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.">
+<style type='text/css'>
+.test div { width: 50px; }.test div { width: 90px; }
+</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
+</head>
+<body>
+<p class='title'>UTF-8 BOM vs meta charset</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.</p>
+<div class="notes"><p><p>The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-024">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-038<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-038" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
new file mode 100644
index 000000000..501aac2d6
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>UTF-8 BOM vs meta content</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
+</head>
+<body>
+<p class='title'>UTF-8 BOM vs meta content</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.</p>
+<div class="notes"><p><p>The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-038">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-037<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-037" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
new file mode 100644
index 000000000..2d7d25aba
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta charset="iso-8859-15"> <title>meta charset attribute</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="The character encoding of the page can be set by a meta element with charset attribute.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
+</head>
+<body>
+<p class='title'>meta charset attribute</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with charset attribute.</p>
+<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-015">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-009<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-009" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
new file mode 100644
index 000000000..1c3f228e7
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html lang="en" >
+<head>
+ <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>meta content attribute</title>
+<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
+<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
+<link rel="stylesheet" type="text/css" href="./generatedtests.css">
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+<meta name='flags' content='http'>
+<meta name="assert" content="The character encoding of the page can be set by a meta element with http-equiv and content attributes.">
+<style type='text/css'>
+.test div { width: 50px; }</style>
+<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
+</head>
+<body>
+<p class='title'>meta content attribute</p>
+
+
+<div id='log'></div>
+
+
+<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
+
+
+
+
+
+<div class='description'>
+<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with http-equiv and content attributes.</p>
+<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
+</div>
+</div>
+<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-009">Next test</a></div><div class="doctype">HTML5</div>
+<p class="jump">the-input-byte-stream-007<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-007" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
+<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
+ <li>The test is read from a server that supports HTTP.</li></ul></div>
+</div>
+<script>
+test(function() {
+assert_equals(document.getElementById('box').offsetWidth, 100);
+}, " ");
+</script>
+
+</body>
+</html>
+
+
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
new file mode 100644
index 000000000..52f651ff6
--- /dev/null
+++ b/vendor/golang.org/x/net/html/const.go
@@ -0,0 +1,102 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// Section 12.2.3.2 of the HTML5 specification says "The following elements
+// have varying levels of special parsing rules".
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
+var isSpecialElementMap = map[string]bool{
+ "address": true,
+ "applet": true,
+ "area": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "bgsound": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "button": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "embed": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hgroup": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "img": true,
+ "input": true,
+ "isindex": true,
+ "li": true,
+ "link": true,
+ "listing": true,
+ "marquee": true,
+ "menu": true,
+ "meta": true,
+ "nav": true,
+ "noembed": true,
+ "noframes": true,
+ "noscript": true,
+ "object": true,
+ "ol": true,
+ "p": true,
+ "param": true,
+ "plaintext": true,
+ "pre": true,
+ "script": true,
+ "section": true,
+ "select": true,
+ "source": true,
+ "style": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "template": true,
+ "textarea": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+ "wbr": true,
+ "xmp": true,
+}
+
+func isSpecialElement(element *Node) bool {
+ switch element.Namespace {
+ case "", "html":
+ return isSpecialElementMap[element.Data]
+ case "svg":
+ return element.Data == "foreignObject"
+ }
+ return false
+}
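isSpecialElement is unexported and only steers the parser's internal handling of mis-nested markup; callers of golang.org/x/net/html never see it directly. The sketch below mirrors the same namespace/tag lookup from outside the package, purely for illustration: the helper name specialLike and the reduced tag subset are assumptions, not anything exported by the library.

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    // specialLike mirrors the shape of the isSpecialElement check for a tiny
    // illustrative subset of tags.
    func specialLike(n *html.Node) bool {
        subset := map[string]bool{"p": true, "div": true, "table": true, "li": true}
        switch n.Namespace {
        case "", "html":
            return subset[n.Data]
        case "svg":
            return n.Data == "foreignObject"
        }
        return false
    }

    func main() {
        doc, err := html.Parse(strings.NewReader("<p><span>hi</span></p>"))
        if err != nil {
            panic(err)
        }
        var walk func(*html.Node)
        walk = func(n *html.Node) {
            if n.Type == html.ElementNode {
                // Prints each element and whether it is in the illustrative subset.
                fmt.Println(n.Data, specialLike(n))
            }
            for c := n.FirstChild; c != nil; c = c.NextSibling {
                walk(c)
            }
        }
        walk(doc)
    }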
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
new file mode 100644
index 000000000..94f496874
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -0,0 +1,106 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package html implements an HTML5-compliant tokenizer and parser.
+
+Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
+caller's responsibility to ensure that r provides UTF-8 encoded HTML.
+
+ z := html.NewTokenizer(r)
+
+Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
+which parses the next token and returns its type, or an error:
+
+ for {
+ tt := z.Next()
+ if tt == html.ErrorToken {
+ // ...
+ return ...
+ }
+ // Process the current token.
+ }
+
+There are two APIs for retrieving the current token. The high-level API is to
+call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
+allow optionally calling Raw after Next but before Token, Text, TagName, or
+TagAttr. In EBNF notation, the valid call sequence per token is:
+
+ Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+ for {
+ if z.Next() == html.ErrorToken {
+ // Returning io.EOF indicates success.
+ return z.Err()
+ }
+ emitToken(z.Token())
+ }
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+ depth := 0
+ for {
+ tt := z.Next()
+ switch tt {
+ case ErrorToken:
+ return z.Err()
+ case TextToken:
+ if depth > 0 {
+ // emitBytes should copy the []byte it receives,
+ // if it doesn't process it immediately.
+ emitBytes(z.Text())
+ }
+ case StartTagToken, EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+ doc, err := html.Parse(r)
+ if err != nil {
+ // ...
+ }
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ // Do something with n...
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
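The package comment above shows the low-level anchor-text loop only as a fragment; a self-contained version might look like the following sketch, where the input string is illustrative only.

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        const page = `<p>See <a href="/docs">the docs</a> and <a href="/faq">the FAQ</a>.</p>`
        z := html.NewTokenizer(strings.NewReader(page))
        depth := 0
        for {
            tt := z.Next()
            switch tt {
            case html.ErrorToken:
                // z.Err() is io.EOF once the whole input has been tokenized.
                return
            case html.TextToken:
                if depth > 0 {
                    fmt.Printf("anchor text: %s\n", z.Text())
                }
            case html.StartTagToken, html.EndTagToken:
                if tn, _ := z.TagName(); len(tn) == 1 && tn[0] == 'a' {
                    if tt == html.StartTagToken {
                        depth++
                    } else {
                        depth--
                    }
                }
            }
        }
    }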
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 000000000..c484e5a94
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+ n = &Node{Type: DoctypeNode}
+
+ // Find the name.
+ space := strings.IndexAny(s, whitespace)
+ if space == -1 {
+ space = len(s)
+ }
+ n.Data = s[:space]
+ // The comparison to "html" is case-sensitive.
+ if n.Data != "html" {
+ quirks = true
+ }
+ n.Data = strings.ToLower(n.Data)
+ s = strings.TrimLeft(s[space:], whitespace)
+
+ if len(s) < 6 {
+ // It can't start with "PUBLIC" or "SYSTEM".
+ // Ignore the rest of the string.
+ return n, quirks || s != ""
+ }
+
+ key := strings.ToLower(s[:6])
+ s = s[6:]
+ for key == "public" || key == "system" {
+ s = strings.TrimLeft(s, whitespace)
+ if s == "" {
+ break
+ }
+ quote := s[0]
+ if quote != '"' && quote != '\'' {
+ break
+ }
+ s = s[1:]
+ q := strings.IndexRune(s, rune(quote))
+ var id string
+ if q == -1 {
+ id = s
+ s = ""
+ } else {
+ id = s[:q]
+ s = s[q+1:]
+ }
+ n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+ if key == "public" {
+ key = "system"
+ } else {
+ key = ""
+ }
+ }
+
+ if key != "" || s != "" {
+ quirks = true
+ } else if len(n.Attr) > 0 {
+ if n.Attr[0].Key == "public" {
+ public := strings.ToLower(n.Attr[0].Val)
+ switch public {
+ case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
+ quirks = true
+ default:
+ for _, q := range quirkyIDs {
+ if strings.HasPrefix(public, q) {
+ quirks = true
+ break
+ }
+ }
+ }
+ // The following two public IDs only cause quirks mode if there is no system ID.
+ if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+ strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+ quirks = true
+ }
+ }
+ if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+ strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ quirks = true
+ }
+ }
+
+ return n, quirks
+}
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+ "+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//",
+}
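parseDoctype is unexported, but its result is visible through the public API: Parse attaches a DoctypeNode to the document whose Data is the lower-cased doctype name and whose Attr holds the "public" and "system" identifiers. A small sketch, using an illustrative HTML 4.01 Strict doctype:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        const page = `<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><p>hi</p>`
        doc, err := html.Parse(strings.NewReader(page))
        if err != nil {
            panic(err)
        }
        for n := doc.FirstChild; n != nil; n = n.NextSibling {
            if n.Type == html.DoctypeNode {
                fmt.Println("name:", n.Data) // name: html
                for _, a := range n.Attr {
                    // Prints the public and system identifiers stored on the node.
                    fmt.Printf("%s: %s\n", a.Key, a.Val)
                }
            }
        }
    }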
diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
new file mode 100644
index 000000000..a50c04c60
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity.go
@@ -0,0 +1,2253 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// All entities that do not end with ';' are 6 or fewer bytes long.
+const longestEntityWithoutSemicolon = 6
+
+// entity is a map from HTML entity names to their values. The semicolon matters:
+// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
+// lists both "amp" and "amp;" as two separate entries.
+//
+// Note that the HTML5 list is larger than the HTML4 list at
+// http://www.w3.org/TR/html4/sgml/entities.html
+var entity = map[string]rune{
+ "AElig;": '\U000000C6',
+ "AMP;": '\U00000026',
+ "Aacute;": '\U000000C1',
+ "Abreve;": '\U00000102',
+ "Acirc;": '\U000000C2',
+ "Acy;": '\U00000410',
+ "Afr;": '\U0001D504',
+ "Agrave;": '\U000000C0',
+ "Alpha;": '\U00000391',
+ "Amacr;": '\U00000100',
+ "And;": '\U00002A53',
+ "Aogon;": '\U00000104',
+ "Aopf;": '\U0001D538',
+ "ApplyFunction;": '\U00002061',
+ "Aring;": '\U000000C5',
+ "Ascr;": '\U0001D49C',
+ "Assign;": '\U00002254',
+ "Atilde;": '\U000000C3',
+ "Auml;": '\U000000C4',
+ "Backslash;": '\U00002216',
+ "Barv;": '\U00002AE7',
+ "Barwed;": '\U00002306',
+ "Bcy;": '\U00000411',
+ "Because;": '\U00002235',
+ "Bernoullis;": '\U0000212C',
+ "Beta;": '\U00000392',
+ "Bfr;": '\U0001D505',
+ "Bopf;": '\U0001D539',
+ "Breve;": '\U000002D8',
+ "Bscr;": '\U0000212C',
+ "Bumpeq;": '\U0000224E',
+ "CHcy;": '\U00000427',
+ "COPY;": '\U000000A9',
+ "Cacute;": '\U00000106',
+ "Cap;": '\U000022D2',
+ "CapitalDifferentialD;": '\U00002145',
+ "Cayleys;": '\U0000212D',
+ "Ccaron;": '\U0000010C',
+ "Ccedil;": '\U000000C7',
+ "Ccirc;": '\U00000108',
+ "Cconint;": '\U00002230',
+ "Cdot;": '\U0000010A',
+ "Cedilla;": '\U000000B8',
+ "CenterDot;": '\U000000B7',
+ "Cfr;": '\U0000212D',
+ "Chi;": '\U000003A7',
+ "CircleDot;": '\U00002299',
+ "CircleMinus;": '\U00002296',
+ "CirclePlus;": '\U00002295',
+ "CircleTimes;": '\U00002297',
+ "ClockwiseContourIntegral;": '\U00002232',
+ "CloseCurlyDoubleQuote;": '\U0000201D',
+ "CloseCurlyQuote;": '\U00002019',
+ "Colon;": '\U00002237',
+ "Colone;": '\U00002A74',
+ "Congruent;": '\U00002261',
+ "Conint;": '\U0000222F',
+ "ContourIntegral;": '\U0000222E',
+ "Copf;": '\U00002102',
+ "Coproduct;": '\U00002210',
+ "CounterClockwiseContourIntegral;": '\U00002233',
+ "Cross;": '\U00002A2F',
+ "Cscr;": '\U0001D49E',
+ "Cup;": '\U000022D3',
+ "CupCap;": '\U0000224D',
+ "DD;": '\U00002145',
+ "DDotrahd;": '\U00002911',
+ "DJcy;": '\U00000402',
+ "DScy;": '\U00000405',
+ "DZcy;": '\U0000040F',
+ "Dagger;": '\U00002021',
+ "Darr;": '\U000021A1',
+ "Dashv;": '\U00002AE4',
+ "Dcaron;": '\U0000010E',
+ "Dcy;": '\U00000414',
+ "Del;": '\U00002207',
+ "Delta;": '\U00000394',
+ "Dfr;": '\U0001D507',
+ "DiacriticalAcute;": '\U000000B4',
+ "DiacriticalDot;": '\U000002D9',
+ "DiacriticalDoubleAcute;": '\U000002DD',
+ "DiacriticalGrave;": '\U00000060',
+ "DiacriticalTilde;": '\U000002DC',
+ "Diamond;": '\U000022C4',
+ "DifferentialD;": '\U00002146',
+ "Dopf;": '\U0001D53B',
+ "Dot;": '\U000000A8',
+ "DotDot;": '\U000020DC',
+ "DotEqual;": '\U00002250',
+ "DoubleContourIntegral;": '\U0000222F',
+ "DoubleDot;": '\U000000A8',
+ "DoubleDownArrow;": '\U000021D3',
+ "DoubleLeftArrow;": '\U000021D0',
+ "DoubleLeftRightArrow;": '\U000021D4',
+ "DoubleLeftTee;": '\U00002AE4',
+ "DoubleLongLeftArrow;": '\U000027F8',
+ "DoubleLongLeftRightArrow;": '\U000027FA',
+ "DoubleLongRightArrow;": '\U000027F9',
+ "DoubleRightArrow;": '\U000021D2',
+ "DoubleRightTee;": '\U000022A8',
+ "DoubleUpArrow;": '\U000021D1',
+ "DoubleUpDownArrow;": '\U000021D5',
+ "DoubleVerticalBar;": '\U00002225',
+ "DownArrow;": '\U00002193',
+ "DownArrowBar;": '\U00002913',
+ "DownArrowUpArrow;": '\U000021F5',
+ "DownBreve;": '\U00000311',
+ "DownLeftRightVector;": '\U00002950',
+ "DownLeftTeeVector;": '\U0000295E',
+ "DownLeftVector;": '\U000021BD',
+ "DownLeftVectorBar;": '\U00002956',
+ "DownRightTeeVector;": '\U0000295F',
+ "DownRightVector;": '\U000021C1',
+ "DownRightVectorBar;": '\U00002957',
+ "DownTee;": '\U000022A4',
+ "DownTeeArrow;": '\U000021A7',
+ "Downarrow;": '\U000021D3',
+ "Dscr;": '\U0001D49F',
+ "Dstrok;": '\U00000110',
+ "ENG;": '\U0000014A',
+ "ETH;": '\U000000D0',
+ "Eacute;": '\U000000C9',
+ "Ecaron;": '\U0000011A',
+ "Ecirc;": '\U000000CA',
+ "Ecy;": '\U0000042D',
+ "Edot;": '\U00000116',
+ "Efr;": '\U0001D508',
+ "Egrave;": '\U000000C8',
+ "Element;": '\U00002208',
+ "Emacr;": '\U00000112',
+ "EmptySmallSquare;": '\U000025FB',
+ "EmptyVerySmallSquare;": '\U000025AB',
+ "Eogon;": '\U00000118',
+ "Eopf;": '\U0001D53C',
+ "Epsilon;": '\U00000395',
+ "Equal;": '\U00002A75',
+ "EqualTilde;": '\U00002242',
+ "Equilibrium;": '\U000021CC',
+ "Escr;": '\U00002130',
+ "Esim;": '\U00002A73',
+ "Eta;": '\U00000397',
+ "Euml;": '\U000000CB',
+ "Exists;": '\U00002203',
+ "ExponentialE;": '\U00002147',
+ "Fcy;": '\U00000424',
+ "Ffr;": '\U0001D509',
+ "FilledSmallSquare;": '\U000025FC',
+ "FilledVerySmallSquare;": '\U000025AA',
+ "Fopf;": '\U0001D53D',
+ "ForAll;": '\U00002200',
+ "Fouriertrf;": '\U00002131',
+ "Fscr;": '\U00002131',
+ "GJcy;": '\U00000403',
+ "GT;": '\U0000003E',
+ "Gamma;": '\U00000393',
+ "Gammad;": '\U000003DC',
+ "Gbreve;": '\U0000011E',
+ "Gcedil;": '\U00000122',
+ "Gcirc;": '\U0000011C',
+ "Gcy;": '\U00000413',
+ "Gdot;": '\U00000120',
+ "Gfr;": '\U0001D50A',
+ "Gg;": '\U000022D9',
+ "Gopf;": '\U0001D53E',
+ "GreaterEqual;": '\U00002265',
+ "GreaterEqualLess;": '\U000022DB',
+ "GreaterFullEqual;": '\U00002267',
+ "GreaterGreater;": '\U00002AA2',
+ "GreaterLess;": '\U00002277',
+ "GreaterSlantEqual;": '\U00002A7E',
+ "GreaterTilde;": '\U00002273',
+ "Gscr;": '\U0001D4A2',
+ "Gt;": '\U0000226B',
+ "HARDcy;": '\U0000042A',
+ "Hacek;": '\U000002C7',
+ "Hat;": '\U0000005E',
+ "Hcirc;": '\U00000124',
+ "Hfr;": '\U0000210C',
+ "HilbertSpace;": '\U0000210B',
+ "Hopf;": '\U0000210D',
+ "HorizontalLine;": '\U00002500',
+ "Hscr;": '\U0000210B',
+ "Hstrok;": '\U00000126',
+ "HumpDownHump;": '\U0000224E',
+ "HumpEqual;": '\U0000224F',
+ "IEcy;": '\U00000415',
+ "IJlig;": '\U00000132',
+ "IOcy;": '\U00000401',
+ "Iacute;": '\U000000CD',
+ "Icirc;": '\U000000CE',
+ "Icy;": '\U00000418',
+ "Idot;": '\U00000130',
+ "Ifr;": '\U00002111',
+ "Igrave;": '\U000000CC',
+ "Im;": '\U00002111',
+ "Imacr;": '\U0000012A',
+ "ImaginaryI;": '\U00002148',
+ "Implies;": '\U000021D2',
+ "Int;": '\U0000222C',
+ "Integral;": '\U0000222B',
+ "Intersection;": '\U000022C2',
+ "InvisibleComma;": '\U00002063',
+ "InvisibleTimes;": '\U00002062',
+ "Iogon;": '\U0000012E',
+ "Iopf;": '\U0001D540',
+ "Iota;": '\U00000399',
+ "Iscr;": '\U00002110',
+ "Itilde;": '\U00000128',
+ "Iukcy;": '\U00000406',
+ "Iuml;": '\U000000CF',
+ "Jcirc;": '\U00000134',
+ "Jcy;": '\U00000419',
+ "Jfr;": '\U0001D50D',
+ "Jopf;": '\U0001D541',
+ "Jscr;": '\U0001D4A5',
+ "Jsercy;": '\U00000408',
+ "Jukcy;": '\U00000404',
+ "KHcy;": '\U00000425',
+ "KJcy;": '\U0000040C',
+ "Kappa;": '\U0000039A',
+ "Kcedil;": '\U00000136',
+ "Kcy;": '\U0000041A',
+ "Kfr;": '\U0001D50E',
+ "Kopf;": '\U0001D542',
+ "Kscr;": '\U0001D4A6',
+ "LJcy;": '\U00000409',
+ "LT;": '\U0000003C',
+ "Lacute;": '\U00000139',
+ "Lambda;": '\U0000039B',
+ "Lang;": '\U000027EA',
+ "Laplacetrf;": '\U00002112',
+ "Larr;": '\U0000219E',
+ "Lcaron;": '\U0000013D',
+ "Lcedil;": '\U0000013B',
+ "Lcy;": '\U0000041B',
+ "LeftAngleBracket;": '\U000027E8',
+ "LeftArrow;": '\U00002190',
+ "LeftArrowBar;": '\U000021E4',
+ "LeftArrowRightArrow;": '\U000021C6',
+ "LeftCeiling;": '\U00002308',
+ "LeftDoubleBracket;": '\U000027E6',
+ "LeftDownTeeVector;": '\U00002961',
+ "LeftDownVector;": '\U000021C3',
+ "LeftDownVectorBar;": '\U00002959',
+ "LeftFloor;": '\U0000230A',
+ "LeftRightArrow;": '\U00002194',
+ "LeftRightVector;": '\U0000294E',
+ "LeftTee;": '\U000022A3',
+ "LeftTeeArrow;": '\U000021A4',
+ "LeftTeeVector;": '\U0000295A',
+ "LeftTriangle;": '\U000022B2',
+ "LeftTriangleBar;": '\U000029CF',
+ "LeftTriangleEqual;": '\U000022B4',
+ "LeftUpDownVector;": '\U00002951',
+ "LeftUpTeeVector;": '\U00002960',
+ "LeftUpVector;": '\U000021BF',
+ "LeftUpVectorBar;": '\U00002958',
+ "LeftVector;": '\U000021BC',
+ "LeftVectorBar;": '\U00002952',
+ "Leftarrow;": '\U000021D0',
+ "Leftrightarrow;": '\U000021D4',
+ "LessEqualGreater;": '\U000022DA',
+ "LessFullEqual;": '\U00002266',
+ "LessGreater;": '\U00002276',
+ "LessLess;": '\U00002AA1',
+ "LessSlantEqual;": '\U00002A7D',
+ "LessTilde;": '\U00002272',
+ "Lfr;": '\U0001D50F',
+ "Ll;": '\U000022D8',
+ "Lleftarrow;": '\U000021DA',
+ "Lmidot;": '\U0000013F',
+ "LongLeftArrow;": '\U000027F5',
+ "LongLeftRightArrow;": '\U000027F7',
+ "LongRightArrow;": '\U000027F6',
+ "Longleftarrow;": '\U000027F8',
+ "Longleftrightarrow;": '\U000027FA',
+ "Longrightarrow;": '\U000027F9',
+ "Lopf;": '\U0001D543',
+ "LowerLeftArrow;": '\U00002199',
+ "LowerRightArrow;": '\U00002198',
+ "Lscr;": '\U00002112',
+ "Lsh;": '\U000021B0',
+ "Lstrok;": '\U00000141',
+ "Lt;": '\U0000226A',
+ "Map;": '\U00002905',
+ "Mcy;": '\U0000041C',
+ "MediumSpace;": '\U0000205F',
+ "Mellintrf;": '\U00002133',
+ "Mfr;": '\U0001D510',
+ "MinusPlus;": '\U00002213',
+ "Mopf;": '\U0001D544',
+ "Mscr;": '\U00002133',
+ "Mu;": '\U0000039C',
+ "NJcy;": '\U0000040A',
+ "Nacute;": '\U00000143',
+ "Ncaron;": '\U00000147',
+ "Ncedil;": '\U00000145',
+ "Ncy;": '\U0000041D',
+ "NegativeMediumSpace;": '\U0000200B',
+ "NegativeThickSpace;": '\U0000200B',
+ "NegativeThinSpace;": '\U0000200B',
+ "NegativeVeryThinSpace;": '\U0000200B',
+ "NestedGreaterGreater;": '\U0000226B',
+ "NestedLessLess;": '\U0000226A',
+ "NewLine;": '\U0000000A',
+ "Nfr;": '\U0001D511',
+ "NoBreak;": '\U00002060',
+ "NonBreakingSpace;": '\U000000A0',
+ "Nopf;": '\U00002115',
+ "Not;": '\U00002AEC',
+ "NotCongruent;": '\U00002262',
+ "NotCupCap;": '\U0000226D',
+ "NotDoubleVerticalBar;": '\U00002226',
+ "NotElement;": '\U00002209',
+ "NotEqual;": '\U00002260',
+ "NotExists;": '\U00002204',
+ "NotGreater;": '\U0000226F',
+ "NotGreaterEqual;": '\U00002271',
+ "NotGreaterLess;": '\U00002279',
+ "NotGreaterTilde;": '\U00002275',
+ "NotLeftTriangle;": '\U000022EA',
+ "NotLeftTriangleEqual;": '\U000022EC',
+ "NotLess;": '\U0000226E',
+ "NotLessEqual;": '\U00002270',
+ "NotLessGreater;": '\U00002278',
+ "NotLessTilde;": '\U00002274',
+ "NotPrecedes;": '\U00002280',
+ "NotPrecedesSlantEqual;": '\U000022E0',
+ "NotReverseElement;": '\U0000220C',
+ "NotRightTriangle;": '\U000022EB',
+ "NotRightTriangleEqual;": '\U000022ED',
+ "NotSquareSubsetEqual;": '\U000022E2',
+ "NotSquareSupersetEqual;": '\U000022E3',
+ "NotSubsetEqual;": '\U00002288',
+ "NotSucceeds;": '\U00002281',
+ "NotSucceedsSlantEqual;": '\U000022E1',
+ "NotSupersetEqual;": '\U00002289',
+ "NotTilde;": '\U00002241',
+ "NotTildeEqual;": '\U00002244',
+ "NotTildeFullEqual;": '\U00002247',
+ "NotTildeTilde;": '\U00002249',
+ "NotVerticalBar;": '\U00002224',
+ "Nscr;": '\U0001D4A9',
+ "Ntilde;": '\U000000D1',
+ "Nu;": '\U0000039D',
+ "OElig;": '\U00000152',
+ "Oacute;": '\U000000D3',
+ "Ocirc;": '\U000000D4',
+ "Ocy;": '\U0000041E',
+ "Odblac;": '\U00000150',
+ "Ofr;": '\U0001D512',
+ "Ograve;": '\U000000D2',
+ "Omacr;": '\U0000014C',
+ "Omega;": '\U000003A9',
+ "Omicron;": '\U0000039F',
+ "Oopf;": '\U0001D546',
+ "OpenCurlyDoubleQuote;": '\U0000201C',
+ "OpenCurlyQuote;": '\U00002018',
+ "Or;": '\U00002A54',
+ "Oscr;": '\U0001D4AA',
+ "Oslash;": '\U000000D8',
+ "Otilde;": '\U000000D5',
+ "Otimes;": '\U00002A37',
+ "Ouml;": '\U000000D6',
+ "OverBar;": '\U0000203E',
+ "OverBrace;": '\U000023DE',
+ "OverBracket;": '\U000023B4',
+ "OverParenthesis;": '\U000023DC',
+ "PartialD;": '\U00002202',
+ "Pcy;": '\U0000041F',
+ "Pfr;": '\U0001D513',
+ "Phi;": '\U000003A6',
+ "Pi;": '\U000003A0',
+ "PlusMinus;": '\U000000B1',
+ "Poincareplane;": '\U0000210C',
+ "Popf;": '\U00002119',
+ "Pr;": '\U00002ABB',
+ "Precedes;": '\U0000227A',
+ "PrecedesEqual;": '\U00002AAF',
+ "PrecedesSlantEqual;": '\U0000227C',
+ "PrecedesTilde;": '\U0000227E',
+ "Prime;": '\U00002033',
+ "Product;": '\U0000220F',
+ "Proportion;": '\U00002237',
+ "Proportional;": '\U0000221D',
+ "Pscr;": '\U0001D4AB',
+ "Psi;": '\U000003A8',
+ "QUOT;": '\U00000022',
+ "Qfr;": '\U0001D514',
+ "Qopf;": '\U0000211A',
+ "Qscr;": '\U0001D4AC',
+ "RBarr;": '\U00002910',
+ "REG;": '\U000000AE',
+ "Racute;": '\U00000154',
+ "Rang;": '\U000027EB',
+ "Rarr;": '\U000021A0',
+ "Rarrtl;": '\U00002916',
+ "Rcaron;": '\U00000158',
+ "Rcedil;": '\U00000156',
+ "Rcy;": '\U00000420',
+ "Re;": '\U0000211C',
+ "ReverseElement;": '\U0000220B',
+ "ReverseEquilibrium;": '\U000021CB',
+ "ReverseUpEquilibrium;": '\U0000296F',
+ "Rfr;": '\U0000211C',
+ "Rho;": '\U000003A1',
+ "RightAngleBracket;": '\U000027E9',
+ "RightArrow;": '\U00002192',
+ "RightArrowBar;": '\U000021E5',
+ "RightArrowLeftArrow;": '\U000021C4',
+ "RightCeiling;": '\U00002309',
+ "RightDoubleBracket;": '\U000027E7',
+ "RightDownTeeVector;": '\U0000295D',
+ "RightDownVector;": '\U000021C2',
+ "RightDownVectorBar;": '\U00002955',
+ "RightFloor;": '\U0000230B',
+ "RightTee;": '\U000022A2',
+ "RightTeeArrow;": '\U000021A6',
+ "RightTeeVector;": '\U0000295B',
+ "RightTriangle;": '\U000022B3',
+ "RightTriangleBar;": '\U000029D0',
+ "RightTriangleEqual;": '\U000022B5',
+ "RightUpDownVector;": '\U0000294F',
+ "RightUpTeeVector;": '\U0000295C',
+ "RightUpVector;": '\U000021BE',
+ "RightUpVectorBar;": '\U00002954',
+ "RightVector;": '\U000021C0',
+ "RightVectorBar;": '\U00002953',
+ "Rightarrow;": '\U000021D2',
+ "Ropf;": '\U0000211D',
+ "RoundImplies;": '\U00002970',
+ "Rrightarrow;": '\U000021DB',
+ "Rscr;": '\U0000211B',
+ "Rsh;": '\U000021B1',
+ "RuleDelayed;": '\U000029F4',
+ "SHCHcy;": '\U00000429',
+ "SHcy;": '\U00000428',
+ "SOFTcy;": '\U0000042C',
+ "Sacute;": '\U0000015A',
+ "Sc;": '\U00002ABC',
+ "Scaron;": '\U00000160',
+ "Scedil;": '\U0000015E',
+ "Scirc;": '\U0000015C',
+ "Scy;": '\U00000421',
+ "Sfr;": '\U0001D516',
+ "ShortDownArrow;": '\U00002193',
+ "ShortLeftArrow;": '\U00002190',
+ "ShortRightArrow;": '\U00002192',
+ "ShortUpArrow;": '\U00002191',
+ "Sigma;": '\U000003A3',
+ "SmallCircle;": '\U00002218',
+ "Sopf;": '\U0001D54A',
+ "Sqrt;": '\U0000221A',
+ "Square;": '\U000025A1',
+ "SquareIntersection;": '\U00002293',
+ "SquareSubset;": '\U0000228F',
+ "SquareSubsetEqual;": '\U00002291',
+ "SquareSuperset;": '\U00002290',
+ "SquareSupersetEqual;": '\U00002292',
+ "SquareUnion;": '\U00002294',
+ "Sscr;": '\U0001D4AE',
+ "Star;": '\U000022C6',
+ "Sub;": '\U000022D0',
+ "Subset;": '\U000022D0',
+ "SubsetEqual;": '\U00002286',
+ "Succeeds;": '\U0000227B',
+ "SucceedsEqual;": '\U00002AB0',
+ "SucceedsSlantEqual;": '\U0000227D',
+ "SucceedsTilde;": '\U0000227F',
+ "SuchThat;": '\U0000220B',
+ "Sum;": '\U00002211',
+ "Sup;": '\U000022D1',
+ "Superset;": '\U00002283',
+ "SupersetEqual;": '\U00002287',
+ "Supset;": '\U000022D1',
+ "THORN;": '\U000000DE',
+ "TRADE;": '\U00002122',
+ "TSHcy;": '\U0000040B',
+ "TScy;": '\U00000426',
+ "Tab;": '\U00000009',
+ "Tau;": '\U000003A4',
+ "Tcaron;": '\U00000164',
+ "Tcedil;": '\U00000162',
+ "Tcy;": '\U00000422',
+ "Tfr;": '\U0001D517',
+ "Therefore;": '\U00002234',
+ "Theta;": '\U00000398',
+ "ThinSpace;": '\U00002009',
+ "Tilde;": '\U0000223C',
+ "TildeEqual;": '\U00002243',
+ "TildeFullEqual;": '\U00002245',
+ "TildeTilde;": '\U00002248',
+ "Topf;": '\U0001D54B',
+ "TripleDot;": '\U000020DB',
+ "Tscr;": '\U0001D4AF',
+ "Tstrok;": '\U00000166',
+ "Uacute;": '\U000000DA',
+ "Uarr;": '\U0000219F',
+ "Uarrocir;": '\U00002949',
+ "Ubrcy;": '\U0000040E',
+ "Ubreve;": '\U0000016C',
+ "Ucirc;": '\U000000DB',
+ "Ucy;": '\U00000423',
+ "Udblac;": '\U00000170',
+ "Ufr;": '\U0001D518',
+ "Ugrave;": '\U000000D9',
+ "Umacr;": '\U0000016A',
+ "UnderBar;": '\U0000005F',
+ "UnderBrace;": '\U000023DF',
+ "UnderBracket;": '\U000023B5',
+ "UnderParenthesis;": '\U000023DD',
+ "Union;": '\U000022C3',
+ "UnionPlus;": '\U0000228E',
+ "Uogon;": '\U00000172',
+ "Uopf;": '\U0001D54C',
+ "UpArrow;": '\U00002191',
+ "UpArrowBar;": '\U00002912',
+ "UpArrowDownArrow;": '\U000021C5',
+ "UpDownArrow;": '\U00002195',
+ "UpEquilibrium;": '\U0000296E',
+ "UpTee;": '\U000022A5',
+ "UpTeeArrow;": '\U000021A5',
+ "Uparrow;": '\U000021D1',
+ "Updownarrow;": '\U000021D5',
+ "UpperLeftArrow;": '\U00002196',
+ "UpperRightArrow;": '\U00002197',
+ "Upsi;": '\U000003D2',
+ "Upsilon;": '\U000003A5',
+ "Uring;": '\U0000016E',
+ "Uscr;": '\U0001D4B0',
+ "Utilde;": '\U00000168',
+ "Uuml;": '\U000000DC',
+ "VDash;": '\U000022AB',
+ "Vbar;": '\U00002AEB',
+ "Vcy;": '\U00000412',
+ "Vdash;": '\U000022A9',
+ "Vdashl;": '\U00002AE6',
+ "Vee;": '\U000022C1',
+ "Verbar;": '\U00002016',
+ "Vert;": '\U00002016',
+ "VerticalBar;": '\U00002223',
+ "VerticalLine;": '\U0000007C',
+ "VerticalSeparator;": '\U00002758',
+ "VerticalTilde;": '\U00002240',
+ "VeryThinSpace;": '\U0000200A',
+ "Vfr;": '\U0001D519',
+ "Vopf;": '\U0001D54D',
+ "Vscr;": '\U0001D4B1',
+ "Vvdash;": '\U000022AA',
+ "Wcirc;": '\U00000174',
+ "Wedge;": '\U000022C0',
+ "Wfr;": '\U0001D51A',
+ "Wopf;": '\U0001D54E',
+ "Wscr;": '\U0001D4B2',
+ "Xfr;": '\U0001D51B',
+ "Xi;": '\U0000039E',
+ "Xopf;": '\U0001D54F',
+ "Xscr;": '\U0001D4B3',
+ "YAcy;": '\U0000042F',
+ "YIcy;": '\U00000407',
+ "YUcy;": '\U0000042E',
+ "Yacute;": '\U000000DD',
+ "Ycirc;": '\U00000176',
+ "Ycy;": '\U0000042B',
+ "Yfr;": '\U0001D51C',
+ "Yopf;": '\U0001D550',
+ "Yscr;": '\U0001D4B4',
+ "Yuml;": '\U00000178',
+ "ZHcy;": '\U00000416',
+ "Zacute;": '\U00000179',
+ "Zcaron;": '\U0000017D',
+ "Zcy;": '\U00000417',
+ "Zdot;": '\U0000017B',
+ "ZeroWidthSpace;": '\U0000200B',
+ "Zeta;": '\U00000396',
+ "Zfr;": '\U00002128',
+ "Zopf;": '\U00002124',
+ "Zscr;": '\U0001D4B5',
+ "aacute;": '\U000000E1',
+ "abreve;": '\U00000103',
+ "ac;": '\U0000223E',
+ "acd;": '\U0000223F',
+ "acirc;": '\U000000E2',
+ "acute;": '\U000000B4',
+ "acy;": '\U00000430',
+ "aelig;": '\U000000E6',
+ "af;": '\U00002061',
+ "afr;": '\U0001D51E',
+ "agrave;": '\U000000E0',
+ "alefsym;": '\U00002135',
+ "aleph;": '\U00002135',
+ "alpha;": '\U000003B1',
+ "amacr;": '\U00000101',
+ "amalg;": '\U00002A3F',
+ "amp;": '\U00000026',
+ "and;": '\U00002227',
+ "andand;": '\U00002A55',
+ "andd;": '\U00002A5C',
+ "andslope;": '\U00002A58',
+ "andv;": '\U00002A5A',
+ "ang;": '\U00002220',
+ "ange;": '\U000029A4',
+ "angle;": '\U00002220',
+ "angmsd;": '\U00002221',
+ "angmsdaa;": '\U000029A8',
+ "angmsdab;": '\U000029A9',
+ "angmsdac;": '\U000029AA',
+ "angmsdad;": '\U000029AB',
+ "angmsdae;": '\U000029AC',
+ "angmsdaf;": '\U000029AD',
+ "angmsdag;": '\U000029AE',
+ "angmsdah;": '\U000029AF',
+ "angrt;": '\U0000221F',
+ "angrtvb;": '\U000022BE',
+ "angrtvbd;": '\U0000299D',
+ "angsph;": '\U00002222',
+ "angst;": '\U000000C5',
+ "angzarr;": '\U0000237C',
+ "aogon;": '\U00000105',
+ "aopf;": '\U0001D552',
+ "ap;": '\U00002248',
+ "apE;": '\U00002A70',
+ "apacir;": '\U00002A6F',
+ "ape;": '\U0000224A',
+ "apid;": '\U0000224B',
+ "apos;": '\U00000027',
+ "approx;": '\U00002248',
+ "approxeq;": '\U0000224A',
+ "aring;": '\U000000E5',
+ "ascr;": '\U0001D4B6',
+ "ast;": '\U0000002A',
+ "asymp;": '\U00002248',
+ "asympeq;": '\U0000224D',
+ "atilde;": '\U000000E3',
+ "auml;": '\U000000E4',
+ "awconint;": '\U00002233',
+ "awint;": '\U00002A11',
+ "bNot;": '\U00002AED',
+ "backcong;": '\U0000224C',
+ "backepsilon;": '\U000003F6',
+ "backprime;": '\U00002035',
+ "backsim;": '\U0000223D',
+ "backsimeq;": '\U000022CD',
+ "barvee;": '\U000022BD',
+ "barwed;": '\U00002305',
+ "barwedge;": '\U00002305',
+ "bbrk;": '\U000023B5',
+ "bbrktbrk;": '\U000023B6',
+ "bcong;": '\U0000224C',
+ "bcy;": '\U00000431',
+ "bdquo;": '\U0000201E',
+ "becaus;": '\U00002235',
+ "because;": '\U00002235',
+ "bemptyv;": '\U000029B0',
+ "bepsi;": '\U000003F6',
+ "bernou;": '\U0000212C',
+ "beta;": '\U000003B2',
+ "beth;": '\U00002136',
+ "between;": '\U0000226C',
+ "bfr;": '\U0001D51F',
+ "bigcap;": '\U000022C2',
+ "bigcirc;": '\U000025EF',
+ "bigcup;": '\U000022C3',
+ "bigodot;": '\U00002A00',
+ "bigoplus;": '\U00002A01',
+ "bigotimes;": '\U00002A02',
+ "bigsqcup;": '\U00002A06',
+ "bigstar;": '\U00002605',
+ "bigtriangledown;": '\U000025BD',
+ "bigtriangleup;": '\U000025B3',
+ "biguplus;": '\U00002A04',
+ "bigvee;": '\U000022C1',
+ "bigwedge;": '\U000022C0',
+ "bkarow;": '\U0000290D',
+ "blacklozenge;": '\U000029EB',
+ "blacksquare;": '\U000025AA',
+ "blacktriangle;": '\U000025B4',
+ "blacktriangledown;": '\U000025BE',
+ "blacktriangleleft;": '\U000025C2',
+ "blacktriangleright;": '\U000025B8',
+ "blank;": '\U00002423',
+ "blk12;": '\U00002592',
+ "blk14;": '\U00002591',
+ "blk34;": '\U00002593',
+ "block;": '\U00002588',
+ "bnot;": '\U00002310',
+ "bopf;": '\U0001D553',
+ "bot;": '\U000022A5',
+ "bottom;": '\U000022A5',
+ "bowtie;": '\U000022C8',
+ "boxDL;": '\U00002557',
+ "boxDR;": '\U00002554',
+ "boxDl;": '\U00002556',
+ "boxDr;": '\U00002553',
+ "boxH;": '\U00002550',
+ "boxHD;": '\U00002566',
+ "boxHU;": '\U00002569',
+ "boxHd;": '\U00002564',
+ "boxHu;": '\U00002567',
+ "boxUL;": '\U0000255D',
+ "boxUR;": '\U0000255A',
+ "boxUl;": '\U0000255C',
+ "boxUr;": '\U00002559',
+ "boxV;": '\U00002551',
+ "boxVH;": '\U0000256C',
+ "boxVL;": '\U00002563',
+ "boxVR;": '\U00002560',
+ "boxVh;": '\U0000256B',
+ "boxVl;": '\U00002562',
+ "boxVr;": '\U0000255F',
+ "boxbox;": '\U000029C9',
+ "boxdL;": '\U00002555',
+ "boxdR;": '\U00002552',
+ "boxdl;": '\U00002510',
+ "boxdr;": '\U0000250C',
+ "boxh;": '\U00002500',
+ "boxhD;": '\U00002565',
+ "boxhU;": '\U00002568',
+ "boxhd;": '\U0000252C',
+ "boxhu;": '\U00002534',
+ "boxminus;": '\U0000229F',
+ "boxplus;": '\U0000229E',
+ "boxtimes;": '\U000022A0',
+ "boxuL;": '\U0000255B',
+ "boxuR;": '\U00002558',
+ "boxul;": '\U00002518',
+ "boxur;": '\U00002514',
+ "boxv;": '\U00002502',
+ "boxvH;": '\U0000256A',
+ "boxvL;": '\U00002561',
+ "boxvR;": '\U0000255E',
+ "boxvh;": '\U0000253C',
+ "boxvl;": '\U00002524',
+ "boxvr;": '\U0000251C',
+ "bprime;": '\U00002035',
+ "breve;": '\U000002D8',
+ "brvbar;": '\U000000A6',
+ "bscr;": '\U0001D4B7',
+ "bsemi;": '\U0000204F',
+ "bsim;": '\U0000223D',
+ "bsime;": '\U000022CD',
+ "bsol;": '\U0000005C',
+ "bsolb;": '\U000029C5',
+ "bsolhsub;": '\U000027C8',
+ "bull;": '\U00002022',
+ "bullet;": '\U00002022',
+ "bump;": '\U0000224E',
+ "bumpE;": '\U00002AAE',
+ "bumpe;": '\U0000224F',
+ "bumpeq;": '\U0000224F',
+ "cacute;": '\U00000107',
+ "cap;": '\U00002229',
+ "capand;": '\U00002A44',
+ "capbrcup;": '\U00002A49',
+ "capcap;": '\U00002A4B',
+ "capcup;": '\U00002A47',
+ "capdot;": '\U00002A40',
+ "caret;": '\U00002041',
+ "caron;": '\U000002C7',
+ "ccaps;": '\U00002A4D',
+ "ccaron;": '\U0000010D',
+ "ccedil;": '\U000000E7',
+ "ccirc;": '\U00000109',
+ "ccups;": '\U00002A4C',
+ "ccupssm;": '\U00002A50',
+ "cdot;": '\U0000010B',
+ "cedil;": '\U000000B8',
+ "cemptyv;": '\U000029B2',
+ "cent;": '\U000000A2',
+ "centerdot;": '\U000000B7',
+ "cfr;": '\U0001D520',
+ "chcy;": '\U00000447',
+ "check;": '\U00002713',
+ "checkmark;": '\U00002713',
+ "chi;": '\U000003C7',
+ "cir;": '\U000025CB',
+ "cirE;": '\U000029C3',
+ "circ;": '\U000002C6',
+ "circeq;": '\U00002257',
+ "circlearrowleft;": '\U000021BA',
+ "circlearrowright;": '\U000021BB',
+ "circledR;": '\U000000AE',
+ "circledS;": '\U000024C8',
+ "circledast;": '\U0000229B',
+ "circledcirc;": '\U0000229A',
+ "circleddash;": '\U0000229D',
+ "cire;": '\U00002257',
+ "cirfnint;": '\U00002A10',
+ "cirmid;": '\U00002AEF',
+ "cirscir;": '\U000029C2',
+ "clubs;": '\U00002663',
+ "clubsuit;": '\U00002663',
+ "colon;": '\U0000003A',
+ "colone;": '\U00002254',
+ "coloneq;": '\U00002254',
+ "comma;": '\U0000002C',
+ "commat;": '\U00000040',
+ "comp;": '\U00002201',
+ "compfn;": '\U00002218',
+ "complement;": '\U00002201',
+ "complexes;": '\U00002102',
+ "cong;": '\U00002245',
+ "congdot;": '\U00002A6D',
+ "conint;": '\U0000222E',
+ "copf;": '\U0001D554',
+ "coprod;": '\U00002210',
+ "copy;": '\U000000A9',
+ "copysr;": '\U00002117',
+ "crarr;": '\U000021B5',
+ "cross;": '\U00002717',
+ "cscr;": '\U0001D4B8',
+ "csub;": '\U00002ACF',
+ "csube;": '\U00002AD1',
+ "csup;": '\U00002AD0',
+ "csupe;": '\U00002AD2',
+ "ctdot;": '\U000022EF',
+ "cudarrl;": '\U00002938',
+ "cudarrr;": '\U00002935',
+ "cuepr;": '\U000022DE',
+ "cuesc;": '\U000022DF',
+ "cularr;": '\U000021B6',
+ "cularrp;": '\U0000293D',
+ "cup;": '\U0000222A',
+ "cupbrcap;": '\U00002A48',
+ "cupcap;": '\U00002A46',
+ "cupcup;": '\U00002A4A',
+ "cupdot;": '\U0000228D',
+ "cupor;": '\U00002A45',
+ "curarr;": '\U000021B7',
+ "curarrm;": '\U0000293C',
+ "curlyeqprec;": '\U000022DE',
+ "curlyeqsucc;": '\U000022DF',
+ "curlyvee;": '\U000022CE',
+ "curlywedge;": '\U000022CF',
+ "curren;": '\U000000A4',
+ "curvearrowleft;": '\U000021B6',
+ "curvearrowright;": '\U000021B7',
+ "cuvee;": '\U000022CE',
+ "cuwed;": '\U000022CF',
+ "cwconint;": '\U00002232',
+ "cwint;": '\U00002231',
+ "cylcty;": '\U0000232D',
+ "dArr;": '\U000021D3',
+ "dHar;": '\U00002965',
+ "dagger;": '\U00002020',
+ "daleth;": '\U00002138',
+ "darr;": '\U00002193',
+ "dash;": '\U00002010',
+ "dashv;": '\U000022A3',
+ "dbkarow;": '\U0000290F',
+ "dblac;": '\U000002DD',
+ "dcaron;": '\U0000010F',
+ "dcy;": '\U00000434',
+ "dd;": '\U00002146',
+ "ddagger;": '\U00002021',
+ "ddarr;": '\U000021CA',
+ "ddotseq;": '\U00002A77',
+ "deg;": '\U000000B0',
+ "delta;": '\U000003B4',
+ "demptyv;": '\U000029B1',
+ "dfisht;": '\U0000297F',
+ "dfr;": '\U0001D521',
+ "dharl;": '\U000021C3',
+ "dharr;": '\U000021C2',
+ "diam;": '\U000022C4',
+ "diamond;": '\U000022C4',
+ "diamondsuit;": '\U00002666',
+ "diams;": '\U00002666',
+ "die;": '\U000000A8',
+ "digamma;": '\U000003DD',
+ "disin;": '\U000022F2',
+ "div;": '\U000000F7',
+ "divide;": '\U000000F7',
+ "divideontimes;": '\U000022C7',
+ "divonx;": '\U000022C7',
+ "djcy;": '\U00000452',
+ "dlcorn;": '\U0000231E',
+ "dlcrop;": '\U0000230D',
+ "dollar;": '\U00000024',
+ "dopf;": '\U0001D555',
+ "dot;": '\U000002D9',
+ "doteq;": '\U00002250',
+ "doteqdot;": '\U00002251',
+ "dotminus;": '\U00002238',
+ "dotplus;": '\U00002214',
+ "dotsquare;": '\U000022A1',
+ "doublebarwedge;": '\U00002306',
+ "downarrow;": '\U00002193',
+ "downdownarrows;": '\U000021CA',
+ "downharpoonleft;": '\U000021C3',
+ "downharpoonright;": '\U000021C2',
+ "drbkarow;": '\U00002910',
+ "drcorn;": '\U0000231F',
+ "drcrop;": '\U0000230C',
+ "dscr;": '\U0001D4B9',
+ "dscy;": '\U00000455',
+ "dsol;": '\U000029F6',
+ "dstrok;": '\U00000111',
+ "dtdot;": '\U000022F1',
+ "dtri;": '\U000025BF',
+ "dtrif;": '\U000025BE',
+ "duarr;": '\U000021F5',
+ "duhar;": '\U0000296F',
+ "dwangle;": '\U000029A6',
+ "dzcy;": '\U0000045F',
+ "dzigrarr;": '\U000027FF',
+ "eDDot;": '\U00002A77',
+ "eDot;": '\U00002251',
+ "eacute;": '\U000000E9',
+ "easter;": '\U00002A6E',
+ "ecaron;": '\U0000011B',
+ "ecir;": '\U00002256',
+ "ecirc;": '\U000000EA',
+ "ecolon;": '\U00002255',
+ "ecy;": '\U0000044D',
+ "edot;": '\U00000117',
+ "ee;": '\U00002147',
+ "efDot;": '\U00002252',
+ "efr;": '\U0001D522',
+ "eg;": '\U00002A9A',
+ "egrave;": '\U000000E8',
+ "egs;": '\U00002A96',
+ "egsdot;": '\U00002A98',
+ "el;": '\U00002A99',
+ "elinters;": '\U000023E7',
+ "ell;": '\U00002113',
+ "els;": '\U00002A95',
+ "elsdot;": '\U00002A97',
+ "emacr;": '\U00000113',
+ "empty;": '\U00002205',
+ "emptyset;": '\U00002205',
+ "emptyv;": '\U00002205',
+ "emsp;": '\U00002003',
+ "emsp13;": '\U00002004',
+ "emsp14;": '\U00002005',
+ "eng;": '\U0000014B',
+ "ensp;": '\U00002002',
+ "eogon;": '\U00000119',
+ "eopf;": '\U0001D556',
+ "epar;": '\U000022D5',
+ "eparsl;": '\U000029E3',
+ "eplus;": '\U00002A71',
+ "epsi;": '\U000003B5',
+ "epsilon;": '\U000003B5',
+ "epsiv;": '\U000003F5',
+ "eqcirc;": '\U00002256',
+ "eqcolon;": '\U00002255',
+ "eqsim;": '\U00002242',
+ "eqslantgtr;": '\U00002A96',
+ "eqslantless;": '\U00002A95',
+ "equals;": '\U0000003D',
+ "equest;": '\U0000225F',
+ "equiv;": '\U00002261',
+ "equivDD;": '\U00002A78',
+ "eqvparsl;": '\U000029E5',
+ "erDot;": '\U00002253',
+ "erarr;": '\U00002971',
+ "escr;": '\U0000212F',
+ "esdot;": '\U00002250',
+ "esim;": '\U00002242',
+ "eta;": '\U000003B7',
+ "eth;": '\U000000F0',
+ "euml;": '\U000000EB',
+ "euro;": '\U000020AC',
+ "excl;": '\U00000021',
+ "exist;": '\U00002203',
+ "expectation;": '\U00002130',
+ "exponentiale;": '\U00002147',
+ "fallingdotseq;": '\U00002252',
+ "fcy;": '\U00000444',
+ "female;": '\U00002640',
+ "ffilig;": '\U0000FB03',
+ "fflig;": '\U0000FB00',
+ "ffllig;": '\U0000FB04',
+ "ffr;": '\U0001D523',
+ "filig;": '\U0000FB01',
+ "flat;": '\U0000266D',
+ "fllig;": '\U0000FB02',
+ "fltns;": '\U000025B1',
+ "fnof;": '\U00000192',
+ "fopf;": '\U0001D557',
+ "forall;": '\U00002200',
+ "fork;": '\U000022D4',
+ "forkv;": '\U00002AD9',
+ "fpartint;": '\U00002A0D',
+ "frac12;": '\U000000BD',
+ "frac13;": '\U00002153',
+ "frac14;": '\U000000BC',
+ "frac15;": '\U00002155',
+ "frac16;": '\U00002159',
+ "frac18;": '\U0000215B',
+ "frac23;": '\U00002154',
+ "frac25;": '\U00002156',
+ "frac34;": '\U000000BE',
+ "frac35;": '\U00002157',
+ "frac38;": '\U0000215C',
+ "frac45;": '\U00002158',
+ "frac56;": '\U0000215A',
+ "frac58;": '\U0000215D',
+ "frac78;": '\U0000215E',
+ "frasl;": '\U00002044',
+ "frown;": '\U00002322',
+ "fscr;": '\U0001D4BB',
+ "gE;": '\U00002267',
+ "gEl;": '\U00002A8C',
+ "gacute;": '\U000001F5',
+ "gamma;": '\U000003B3',
+ "gammad;": '\U000003DD',
+ "gap;": '\U00002A86',
+ "gbreve;": '\U0000011F',
+ "gcirc;": '\U0000011D',
+ "gcy;": '\U00000433',
+ "gdot;": '\U00000121',
+ "ge;": '\U00002265',
+ "gel;": '\U000022DB',
+ "geq;": '\U00002265',
+ "geqq;": '\U00002267',
+ "geqslant;": '\U00002A7E',
+ "ges;": '\U00002A7E',
+ "gescc;": '\U00002AA9',
+ "gesdot;": '\U00002A80',
+ "gesdoto;": '\U00002A82',
+ "gesdotol;": '\U00002A84',
+ "gesles;": '\U00002A94',
+ "gfr;": '\U0001D524',
+ "gg;": '\U0000226B',
+ "ggg;": '\U000022D9',
+ "gimel;": '\U00002137',
+ "gjcy;": '\U00000453',
+ "gl;": '\U00002277',
+ "glE;": '\U00002A92',
+ "gla;": '\U00002AA5',
+ "glj;": '\U00002AA4',
+ "gnE;": '\U00002269',
+ "gnap;": '\U00002A8A',
+ "gnapprox;": '\U00002A8A',
+ "gne;": '\U00002A88',
+ "gneq;": '\U00002A88',
+ "gneqq;": '\U00002269',
+ "gnsim;": '\U000022E7',
+ "gopf;": '\U0001D558',
+ "grave;": '\U00000060',
+ "gscr;": '\U0000210A',
+ "gsim;": '\U00002273',
+ "gsime;": '\U00002A8E',
+ "gsiml;": '\U00002A90',
+ "gt;": '\U0000003E',
+ "gtcc;": '\U00002AA7',
+ "gtcir;": '\U00002A7A',
+ "gtdot;": '\U000022D7',
+ "gtlPar;": '\U00002995',
+ "gtquest;": '\U00002A7C',
+ "gtrapprox;": '\U00002A86',
+ "gtrarr;": '\U00002978',
+ "gtrdot;": '\U000022D7',
+ "gtreqless;": '\U000022DB',
+ "gtreqqless;": '\U00002A8C',
+ "gtrless;": '\U00002277',
+ "gtrsim;": '\U00002273',
+ "hArr;": '\U000021D4',
+ "hairsp;": '\U0000200A',
+ "half;": '\U000000BD',
+ "hamilt;": '\U0000210B',
+ "hardcy;": '\U0000044A',
+ "harr;": '\U00002194',
+ "harrcir;": '\U00002948',
+ "harrw;": '\U000021AD',
+ "hbar;": '\U0000210F',
+ "hcirc;": '\U00000125',
+ "hearts;": '\U00002665',
+ "heartsuit;": '\U00002665',
+ "hellip;": '\U00002026',
+ "hercon;": '\U000022B9',
+ "hfr;": '\U0001D525',
+ "hksearow;": '\U00002925',
+ "hkswarow;": '\U00002926',
+ "hoarr;": '\U000021FF',
+ "homtht;": '\U0000223B',
+ "hookleftarrow;": '\U000021A9',
+ "hookrightarrow;": '\U000021AA',
+ "hopf;": '\U0001D559',
+ "horbar;": '\U00002015',
+ "hscr;": '\U0001D4BD',
+ "hslash;": '\U0000210F',
+ "hstrok;": '\U00000127',
+ "hybull;": '\U00002043',
+ "hyphen;": '\U00002010',
+ "iacute;": '\U000000ED',
+ "ic;": '\U00002063',
+ "icirc;": '\U000000EE',
+ "icy;": '\U00000438',
+ "iecy;": '\U00000435',
+ "iexcl;": '\U000000A1',
+ "iff;": '\U000021D4',
+ "ifr;": '\U0001D526',
+ "igrave;": '\U000000EC',
+ "ii;": '\U00002148',
+ "iiiint;": '\U00002A0C',
+ "iiint;": '\U0000222D',
+ "iinfin;": '\U000029DC',
+ "iiota;": '\U00002129',
+ "ijlig;": '\U00000133',
+ "imacr;": '\U0000012B',
+ "image;": '\U00002111',
+ "imagline;": '\U00002110',
+ "imagpart;": '\U00002111',
+ "imath;": '\U00000131',
+ "imof;": '\U000022B7',
+ "imped;": '\U000001B5',
+ "in;": '\U00002208',
+ "incare;": '\U00002105',
+ "infin;": '\U0000221E',
+ "infintie;": '\U000029DD',
+ "inodot;": '\U00000131',
+ "int;": '\U0000222B',
+ "intcal;": '\U000022BA',
+ "integers;": '\U00002124',
+ "intercal;": '\U000022BA',
+ "intlarhk;": '\U00002A17',
+ "intprod;": '\U00002A3C',
+ "iocy;": '\U00000451',
+ "iogon;": '\U0000012F',
+ "iopf;": '\U0001D55A',
+ "iota;": '\U000003B9',
+ "iprod;": '\U00002A3C',
+ "iquest;": '\U000000BF',
+ "iscr;": '\U0001D4BE',
+ "isin;": '\U00002208',
+ "isinE;": '\U000022F9',
+ "isindot;": '\U000022F5',
+ "isins;": '\U000022F4',
+ "isinsv;": '\U000022F3',
+ "isinv;": '\U00002208',
+ "it;": '\U00002062',
+ "itilde;": '\U00000129',
+ "iukcy;": '\U00000456',
+ "iuml;": '\U000000EF',
+ "jcirc;": '\U00000135',
+ "jcy;": '\U00000439',
+ "jfr;": '\U0001D527',
+ "jmath;": '\U00000237',
+ "jopf;": '\U0001D55B',
+ "jscr;": '\U0001D4BF',
+ "jsercy;": '\U00000458',
+ "jukcy;": '\U00000454',
+ "kappa;": '\U000003BA',
+ "kappav;": '\U000003F0',
+ "kcedil;": '\U00000137',
+ "kcy;": '\U0000043A',
+ "kfr;": '\U0001D528',
+ "kgreen;": '\U00000138',
+ "khcy;": '\U00000445',
+ "kjcy;": '\U0000045C',
+ "kopf;": '\U0001D55C',
+ "kscr;": '\U0001D4C0',
+ "lAarr;": '\U000021DA',
+ "lArr;": '\U000021D0',
+ "lAtail;": '\U0000291B',
+ "lBarr;": '\U0000290E',
+ "lE;": '\U00002266',
+ "lEg;": '\U00002A8B',
+ "lHar;": '\U00002962',
+ "lacute;": '\U0000013A',
+ "laemptyv;": '\U000029B4',
+ "lagran;": '\U00002112',
+ "lambda;": '\U000003BB',
+ "lang;": '\U000027E8',
+ "langd;": '\U00002991',
+ "langle;": '\U000027E8',
+ "lap;": '\U00002A85',
+ "laquo;": '\U000000AB',
+ "larr;": '\U00002190',
+ "larrb;": '\U000021E4',
+ "larrbfs;": '\U0000291F',
+ "larrfs;": '\U0000291D',
+ "larrhk;": '\U000021A9',
+ "larrlp;": '\U000021AB',
+ "larrpl;": '\U00002939',
+ "larrsim;": '\U00002973',
+ "larrtl;": '\U000021A2',
+ "lat;": '\U00002AAB',
+ "latail;": '\U00002919',
+ "late;": '\U00002AAD',
+ "lbarr;": '\U0000290C',
+ "lbbrk;": '\U00002772',
+ "lbrace;": '\U0000007B',
+ "lbrack;": '\U0000005B',
+ "lbrke;": '\U0000298B',
+ "lbrksld;": '\U0000298F',
+ "lbrkslu;": '\U0000298D',
+ "lcaron;": '\U0000013E',
+ "lcedil;": '\U0000013C',
+ "lceil;": '\U00002308',
+ "lcub;": '\U0000007B',
+ "lcy;": '\U0000043B',
+ "ldca;": '\U00002936',
+ "ldquo;": '\U0000201C',
+ "ldquor;": '\U0000201E',
+ "ldrdhar;": '\U00002967',
+ "ldrushar;": '\U0000294B',
+ "ldsh;": '\U000021B2',
+ "le;": '\U00002264',
+ "leftarrow;": '\U00002190',
+ "leftarrowtail;": '\U000021A2',
+ "leftharpoondown;": '\U000021BD',
+ "leftharpoonup;": '\U000021BC',
+ "leftleftarrows;": '\U000021C7',
+ "leftrightarrow;": '\U00002194',
+ "leftrightarrows;": '\U000021C6',
+ "leftrightharpoons;": '\U000021CB',
+ "leftrightsquigarrow;": '\U000021AD',
+ "leftthreetimes;": '\U000022CB',
+ "leg;": '\U000022DA',
+ "leq;": '\U00002264',
+ "leqq;": '\U00002266',
+ "leqslant;": '\U00002A7D',
+ "les;": '\U00002A7D',
+ "lescc;": '\U00002AA8',
+ "lesdot;": '\U00002A7F',
+ "lesdoto;": '\U00002A81',
+ "lesdotor;": '\U00002A83',
+ "lesges;": '\U00002A93',
+ "lessapprox;": '\U00002A85',
+ "lessdot;": '\U000022D6',
+ "lesseqgtr;": '\U000022DA',
+ "lesseqqgtr;": '\U00002A8B',
+ "lessgtr;": '\U00002276',
+ "lesssim;": '\U00002272',
+ "lfisht;": '\U0000297C',
+ "lfloor;": '\U0000230A',
+ "lfr;": '\U0001D529',
+ "lg;": '\U00002276',
+ "lgE;": '\U00002A91',
+ "lhard;": '\U000021BD',
+ "lharu;": '\U000021BC',
+ "lharul;": '\U0000296A',
+ "lhblk;": '\U00002584',
+ "ljcy;": '\U00000459',
+ "ll;": '\U0000226A',
+ "llarr;": '\U000021C7',
+ "llcorner;": '\U0000231E',
+ "llhard;": '\U0000296B',
+ "lltri;": '\U000025FA',
+ "lmidot;": '\U00000140',
+ "lmoust;": '\U000023B0',
+ "lmoustache;": '\U000023B0',
+ "lnE;": '\U00002268',
+ "lnap;": '\U00002A89',
+ "lnapprox;": '\U00002A89',
+ "lne;": '\U00002A87',
+ "lneq;": '\U00002A87',
+ "lneqq;": '\U00002268',
+ "lnsim;": '\U000022E6',
+ "loang;": '\U000027EC',
+ "loarr;": '\U000021FD',
+ "lobrk;": '\U000027E6',
+ "longleftarrow;": '\U000027F5',
+ "longleftrightarrow;": '\U000027F7',
+ "longmapsto;": '\U000027FC',
+ "longrightarrow;": '\U000027F6',
+ "looparrowleft;": '\U000021AB',
+ "looparrowright;": '\U000021AC',
+ "lopar;": '\U00002985',
+ "lopf;": '\U0001D55D',
+ "loplus;": '\U00002A2D',
+ "lotimes;": '\U00002A34',
+ "lowast;": '\U00002217',
+ "lowbar;": '\U0000005F',
+ "loz;": '\U000025CA',
+ "lozenge;": '\U000025CA',
+ "lozf;": '\U000029EB',
+ "lpar;": '\U00000028',
+ "lparlt;": '\U00002993',
+ "lrarr;": '\U000021C6',
+ "lrcorner;": '\U0000231F',
+ "lrhar;": '\U000021CB',
+ "lrhard;": '\U0000296D',
+ "lrm;": '\U0000200E',
+ "lrtri;": '\U000022BF',
+ "lsaquo;": '\U00002039',
+ "lscr;": '\U0001D4C1',
+ "lsh;": '\U000021B0',
+ "lsim;": '\U00002272',
+ "lsime;": '\U00002A8D',
+ "lsimg;": '\U00002A8F',
+ "lsqb;": '\U0000005B',
+ "lsquo;": '\U00002018',
+ "lsquor;": '\U0000201A',
+ "lstrok;": '\U00000142',
+ "lt;": '\U0000003C',
+ "ltcc;": '\U00002AA6',
+ "ltcir;": '\U00002A79',
+ "ltdot;": '\U000022D6',
+ "lthree;": '\U000022CB',
+ "ltimes;": '\U000022C9',
+ "ltlarr;": '\U00002976',
+ "ltquest;": '\U00002A7B',
+ "ltrPar;": '\U00002996',
+ "ltri;": '\U000025C3',
+ "ltrie;": '\U000022B4',
+ "ltrif;": '\U000025C2',
+ "lurdshar;": '\U0000294A',
+ "luruhar;": '\U00002966',
+ "mDDot;": '\U0000223A',
+ "macr;": '\U000000AF',
+ "male;": '\U00002642',
+ "malt;": '\U00002720',
+ "maltese;": '\U00002720',
+ "map;": '\U000021A6',
+ "mapsto;": '\U000021A6',
+ "mapstodown;": '\U000021A7',
+ "mapstoleft;": '\U000021A4',
+ "mapstoup;": '\U000021A5',
+ "marker;": '\U000025AE',
+ "mcomma;": '\U00002A29',
+ "mcy;": '\U0000043C',
+ "mdash;": '\U00002014',
+ "measuredangle;": '\U00002221',
+ "mfr;": '\U0001D52A',
+ "mho;": '\U00002127',
+ "micro;": '\U000000B5',
+ "mid;": '\U00002223',
+ "midast;": '\U0000002A',
+ "midcir;": '\U00002AF0',
+ "middot;": '\U000000B7',
+ "minus;": '\U00002212',
+ "minusb;": '\U0000229F',
+ "minusd;": '\U00002238',
+ "minusdu;": '\U00002A2A',
+ "mlcp;": '\U00002ADB',
+ "mldr;": '\U00002026',
+ "mnplus;": '\U00002213',
+ "models;": '\U000022A7',
+ "mopf;": '\U0001D55E',
+ "mp;": '\U00002213',
+ "mscr;": '\U0001D4C2',
+ "mstpos;": '\U0000223E',
+ "mu;": '\U000003BC',
+ "multimap;": '\U000022B8',
+ "mumap;": '\U000022B8',
+ "nLeftarrow;": '\U000021CD',
+ "nLeftrightarrow;": '\U000021CE',
+ "nRightarrow;": '\U000021CF',
+ "nVDash;": '\U000022AF',
+ "nVdash;": '\U000022AE',
+ "nabla;": '\U00002207',
+ "nacute;": '\U00000144',
+ "nap;": '\U00002249',
+ "napos;": '\U00000149',
+ "napprox;": '\U00002249',
+ "natur;": '\U0000266E',
+ "natural;": '\U0000266E',
+ "naturals;": '\U00002115',
+ "nbsp;": '\U000000A0',
+ "ncap;": '\U00002A43',
+ "ncaron;": '\U00000148',
+ "ncedil;": '\U00000146',
+ "ncong;": '\U00002247',
+ "ncup;": '\U00002A42',
+ "ncy;": '\U0000043D',
+ "ndash;": '\U00002013',
+ "ne;": '\U00002260',
+ "neArr;": '\U000021D7',
+ "nearhk;": '\U00002924',
+ "nearr;": '\U00002197',
+ "nearrow;": '\U00002197',
+ "nequiv;": '\U00002262',
+ "nesear;": '\U00002928',
+ "nexist;": '\U00002204',
+ "nexists;": '\U00002204',
+ "nfr;": '\U0001D52B',
+ "nge;": '\U00002271',
+ "ngeq;": '\U00002271',
+ "ngsim;": '\U00002275',
+ "ngt;": '\U0000226F',
+ "ngtr;": '\U0000226F',
+ "nhArr;": '\U000021CE',
+ "nharr;": '\U000021AE',
+ "nhpar;": '\U00002AF2',
+ "ni;": '\U0000220B',
+ "nis;": '\U000022FC',
+ "nisd;": '\U000022FA',
+ "niv;": '\U0000220B',
+ "njcy;": '\U0000045A',
+ "nlArr;": '\U000021CD',
+ "nlarr;": '\U0000219A',
+ "nldr;": '\U00002025',
+ "nle;": '\U00002270',
+ "nleftarrow;": '\U0000219A',
+ "nleftrightarrow;": '\U000021AE',
+ "nleq;": '\U00002270',
+ "nless;": '\U0000226E',
+ "nlsim;": '\U00002274',
+ "nlt;": '\U0000226E',
+ "nltri;": '\U000022EA',
+ "nltrie;": '\U000022EC',
+ "nmid;": '\U00002224',
+ "nopf;": '\U0001D55F',
+ "not;": '\U000000AC',
+ "notin;": '\U00002209',
+ "notinva;": '\U00002209',
+ "notinvb;": '\U000022F7',
+ "notinvc;": '\U000022F6',
+ "notni;": '\U0000220C',
+ "notniva;": '\U0000220C',
+ "notnivb;": '\U000022FE',
+ "notnivc;": '\U000022FD',
+ "npar;": '\U00002226',
+ "nparallel;": '\U00002226',
+ "npolint;": '\U00002A14',
+ "npr;": '\U00002280',
+ "nprcue;": '\U000022E0',
+ "nprec;": '\U00002280',
+ "nrArr;": '\U000021CF',
+ "nrarr;": '\U0000219B',
+ "nrightarrow;": '\U0000219B',
+ "nrtri;": '\U000022EB',
+ "nrtrie;": '\U000022ED',
+ "nsc;": '\U00002281',
+ "nsccue;": '\U000022E1',
+ "nscr;": '\U0001D4C3',
+ "nshortmid;": '\U00002224',
+ "nshortparallel;": '\U00002226',
+ "nsim;": '\U00002241',
+ "nsime;": '\U00002244',
+ "nsimeq;": '\U00002244',
+ "nsmid;": '\U00002224',
+ "nspar;": '\U00002226',
+ "nsqsube;": '\U000022E2',
+ "nsqsupe;": '\U000022E3',
+ "nsub;": '\U00002284',
+ "nsube;": '\U00002288',
+ "nsubseteq;": '\U00002288',
+ "nsucc;": '\U00002281',
+ "nsup;": '\U00002285',
+ "nsupe;": '\U00002289',
+ "nsupseteq;": '\U00002289',
+ "ntgl;": '\U00002279',
+ "ntilde;": '\U000000F1',
+ "ntlg;": '\U00002278',
+ "ntriangleleft;": '\U000022EA',
+ "ntrianglelefteq;": '\U000022EC',
+ "ntriangleright;": '\U000022EB',
+ "ntrianglerighteq;": '\U000022ED',
+ "nu;": '\U000003BD',
+ "num;": '\U00000023',
+ "numero;": '\U00002116',
+ "numsp;": '\U00002007',
+ "nvDash;": '\U000022AD',
+ "nvHarr;": '\U00002904',
+ "nvdash;": '\U000022AC',
+ "nvinfin;": '\U000029DE',
+ "nvlArr;": '\U00002902',
+ "nvrArr;": '\U00002903',
+ "nwArr;": '\U000021D6',
+ "nwarhk;": '\U00002923',
+ "nwarr;": '\U00002196',
+ "nwarrow;": '\U00002196',
+ "nwnear;": '\U00002927',
+ "oS;": '\U000024C8',
+ "oacute;": '\U000000F3',
+ "oast;": '\U0000229B',
+ "ocir;": '\U0000229A',
+ "ocirc;": '\U000000F4',
+ "ocy;": '\U0000043E',
+ "odash;": '\U0000229D',
+ "odblac;": '\U00000151',
+ "odiv;": '\U00002A38',
+ "odot;": '\U00002299',
+ "odsold;": '\U000029BC',
+ "oelig;": '\U00000153',
+ "ofcir;": '\U000029BF',
+ "ofr;": '\U0001D52C',
+ "ogon;": '\U000002DB',
+ "ograve;": '\U000000F2',
+ "ogt;": '\U000029C1',
+ "ohbar;": '\U000029B5',
+ "ohm;": '\U000003A9',
+ "oint;": '\U0000222E',
+ "olarr;": '\U000021BA',
+ "olcir;": '\U000029BE',
+ "olcross;": '\U000029BB',
+ "oline;": '\U0000203E',
+ "olt;": '\U000029C0',
+ "omacr;": '\U0000014D',
+ "omega;": '\U000003C9',
+ "omicron;": '\U000003BF',
+ "omid;": '\U000029B6',
+ "ominus;": '\U00002296',
+ "oopf;": '\U0001D560',
+ "opar;": '\U000029B7',
+ "operp;": '\U000029B9',
+ "oplus;": '\U00002295',
+ "or;": '\U00002228',
+ "orarr;": '\U000021BB',
+ "ord;": '\U00002A5D',
+ "order;": '\U00002134',
+ "orderof;": '\U00002134',
+ "ordf;": '\U000000AA',
+ "ordm;": '\U000000BA',
+ "origof;": '\U000022B6',
+ "oror;": '\U00002A56',
+ "orslope;": '\U00002A57',
+ "orv;": '\U00002A5B',
+ "oscr;": '\U00002134',
+ "oslash;": '\U000000F8',
+ "osol;": '\U00002298',
+ "otilde;": '\U000000F5',
+ "otimes;": '\U00002297',
+ "otimesas;": '\U00002A36',
+ "ouml;": '\U000000F6',
+ "ovbar;": '\U0000233D',
+ "par;": '\U00002225',
+ "para;": '\U000000B6',
+ "parallel;": '\U00002225',
+ "parsim;": '\U00002AF3',
+ "parsl;": '\U00002AFD',
+ "part;": '\U00002202',
+ "pcy;": '\U0000043F',
+ "percnt;": '\U00000025',
+ "period;": '\U0000002E',
+ "permil;": '\U00002030',
+ "perp;": '\U000022A5',
+ "pertenk;": '\U00002031',
+ "pfr;": '\U0001D52D',
+ "phi;": '\U000003C6',
+ "phiv;": '\U000003D5',
+ "phmmat;": '\U00002133',
+ "phone;": '\U0000260E',
+ "pi;": '\U000003C0',
+ "pitchfork;": '\U000022D4',
+ "piv;": '\U000003D6',
+ "planck;": '\U0000210F',
+ "planckh;": '\U0000210E',
+ "plankv;": '\U0000210F',
+ "plus;": '\U0000002B',
+ "plusacir;": '\U00002A23',
+ "plusb;": '\U0000229E',
+ "pluscir;": '\U00002A22',
+ "plusdo;": '\U00002214',
+ "plusdu;": '\U00002A25',
+ "pluse;": '\U00002A72',
+ "plusmn;": '\U000000B1',
+ "plussim;": '\U00002A26',
+ "plustwo;": '\U00002A27',
+ "pm;": '\U000000B1',
+ "pointint;": '\U00002A15',
+ "popf;": '\U0001D561',
+ "pound;": '\U000000A3',
+ "pr;": '\U0000227A',
+ "prE;": '\U00002AB3',
+ "prap;": '\U00002AB7',
+ "prcue;": '\U0000227C',
+ "pre;": '\U00002AAF',
+ "prec;": '\U0000227A',
+ "precapprox;": '\U00002AB7',
+ "preccurlyeq;": '\U0000227C',
+ "preceq;": '\U00002AAF',
+ "precnapprox;": '\U00002AB9',
+ "precneqq;": '\U00002AB5',
+ "precnsim;": '\U000022E8',
+ "precsim;": '\U0000227E',
+ "prime;": '\U00002032',
+ "primes;": '\U00002119',
+ "prnE;": '\U00002AB5',
+ "prnap;": '\U00002AB9',
+ "prnsim;": '\U000022E8',
+ "prod;": '\U0000220F',
+ "profalar;": '\U0000232E',
+ "profline;": '\U00002312',
+ "profsurf;": '\U00002313',
+ "prop;": '\U0000221D',
+ "propto;": '\U0000221D',
+ "prsim;": '\U0000227E',
+ "prurel;": '\U000022B0',
+ "pscr;": '\U0001D4C5',
+ "psi;": '\U000003C8',
+ "puncsp;": '\U00002008',
+ "qfr;": '\U0001D52E',
+ "qint;": '\U00002A0C',
+ "qopf;": '\U0001D562',
+ "qprime;": '\U00002057',
+ "qscr;": '\U0001D4C6',
+ "quaternions;": '\U0000210D',
+ "quatint;": '\U00002A16',
+ "quest;": '\U0000003F',
+ "questeq;": '\U0000225F',
+ "quot;": '\U00000022',
+ "rAarr;": '\U000021DB',
+ "rArr;": '\U000021D2',
+ "rAtail;": '\U0000291C',
+ "rBarr;": '\U0000290F',
+ "rHar;": '\U00002964',
+ "racute;": '\U00000155',
+ "radic;": '\U0000221A',
+ "raemptyv;": '\U000029B3',
+ "rang;": '\U000027E9',
+ "rangd;": '\U00002992',
+ "range;": '\U000029A5',
+ "rangle;": '\U000027E9',
+ "raquo;": '\U000000BB',
+ "rarr;": '\U00002192',
+ "rarrap;": '\U00002975',
+ "rarrb;": '\U000021E5',
+ "rarrbfs;": '\U00002920',
+ "rarrc;": '\U00002933',
+ "rarrfs;": '\U0000291E',
+ "rarrhk;": '\U000021AA',
+ "rarrlp;": '\U000021AC',
+ "rarrpl;": '\U00002945',
+ "rarrsim;": '\U00002974',
+ "rarrtl;": '\U000021A3',
+ "rarrw;": '\U0000219D',
+ "ratail;": '\U0000291A',
+ "ratio;": '\U00002236',
+ "rationals;": '\U0000211A',
+ "rbarr;": '\U0000290D',
+ "rbbrk;": '\U00002773',
+ "rbrace;": '\U0000007D',
+ "rbrack;": '\U0000005D',
+ "rbrke;": '\U0000298C',
+ "rbrksld;": '\U0000298E',
+ "rbrkslu;": '\U00002990',
+ "rcaron;": '\U00000159',
+ "rcedil;": '\U00000157',
+ "rceil;": '\U00002309',
+ "rcub;": '\U0000007D',
+ "rcy;": '\U00000440',
+ "rdca;": '\U00002937',
+ "rdldhar;": '\U00002969',
+ "rdquo;": '\U0000201D',
+ "rdquor;": '\U0000201D',
+ "rdsh;": '\U000021B3',
+ "real;": '\U0000211C',
+ "realine;": '\U0000211B',
+ "realpart;": '\U0000211C',
+ "reals;": '\U0000211D',
+ "rect;": '\U000025AD',
+ "reg;": '\U000000AE',
+ "rfisht;": '\U0000297D',
+ "rfloor;": '\U0000230B',
+ "rfr;": '\U0001D52F',
+ "rhard;": '\U000021C1',
+ "rharu;": '\U000021C0',
+ "rharul;": '\U0000296C',
+ "rho;": '\U000003C1',
+ "rhov;": '\U000003F1',
+ "rightarrow;": '\U00002192',
+ "rightarrowtail;": '\U000021A3',
+ "rightharpoondown;": '\U000021C1',
+ "rightharpoonup;": '\U000021C0',
+ "rightleftarrows;": '\U000021C4',
+ "rightleftharpoons;": '\U000021CC',
+ "rightrightarrows;": '\U000021C9',
+ "rightsquigarrow;": '\U0000219D',
+ "rightthreetimes;": '\U000022CC',
+ "ring;": '\U000002DA',
+ "risingdotseq;": '\U00002253',
+ "rlarr;": '\U000021C4',
+ "rlhar;": '\U000021CC',
+ "rlm;": '\U0000200F',
+ "rmoust;": '\U000023B1',
+ "rmoustache;": '\U000023B1',
+ "rnmid;": '\U00002AEE',
+ "roang;": '\U000027ED',
+ "roarr;": '\U000021FE',
+ "robrk;": '\U000027E7',
+ "ropar;": '\U00002986',
+ "ropf;": '\U0001D563',
+ "roplus;": '\U00002A2E',
+ "rotimes;": '\U00002A35',
+ "rpar;": '\U00000029',
+ "rpargt;": '\U00002994',
+ "rppolint;": '\U00002A12',
+ "rrarr;": '\U000021C9',
+ "rsaquo;": '\U0000203A',
+ "rscr;": '\U0001D4C7',
+ "rsh;": '\U000021B1',
+ "rsqb;": '\U0000005D',
+ "rsquo;": '\U00002019',
+ "rsquor;": '\U00002019',
+ "rthree;": '\U000022CC',
+ "rtimes;": '\U000022CA',
+ "rtri;": '\U000025B9',
+ "rtrie;": '\U000022B5',
+ "rtrif;": '\U000025B8',
+ "rtriltri;": '\U000029CE',
+ "ruluhar;": '\U00002968',
+ "rx;": '\U0000211E',
+ "sacute;": '\U0000015B',
+ "sbquo;": '\U0000201A',
+ "sc;": '\U0000227B',
+ "scE;": '\U00002AB4',
+ "scap;": '\U00002AB8',
+ "scaron;": '\U00000161',
+ "sccue;": '\U0000227D',
+ "sce;": '\U00002AB0',
+ "scedil;": '\U0000015F',
+ "scirc;": '\U0000015D',
+ "scnE;": '\U00002AB6',
+ "scnap;": '\U00002ABA',
+ "scnsim;": '\U000022E9',
+ "scpolint;": '\U00002A13',
+ "scsim;": '\U0000227F',
+ "scy;": '\U00000441',
+ "sdot;": '\U000022C5',
+ "sdotb;": '\U000022A1',
+ "sdote;": '\U00002A66',
+ "seArr;": '\U000021D8',
+ "searhk;": '\U00002925',
+ "searr;": '\U00002198',
+ "searrow;": '\U00002198',
+ "sect;": '\U000000A7',
+ "semi;": '\U0000003B',
+ "seswar;": '\U00002929',
+ "setminus;": '\U00002216',
+ "setmn;": '\U00002216',
+ "sext;": '\U00002736',
+ "sfr;": '\U0001D530',
+ "sfrown;": '\U00002322',
+ "sharp;": '\U0000266F',
+ "shchcy;": '\U00000449',
+ "shcy;": '\U00000448',
+ "shortmid;": '\U00002223',
+ "shortparallel;": '\U00002225',
+ "shy;": '\U000000AD',
+ "sigma;": '\U000003C3',
+ "sigmaf;": '\U000003C2',
+ "sigmav;": '\U000003C2',
+ "sim;": '\U0000223C',
+ "simdot;": '\U00002A6A',
+ "sime;": '\U00002243',
+ "simeq;": '\U00002243',
+ "simg;": '\U00002A9E',
+ "simgE;": '\U00002AA0',
+ "siml;": '\U00002A9D',
+ "simlE;": '\U00002A9F',
+ "simne;": '\U00002246',
+ "simplus;": '\U00002A24',
+ "simrarr;": '\U00002972',
+ "slarr;": '\U00002190',
+ "smallsetminus;": '\U00002216',
+ "smashp;": '\U00002A33',
+ "smeparsl;": '\U000029E4',
+ "smid;": '\U00002223',
+ "smile;": '\U00002323',
+ "smt;": '\U00002AAA',
+ "smte;": '\U00002AAC',
+ "softcy;": '\U0000044C',
+ "sol;": '\U0000002F',
+ "solb;": '\U000029C4',
+ "solbar;": '\U0000233F',
+ "sopf;": '\U0001D564',
+ "spades;": '\U00002660',
+ "spadesuit;": '\U00002660',
+ "spar;": '\U00002225',
+ "sqcap;": '\U00002293',
+ "sqcup;": '\U00002294',
+ "sqsub;": '\U0000228F',
+ "sqsube;": '\U00002291',
+ "sqsubset;": '\U0000228F',
+ "sqsubseteq;": '\U00002291',
+ "sqsup;": '\U00002290',
+ "sqsupe;": '\U00002292',
+ "sqsupset;": '\U00002290',
+ "sqsupseteq;": '\U00002292',
+ "squ;": '\U000025A1',
+ "square;": '\U000025A1',
+ "squarf;": '\U000025AA',
+ "squf;": '\U000025AA',
+ "srarr;": '\U00002192',
+ "sscr;": '\U0001D4C8',
+ "ssetmn;": '\U00002216',
+ "ssmile;": '\U00002323',
+ "sstarf;": '\U000022C6',
+ "star;": '\U00002606',
+ "starf;": '\U00002605',
+ "straightepsilon;": '\U000003F5',
+ "straightphi;": '\U000003D5',
+ "strns;": '\U000000AF',
+ "sub;": '\U00002282',
+ "subE;": '\U00002AC5',
+ "subdot;": '\U00002ABD',
+ "sube;": '\U00002286',
+ "subedot;": '\U00002AC3',
+ "submult;": '\U00002AC1',
+ "subnE;": '\U00002ACB',
+ "subne;": '\U0000228A',
+ "subplus;": '\U00002ABF',
+ "subrarr;": '\U00002979',
+ "subset;": '\U00002282',
+ "subseteq;": '\U00002286',
+ "subseteqq;": '\U00002AC5',
+ "subsetneq;": '\U0000228A',
+ "subsetneqq;": '\U00002ACB',
+ "subsim;": '\U00002AC7',
+ "subsub;": '\U00002AD5',
+ "subsup;": '\U00002AD3',
+ "succ;": '\U0000227B',
+ "succapprox;": '\U00002AB8',
+ "succcurlyeq;": '\U0000227D',
+ "succeq;": '\U00002AB0',
+ "succnapprox;": '\U00002ABA',
+ "succneqq;": '\U00002AB6',
+ "succnsim;": '\U000022E9',
+ "succsim;": '\U0000227F',
+ "sum;": '\U00002211',
+ "sung;": '\U0000266A',
+ "sup;": '\U00002283',
+ "sup1;": '\U000000B9',
+ "sup2;": '\U000000B2',
+ "sup3;": '\U000000B3',
+ "supE;": '\U00002AC6',
+ "supdot;": '\U00002ABE',
+ "supdsub;": '\U00002AD8',
+ "supe;": '\U00002287',
+ "supedot;": '\U00002AC4',
+ "suphsol;": '\U000027C9',
+ "suphsub;": '\U00002AD7',
+ "suplarr;": '\U0000297B',
+ "supmult;": '\U00002AC2',
+ "supnE;": '\U00002ACC',
+ "supne;": '\U0000228B',
+ "supplus;": '\U00002AC0',
+ "supset;": '\U00002283',
+ "supseteq;": '\U00002287',
+ "supseteqq;": '\U00002AC6',
+ "supsetneq;": '\U0000228B',
+ "supsetneqq;": '\U00002ACC',
+ "supsim;": '\U00002AC8',
+ "supsub;": '\U00002AD4',
+ "supsup;": '\U00002AD6',
+ "swArr;": '\U000021D9',
+ "swarhk;": '\U00002926',
+ "swarr;": '\U00002199',
+ "swarrow;": '\U00002199',
+ "swnwar;": '\U0000292A',
+ "szlig;": '\U000000DF',
+ "target;": '\U00002316',
+ "tau;": '\U000003C4',
+ "tbrk;": '\U000023B4',
+ "tcaron;": '\U00000165',
+ "tcedil;": '\U00000163',
+ "tcy;": '\U00000442',
+ "tdot;": '\U000020DB',
+ "telrec;": '\U00002315',
+ "tfr;": '\U0001D531',
+ "there4;": '\U00002234',
+ "therefore;": '\U00002234',
+ "theta;": '\U000003B8',
+ "thetasym;": '\U000003D1',
+ "thetav;": '\U000003D1',
+ "thickapprox;": '\U00002248',
+ "thicksim;": '\U0000223C',
+ "thinsp;": '\U00002009',
+ "thkap;": '\U00002248',
+ "thksim;": '\U0000223C',
+ "thorn;": '\U000000FE',
+ "tilde;": '\U000002DC',
+ "times;": '\U000000D7',
+ "timesb;": '\U000022A0',
+ "timesbar;": '\U00002A31',
+ "timesd;": '\U00002A30',
+ "tint;": '\U0000222D',
+ "toea;": '\U00002928',
+ "top;": '\U000022A4',
+ "topbot;": '\U00002336',
+ "topcir;": '\U00002AF1',
+ "topf;": '\U0001D565',
+ "topfork;": '\U00002ADA',
+ "tosa;": '\U00002929',
+ "tprime;": '\U00002034',
+ "trade;": '\U00002122',
+ "triangle;": '\U000025B5',
+ "triangledown;": '\U000025BF',
+ "triangleleft;": '\U000025C3',
+ "trianglelefteq;": '\U000022B4',
+ "triangleq;": '\U0000225C',
+ "triangleright;": '\U000025B9',
+ "trianglerighteq;": '\U000022B5',
+ "tridot;": '\U000025EC',
+ "trie;": '\U0000225C',
+ "triminus;": '\U00002A3A',
+ "triplus;": '\U00002A39',
+ "trisb;": '\U000029CD',
+ "tritime;": '\U00002A3B',
+ "trpezium;": '\U000023E2',
+ "tscr;": '\U0001D4C9',
+ "tscy;": '\U00000446',
+ "tshcy;": '\U0000045B',
+ "tstrok;": '\U00000167',
+ "twixt;": '\U0000226C',
+ "twoheadleftarrow;": '\U0000219E',
+ "twoheadrightarrow;": '\U000021A0',
+ "uArr;": '\U000021D1',
+ "uHar;": '\U00002963',
+ "uacute;": '\U000000FA',
+ "uarr;": '\U00002191',
+ "ubrcy;": '\U0000045E',
+ "ubreve;": '\U0000016D',
+ "ucirc;": '\U000000FB',
+ "ucy;": '\U00000443',
+ "udarr;": '\U000021C5',
+ "udblac;": '\U00000171',
+ "udhar;": '\U0000296E',
+ "ufisht;": '\U0000297E',
+ "ufr;": '\U0001D532',
+ "ugrave;": '\U000000F9',
+ "uharl;": '\U000021BF',
+ "uharr;": '\U000021BE',
+ "uhblk;": '\U00002580',
+ "ulcorn;": '\U0000231C',
+ "ulcorner;": '\U0000231C',
+ "ulcrop;": '\U0000230F',
+ "ultri;": '\U000025F8',
+ "umacr;": '\U0000016B',
+ "uml;": '\U000000A8',
+ "uogon;": '\U00000173',
+ "uopf;": '\U0001D566',
+ "uparrow;": '\U00002191',
+ "updownarrow;": '\U00002195',
+ "upharpoonleft;": '\U000021BF',
+ "upharpoonright;": '\U000021BE',
+ "uplus;": '\U0000228E',
+ "upsi;": '\U000003C5',
+ "upsih;": '\U000003D2',
+ "upsilon;": '\U000003C5',
+ "upuparrows;": '\U000021C8',
+ "urcorn;": '\U0000231D',
+ "urcorner;": '\U0000231D',
+ "urcrop;": '\U0000230E',
+ "uring;": '\U0000016F',
+ "urtri;": '\U000025F9',
+ "uscr;": '\U0001D4CA',
+ "utdot;": '\U000022F0',
+ "utilde;": '\U00000169',
+ "utri;": '\U000025B5',
+ "utrif;": '\U000025B4',
+ "uuarr;": '\U000021C8',
+ "uuml;": '\U000000FC',
+ "uwangle;": '\U000029A7',
+ "vArr;": '\U000021D5',
+ "vBar;": '\U00002AE8',
+ "vBarv;": '\U00002AE9',
+ "vDash;": '\U000022A8',
+ "vangrt;": '\U0000299C',
+ "varepsilon;": '\U000003F5',
+ "varkappa;": '\U000003F0',
+ "varnothing;": '\U00002205',
+ "varphi;": '\U000003D5',
+ "varpi;": '\U000003D6',
+ "varpropto;": '\U0000221D',
+ "varr;": '\U00002195',
+ "varrho;": '\U000003F1',
+ "varsigma;": '\U000003C2',
+ "vartheta;": '\U000003D1',
+ "vartriangleleft;": '\U000022B2',
+ "vartriangleright;": '\U000022B3',
+ "vcy;": '\U00000432',
+ "vdash;": '\U000022A2',
+ "vee;": '\U00002228',
+ "veebar;": '\U000022BB',
+ "veeeq;": '\U0000225A',
+ "vellip;": '\U000022EE',
+ "verbar;": '\U0000007C',
+ "vert;": '\U0000007C',
+ "vfr;": '\U0001D533',
+ "vltri;": '\U000022B2',
+ "vopf;": '\U0001D567',
+ "vprop;": '\U0000221D',
+ "vrtri;": '\U000022B3',
+ "vscr;": '\U0001D4CB',
+ "vzigzag;": '\U0000299A',
+ "wcirc;": '\U00000175',
+ "wedbar;": '\U00002A5F',
+ "wedge;": '\U00002227',
+ "wedgeq;": '\U00002259',
+ "weierp;": '\U00002118',
+ "wfr;": '\U0001D534',
+ "wopf;": '\U0001D568',
+ "wp;": '\U00002118',
+ "wr;": '\U00002240',
+ "wreath;": '\U00002240',
+ "wscr;": '\U0001D4CC',
+ "xcap;": '\U000022C2',
+ "xcirc;": '\U000025EF',
+ "xcup;": '\U000022C3',
+ "xdtri;": '\U000025BD',
+ "xfr;": '\U0001D535',
+ "xhArr;": '\U000027FA',
+ "xharr;": '\U000027F7',
+ "xi;": '\U000003BE',
+ "xlArr;": '\U000027F8',
+ "xlarr;": '\U000027F5',
+ "xmap;": '\U000027FC',
+ "xnis;": '\U000022FB',
+ "xodot;": '\U00002A00',
+ "xopf;": '\U0001D569',
+ "xoplus;": '\U00002A01',
+ "xotime;": '\U00002A02',
+ "xrArr;": '\U000027F9',
+ "xrarr;": '\U000027F6',
+ "xscr;": '\U0001D4CD',
+ "xsqcup;": '\U00002A06',
+ "xuplus;": '\U00002A04',
+ "xutri;": '\U000025B3',
+ "xvee;": '\U000022C1',
+ "xwedge;": '\U000022C0',
+ "yacute;": '\U000000FD',
+ "yacy;": '\U0000044F',
+ "ycirc;": '\U00000177',
+ "ycy;": '\U0000044B',
+ "yen;": '\U000000A5',
+ "yfr;": '\U0001D536',
+ "yicy;": '\U00000457',
+ "yopf;": '\U0001D56A',
+ "yscr;": '\U0001D4CE',
+ "yucy;": '\U0000044E',
+ "yuml;": '\U000000FF',
+ "zacute;": '\U0000017A',
+ "zcaron;": '\U0000017E',
+ "zcy;": '\U00000437',
+ "zdot;": '\U0000017C',
+ "zeetrf;": '\U00002128',
+ "zeta;": '\U000003B6',
+ "zfr;": '\U0001D537',
+ "zhcy;": '\U00000436',
+ "zigrarr;": '\U000021DD',
+ "zopf;": '\U0001D56B',
+ "zscr;": '\U0001D4CF',
+ "zwj;": '\U0000200D',
+ "zwnj;": '\U0000200C',
+ "AElig": '\U000000C6',
+ "AMP": '\U00000026',
+ "Aacute": '\U000000C1',
+ "Acirc": '\U000000C2',
+ "Agrave": '\U000000C0',
+ "Aring": '\U000000C5',
+ "Atilde": '\U000000C3',
+ "Auml": '\U000000C4',
+ "COPY": '\U000000A9',
+ "Ccedil": '\U000000C7',
+ "ETH": '\U000000D0',
+ "Eacute": '\U000000C9',
+ "Ecirc": '\U000000CA',
+ "Egrave": '\U000000C8',
+ "Euml": '\U000000CB',
+ "GT": '\U0000003E',
+ "Iacute": '\U000000CD',
+ "Icirc": '\U000000CE',
+ "Igrave": '\U000000CC',
+ "Iuml": '\U000000CF',
+ "LT": '\U0000003C',
+ "Ntilde": '\U000000D1',
+ "Oacute": '\U000000D3',
+ "Ocirc": '\U000000D4',
+ "Ograve": '\U000000D2',
+ "Oslash": '\U000000D8',
+ "Otilde": '\U000000D5',
+ "Ouml": '\U000000D6',
+ "QUOT": '\U00000022',
+ "REG": '\U000000AE',
+ "THORN": '\U000000DE',
+ "Uacute": '\U000000DA',
+ "Ucirc": '\U000000DB',
+ "Ugrave": '\U000000D9',
+ "Uuml": '\U000000DC',
+ "Yacute": '\U000000DD',
+ "aacute": '\U000000E1',
+ "acirc": '\U000000E2',
+ "acute": '\U000000B4',
+ "aelig": '\U000000E6',
+ "agrave": '\U000000E0',
+ "amp": '\U00000026',
+ "aring": '\U000000E5',
+ "atilde": '\U000000E3',
+ "auml": '\U000000E4',
+ "brvbar": '\U000000A6',
+ "ccedil": '\U000000E7',
+ "cedil": '\U000000B8',
+ "cent": '\U000000A2',
+ "copy": '\U000000A9',
+ "curren": '\U000000A4',
+ "deg": '\U000000B0',
+ "divide": '\U000000F7',
+ "eacute": '\U000000E9',
+ "ecirc": '\U000000EA',
+ "egrave": '\U000000E8',
+ "eth": '\U000000F0',
+ "euml": '\U000000EB',
+ "frac12": '\U000000BD',
+ "frac14": '\U000000BC',
+ "frac34": '\U000000BE',
+ "gt": '\U0000003E',
+ "iacute": '\U000000ED',
+ "icirc": '\U000000EE',
+ "iexcl": '\U000000A1',
+ "igrave": '\U000000EC',
+ "iquest": '\U000000BF',
+ "iuml": '\U000000EF',
+ "laquo": '\U000000AB',
+ "lt": '\U0000003C',
+ "macr": '\U000000AF',
+ "micro": '\U000000B5',
+ "middot": '\U000000B7',
+ "nbsp": '\U000000A0',
+ "not": '\U000000AC',
+ "ntilde": '\U000000F1',
+ "oacute": '\U000000F3',
+ "ocirc": '\U000000F4',
+ "ograve": '\U000000F2',
+ "ordf": '\U000000AA',
+ "ordm": '\U000000BA',
+ "oslash": '\U000000F8',
+ "otilde": '\U000000F5',
+ "ouml": '\U000000F6',
+ "para": '\U000000B6',
+ "plusmn": '\U000000B1',
+ "pound": '\U000000A3',
+ "quot": '\U00000022',
+ "raquo": '\U000000BB',
+ "reg": '\U000000AE',
+ "sect": '\U000000A7',
+ "shy": '\U000000AD',
+ "sup1": '\U000000B9',
+ "sup2": '\U000000B2',
+ "sup3": '\U000000B3',
+ "szlig": '\U000000DF',
+ "thorn": '\U000000FE',
+ "times": '\U000000D7',
+ "uacute": '\U000000FA',
+ "ucirc": '\U000000FB',
+ "ugrave": '\U000000F9',
+ "uml": '\U000000A8',
+ "uuml": '\U000000FC',
+ "yacute": '\U000000FD',
+ "yen": '\U000000A5',
+ "yuml": '\U000000FF',
+}
+
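
As a rough orientation on how the table above is meant to be consumed (the helper below is a hypothetical sketch, not part of the vendored file or of this package's real decoder): because "amp" and "amp;" are distinct keys, a lookup has to try the semicolon-terminated name first and may only fall back to the short legacy names, which the table guarantees are at most longestEntityWithoutSemicolon bytes long.

package html

import "strings"

// lookupNamed is an illustrative sketch of consuming the entity table above;
// the name and signature are hypothetical and this is NOT the decoder the
// package actually ships. Given the text that follows an '&', it prefers a
// ';'-terminated name and only then falls back to the short legacy names.
func lookupNamed(s string) (r rune, consumed int, ok bool) {
	// "amp;" and "amp" are separate keys, so try the ';'-terminated form first.
	if i := strings.IndexByte(s, ';'); i >= 0 {
		if r, ok := entity[s[:i+1]]; ok {
			return r, i + 1, true
		}
	}
	// Legacy fallback: names without a trailing ';' are never longer than
	// longestEntityWithoutSemicolon, so only a few prefixes need checking.
	max := longestEntityWithoutSemicolon
	if len(s) < max {
		max = len(s)
	}
	for n := max; n > 0; n-- {
		if r, ok := entity[s[:n]]; ok {
			return r, n, true
		}
	}
	return 0, 0, false
}

With this sketch, lookupNamed("amp;rest") would consume "amp;" and yield '&', while lookupNamed("ampere") would fall back to the legacy "amp" entry and consume only three bytes.
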
+// HTML entities that are two unicode codepoints.
+var entity2 = map[string][2]rune{
+ // TODO(nigeltao): Handle replacements that are wider than their names.
+ // "nLt;": {'\u226A', '\u20D2'},
+ // "nGt;": {'\u226B', '\u20D2'},
+ "NotEqualTilde;": {'\u2242', '\u0338'},
+ "NotGreaterFullEqual;": {'\u2267', '\u0338'},
+ "NotGreaterGreater;": {'\u226B', '\u0338'},
+ "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
+ "NotHumpDownHump;": {'\u224E', '\u0338'},
+ "NotHumpEqual;": {'\u224F', '\u0338'},
+ "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
+ "NotLessLess;": {'\u226A', '\u0338'},
+ "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
+ "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
+ "NotNestedLessLess;": {'\u2AA1', '\u0338'},
+ "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
+ "NotRightTriangleBar;": {'\u29D0', '\u0338'},
+ "NotSquareSubset;": {'\u228F', '\u0338'},
+ "NotSquareSuperset;": {'\u2290', '\u0338'},
+ "NotSubset;": {'\u2282', '\u20D2'},
+ "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
+ "NotSucceedsTilde;": {'\u227F', '\u0338'},
+ "NotSuperset;": {'\u2283', '\u20D2'},
+ "ThickSpace;": {'\u205F', '\u200A'},
+ "acE;": {'\u223E', '\u0333'},
+ "bne;": {'\u003D', '\u20E5'},
+ "bnequiv;": {'\u2261', '\u20E5'},
+ "caps;": {'\u2229', '\uFE00'},
+ "cups;": {'\u222A', '\uFE00'},
+ "fjlig;": {'\u0066', '\u006A'},
+ "gesl;": {'\u22DB', '\uFE00'},
+ "gvertneqq;": {'\u2269', '\uFE00'},
+ "gvnE;": {'\u2269', '\uFE00'},
+ "lates;": {'\u2AAD', '\uFE00'},
+ "lesg;": {'\u22DA', '\uFE00'},
+ "lvertneqq;": {'\u2268', '\uFE00'},
+ "lvnE;": {'\u2268', '\uFE00'},
+ "nGg;": {'\u22D9', '\u0338'},
+ "nGtv;": {'\u226B', '\u0338'},
+ "nLl;": {'\u22D8', '\u0338'},
+ "nLtv;": {'\u226A', '\u0338'},
+ "nang;": {'\u2220', '\u20D2'},
+ "napE;": {'\u2A70', '\u0338'},
+ "napid;": {'\u224B', '\u0338'},
+ "nbump;": {'\u224E', '\u0338'},
+ "nbumpe;": {'\u224F', '\u0338'},
+ "ncongdot;": {'\u2A6D', '\u0338'},
+ "nedot;": {'\u2250', '\u0338'},
+ "nesim;": {'\u2242', '\u0338'},
+ "ngE;": {'\u2267', '\u0338'},
+ "ngeqq;": {'\u2267', '\u0338'},
+ "ngeqslant;": {'\u2A7E', '\u0338'},
+ "nges;": {'\u2A7E', '\u0338'},
+ "nlE;": {'\u2266', '\u0338'},
+ "nleqq;": {'\u2266', '\u0338'},
+ "nleqslant;": {'\u2A7D', '\u0338'},
+ "nles;": {'\u2A7D', '\u0338'},
+ "notinE;": {'\u22F9', '\u0338'},
+ "notindot;": {'\u22F5', '\u0338'},
+ "nparsl;": {'\u2AFD', '\u20E5'},
+ "npart;": {'\u2202', '\u0338'},
+ "npre;": {'\u2AAF', '\u0338'},
+ "npreceq;": {'\u2AAF', '\u0338'},
+ "nrarrc;": {'\u2933', '\u0338'},
+ "nrarrw;": {'\u219D', '\u0338'},
+ "nsce;": {'\u2AB0', '\u0338'},
+ "nsubE;": {'\u2AC5', '\u0338'},
+ "nsubset;": {'\u2282', '\u20D2'},
+ "nsubseteqq;": {'\u2AC5', '\u0338'},
+ "nsucceq;": {'\u2AB0', '\u0338'},
+ "nsupE;": {'\u2AC6', '\u0338'},
+ "nsupset;": {'\u2283', '\u20D2'},
+ "nsupseteqq;": {'\u2AC6', '\u0338'},
+ "nvap;": {'\u224D', '\u20D2'},
+ "nvge;": {'\u2265', '\u20D2'},
+ "nvgt;": {'\u003E', '\u20D2'},
+ "nvle;": {'\u2264', '\u20D2'},
+ "nvlt;": {'\u003C', '\u20D2'},
+ "nvltrie;": {'\u22B4', '\u20D2'},
+ "nvrtrie;": {'\u22B5', '\u20D2'},
+ "nvsim;": {'\u223C', '\u20D2'},
+ "race;": {'\u223D', '\u0331'},
+ "smtes;": {'\u2AAC', '\uFE00'},
+ "sqcaps;": {'\u2293', '\uFE00'},
+ "sqcups;": {'\u2294', '\uFE00'},
+ "varsubsetneq;": {'\u228A', '\uFE00'},
+ "varsubsetneqq;": {'\u2ACB', '\uFE00'},
+ "varsupsetneq;": {'\u228B', '\uFE00'},
+ "varsupsetneqq;": {'\u2ACC', '\uFE00'},
+ "vnsub;": {'\u2282', '\u20D2'},
+ "vnsup;": {'\u2283', '\u20D2'},
+ "vsubnE;": {'\u2ACB', '\uFE00'},
+ "vsubne;": {'\u228A', '\uFE00'},
+ "vsupnE;": {'\u2ACC', '\uFE00'},
+ "vsupne;": {'\u228B', '\uFE00'},
+}
diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go
new file mode 100644
index 000000000..b53f866fa
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity_test.go
@@ -0,0 +1,29 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "testing"
+ "unicode/utf8"
+)
+
+func TestEntityLength(t *testing.T) {
+ // We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).
+ // The +1 comes from the leading "&". This property implies that the length of
+ // unescaped text is <= the length of escaped text.
+ for k, v := range entity {
+ if 1+len(k) < utf8.RuneLen(v) {
+ t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v))
+ }
+ if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {
+ t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon)
+ }
+ }
+ for k, v := range entity2 {
+ if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {
+ t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1]))
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 000000000..d85613962
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,258 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+ '\u20AC', // First entry is what 0x80 should be replaced with.
+ '\u0081',
+ '\u201A',
+ '\u0192',
+ '\u201E',
+ '\u2026',
+ '\u2020',
+ '\u2021',
+ '\u02C6',
+ '\u2030',
+ '\u0160',
+ '\u2039',
+ '\u0152',
+ '\u008D',
+ '\u017D',
+ '\u008F',
+ '\u0090',
+ '\u2018',
+ '\u2019',
+ '\u201C',
+ '\u201D',
+ '\u2022',
+ '\u2013',
+ '\u2014',
+ '\u02DC',
+ '\u2122',
+ '\u0161',
+ '\u203A',
+ '\u0153',
+ '\u009D',
+ '\u017E',
+ '\u0178', // Last entry is 0x9F.
+ // 0x00->'\uFFFD' is handled programmatically.
+ // 0x0D->'\u000D' is a no-op.
+}
+
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+ // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+ // i starts at 1 because we already know that s[0] == '&'.
+ i, s := 1, b[src:]
+
+ if len(s) <= 1 {
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if s[i] == '#' {
+ if len(s) <= 3 { // We need to have at least "&#.".
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+ i++
+ c := s[i]
+ hex := false
+ if c == 'x' || c == 'X' {
+ hex = true
+ i++
+ }
+
+ x := '\x00'
+ for i < len(s) {
+ c = s[i]
+ i++
+ if hex {
+ if '0' <= c && c <= '9' {
+ x = 16*x + rune(c) - '0'
+ continue
+ } else if 'a' <= c && c <= 'f' {
+ x = 16*x + rune(c) - 'a' + 10
+ continue
+ } else if 'A' <= c && c <= 'F' {
+ x = 16*x + rune(c) - 'A' + 10
+ continue
+ }
+ } else if '0' <= c && c <= '9' {
+ x = 10*x + rune(c) - '0'
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ if i <= 3 { // No characters matched.
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if 0x80 <= x && x <= 0x9F {
+ // Replace characters from Windows-1252 with UTF-8 equivalents.
+ x = replacementTable[x-0x80]
+ } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+ // Replace invalid characters with the replacement character.
+ x = '\uFFFD'
+ }
+
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ }
+
+ // Consume the maximum number of characters possible, with the
+ // consumed characters matching one of the named references.
+
+ for i < len(s) {
+ c := s[i]
+ i++
+ // Lower-cased characters are more common in entities, so we check for them first.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ entityName := string(s[1:i])
+ if entityName == "" {
+ // No-op.
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+ // No-op.
+ } else if x := entity[entityName]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ } else if x := entity2[entityName]; x[0] != 0 {
+ dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+ return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+ } else if !attribute {
+ maxLen := len(entityName) - 1
+ if maxLen > longestEntityWithoutSemicolon {
+ maxLen = longestEntityWithoutSemicolon
+ }
+ for j := maxLen; j > 1; j-- {
+ if x := entity[entityName[:j]]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+ }
+ }
+ }
+
+ dst1, src1 = dst+i, src+i
+ copy(b[dst:dst1], b[src:src1])
+ return dst1, src1
+}
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+ for i, c := range b {
+ if c == '&' {
+ dst, src := unescapeEntity(b, i, i, attribute)
+ for src < len(b) {
+ c := b[src]
+ if c == '&' {
+ dst, src = unescapeEntity(b, dst, src, attribute)
+ } else {
+ b[dst] = c
+ dst, src = dst+1, src+1
+ }
+ }
+ return b[0:dst]
+ }
+ }
+ return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+ for i, c := range b {
+ if 'A' <= c && c <= 'Z' {
+ b[i] = c + 'a' - 'A'
+ }
+ }
+ return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+ i := strings.IndexAny(s, escapedChars)
+ for i != -1 {
+ if _, err := w.WriteString(s[:i]); err != nil {
+ return err
+ }
+ var esc string
+ switch s[i] {
+ case '&':
+ esc = "&amp;"
+ case '\'':
+ // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+ esc = "&#39;"
+ case '<':
+ esc = "&lt;"
+ case '>':
+ esc = "&gt;"
+ case '"':
+ // "&#34;" is shorter than "&quot;".
+ esc = "&#34;"
+ case '\r':
+ esc = "&#13;"
+ default:
+ panic("unrecognized escape character")
+ }
+ s = s[i+1:]
+ if _, err := w.WriteString(esc); err != nil {
+ return err
+ }
+ i = strings.IndexAny(s, escapedChars)
+ }
+ _, err := w.WriteString(s)
+ return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only these characters: <, >, &, ', " and '\r' (carriage return).
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+ if strings.IndexAny(s, escapedChars) == -1 {
+ return s
+ }
+ var buf bytes.Buffer
+ escape(&buf, s)
+ return buf.String()
+}
+
+// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
+// larger range of entities than EscapeString escapes. For example, "&aacute;"
+// unescapes to "á", as do "&#225;" and "&#xE1;".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func UnescapeString(s string) string {
+ for _, c := range s {
+ if c == '&' {
+ return string(unescape([]byte(s), false))
+ }
+ }
+ return s
+}
diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go
new file mode 100644
index 000000000..b405d4b4a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape_test.go
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import "testing"
+
+type unescapeTest struct {
+ // A short description of the test case.
+ desc string
+ // The HTML text.
+ html string
+ // The unescaped text.
+ unescaped string
+}
+
+var unescapeTests = []unescapeTest{
+ // Handle no entities.
+ {
+ "copy",
+ "A\ttext\nstring",
+ "A\ttext\nstring",
+ },
+ // Handle simple named entities.
+ {
+ "simple",
+ "&amp; &gt; &lt;",
+ "& > <",
+ },
+ // Handle hitting the end of the string.
+ {
+ "stringEnd",
+ "&amp &amp",
+ "& &",
+ },
+ // Handle entities with two codepoints.
+ {
+ "multiCodepoint",
+ "text &gesl; blah",
+ "text \u22db\ufe00 blah",
+ },
+ // Handle decimal numeric entities.
+ {
+ "decimalEntity",
+ "Delta = &#916; ",
+ "Delta = Δ ",
+ },
+ // Handle hexadecimal numeric entities.
+ {
+ "hexadecimalEntity",
+ "Lambda = &#x3bb; = &#X3Bb ",
+ "Lambda = λ = λ ",
+ },
+ // Handle numeric early termination.
+ {
+ "numericEnds",
+ "&# &#x &#128;43 &copy = &#169f = &#xa9",
+ "&# &#x €43 © = ©f = ©",
+ },
+ // Handle numeric ISO-8859-1 entity replacements.
+ {
+ "numericReplacements",
+ "Footnote&#x87;",
+ "Footnote‡",
+ },
+}
+
+func TestUnescape(t *testing.T) {
+ for _, tt := range unescapeTests {
+ unescaped := UnescapeString(tt.html)
+ if unescaped != tt.unescaped {
+ t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped)
+ }
+ }
+}
+
+func TestUnescapeEscape(t *testing.T) {
+ ss := []string{
+ ``,
+ `abc def`,
+ `a & b`,
+ `a&amp;b`,
+ `a &amp b`,
+ `&quot;`,
+ `"`,
+ `"<&>"`,
+ `&quot;&lt;&amp;&gt;&quot;`,
+ `3&5==1 && 0<1, "0&lt;1", a+acute=&aacute;`,
+ `The special characters are: <, >, &, ' and "`,
+ }
+ for _, s := range ss {
+ if got := UnescapeString(EscapeString(s)); got != s {
+ t.Errorf("got %q want %q", got, s)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go
new file mode 100644
index 000000000..0b06ed773
--- /dev/null
+++ b/vendor/golang.org/x/net/html/example_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This example demonstrates parsing HTML data and walking the resulting tree.
+package html_test
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+func ExampleParse() {
+ s := `<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>`
+ doc, err := html.Parse(strings.NewReader(s))
+ if err != nil {
+ log.Fatal(err)
+ }
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ for _, a := range n.Attr {
+ if a.Key == "href" {
+ fmt.Println(a.Val)
+ break
+ }
+ }
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ f(doc)
+ // Output:
+ // foo
+ // /bar/baz
+}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
new file mode 100644
index 000000000..d3b384409
--- /dev/null
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
+ for i := range aa {
+ if newName, ok := nameMap[aa[i].Key]; ok {
+ aa[i].Key = newName
+ }
+ }
+}
+
+func adjustForeignAttributes(aa []Attribute) {
+ for i, a := range aa {
+ if a.Key == "" || a.Key[0] != 'x' {
+ continue
+ }
+ switch a.Key {
+ case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
+ "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
+ j := strings.Index(a.Key, ":")
+ aa[i].Namespace = a.Key[:j]
+ aa[i].Key = a.Key[j+1:]
+ }
+ }
+}
+
+func htmlIntegrationPoint(n *Node) bool {
+ if n.Type != ElementNode {
+ return false
+ }
+ switch n.Namespace {
+ case "math":
+ if n.Data == "annotation-xml" {
+ for _, a := range n.Attr {
+ if a.Key == "encoding" {
+ val := strings.ToLower(a.Val)
+ if val == "text/html" || val == "application/xhtml+xml" {
+ return true
+ }
+ }
+ }
+ }
+ case "svg":
+ switch n.Data {
+ case "desc", "foreignObject", "title":
+ return true
+ }
+ }
+ return false
+}
+
+func mathMLTextIntegrationPoint(n *Node) bool {
+ if n.Namespace != "math" {
+ return false
+ }
+ switch n.Data {
+ case "mi", "mo", "mn", "ms", "mtext":
+ return true
+ }
+ return false
+}
+
+// Section 12.2.5.5.
+var breakout = map[string]bool{
+ "b": true,
+ "big": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "center": true,
+ "code": true,
+ "dd": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "em": true,
+ "embed": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "hr": true,
+ "i": true,
+ "img": true,
+ "li": true,
+ "listing": true,
+ "menu": true,
+ "meta": true,
+ "nobr": true,
+ "ol": true,
+ "p": true,
+ "pre": true,
+ "ruby": true,
+ "s": true,
+ "small": true,
+ "span": true,
+ "strong": true,
+ "strike": true,
+ "sub": true,
+ "sup": true,
+ "table": true,
+ "tt": true,
+ "u": true,
+ "ul": true,
+ "var": true,
+}
+
+// Section 12.2.5.5.
+var svgTagNameAdjustments = map[string]string{
+ "altglyph": "altGlyph",
+ "altglyphdef": "altGlyphDef",
+ "altglyphitem": "altGlyphItem",
+ "animatecolor": "animateColor",
+ "animatemotion": "animateMotion",
+ "animatetransform": "animateTransform",
+ "clippath": "clipPath",
+ "feblend": "feBlend",
+ "fecolormatrix": "feColorMatrix",
+ "fecomponenttransfer": "feComponentTransfer",
+ "fecomposite": "feComposite",
+ "feconvolvematrix": "feConvolveMatrix",
+ "fediffuselighting": "feDiffuseLighting",
+ "fedisplacementmap": "feDisplacementMap",
+ "fedistantlight": "feDistantLight",
+ "feflood": "feFlood",
+ "fefunca": "feFuncA",
+ "fefuncb": "feFuncB",
+ "fefuncg": "feFuncG",
+ "fefuncr": "feFuncR",
+ "fegaussianblur": "feGaussianBlur",
+ "feimage": "feImage",
+ "femerge": "feMerge",
+ "femergenode": "feMergeNode",
+ "femorphology": "feMorphology",
+ "feoffset": "feOffset",
+ "fepointlight": "fePointLight",
+ "fespecularlighting": "feSpecularLighting",
+ "fespotlight": "feSpotLight",
+ "fetile": "feTile",
+ "feturbulence": "feTurbulence",
+ "foreignobject": "foreignObject",
+ "glyphref": "glyphRef",
+ "lineargradient": "linearGradient",
+ "radialgradient": "radialGradient",
+ "textpath": "textPath",
+}
+
+// Section 12.2.5.1
+var mathMLAttributeAdjustments = map[string]string{
+ "definitionurl": "definitionURL",
+}
+
+var svgAttributeAdjustments = map[string]string{
+ "attributename": "attributeName",
+ "attributetype": "attributeType",
+ "basefrequency": "baseFrequency",
+ "baseprofile": "baseProfile",
+ "calcmode": "calcMode",
+ "clippathunits": "clipPathUnits",
+ "contentscripttype": "contentScriptType",
+ "contentstyletype": "contentStyleType",
+ "diffuseconstant": "diffuseConstant",
+ "edgemode": "edgeMode",
+ "externalresourcesrequired": "externalResourcesRequired",
+ "filterres": "filterRes",
+ "filterunits": "filterUnits",
+ "glyphref": "glyphRef",
+ "gradienttransform": "gradientTransform",
+ "gradientunits": "gradientUnits",
+ "kernelmatrix": "kernelMatrix",
+ "kernelunitlength": "kernelUnitLength",
+ "keypoints": "keyPoints",
+ "keysplines": "keySplines",
+ "keytimes": "keyTimes",
+ "lengthadjust": "lengthAdjust",
+ "limitingconeangle": "limitingConeAngle",
+ "markerheight": "markerHeight",
+ "markerunits": "markerUnits",
+ "markerwidth": "markerWidth",
+ "maskcontentunits": "maskContentUnits",
+ "maskunits": "maskUnits",
+ "numoctaves": "numOctaves",
+ "pathlength": "pathLength",
+ "patterncontentunits": "patternContentUnits",
+ "patterntransform": "patternTransform",
+ "patternunits": "patternUnits",
+ "pointsatx": "pointsAtX",
+ "pointsaty": "pointsAtY",
+ "pointsatz": "pointsAtZ",
+ "preservealpha": "preserveAlpha",
+ "preserveaspectratio": "preserveAspectRatio",
+ "primitiveunits": "primitiveUnits",
+ "refx": "refX",
+ "refy": "refY",
+ "repeatcount": "repeatCount",
+ "repeatdur": "repeatDur",
+ "requiredextensions": "requiredExtensions",
+ "requiredfeatures": "requiredFeatures",
+ "specularconstant": "specularConstant",
+ "specularexponent": "specularExponent",
+ "spreadmethod": "spreadMethod",
+ "startoffset": "startOffset",
+ "stddeviation": "stdDeviation",
+ "stitchtiles": "stitchTiles",
+ "surfacescale": "surfaceScale",
+ "systemlanguage": "systemLanguage",
+ "tablevalues": "tableValues",
+ "targetx": "targetX",
+ "targety": "targetY",
+ "textlength": "textLength",
+ "viewbox": "viewBox",
+ "viewtarget": "viewTarget",
+ "xchannelselector": "xChannelSelector",
+ "ychannelselector": "yChannelSelector",
+ "zoomandpan": "zoomAndPan",
+}
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
new file mode 100644
index 000000000..26b657aec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node.go
@@ -0,0 +1,193 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "golang.org/x/net/html/atom"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint32
+
+const (
+ ErrorNode NodeType = iota
+ TextNode
+ DocumentNode
+ ElementNode
+ CommentNode
+ DoctypeNode
+ scopeMarkerNode
+)
+
+// Section 12.2.3.3 says "scope markers are inserted when entering applet
+// elements, buttons, object elements, marquees, table cells, and table
+// captions, and are used to prevent formatting from 'leaking'".
+var scopeMarker = Node{Type: scopeMarkerNode}
+
+// A Node consists of a NodeType and some Data (tag name for element nodes,
+// content for text) and is part of a tree of Nodes. Element nodes may also
+// have a Namespace and contain a slice of Attributes. Data is unescaped, so
+// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
+// is the atom for Data, or zero if Data is not a known tag name.
+//
+// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
+// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
+// "svg" is short for "http://www.w3.org/2000/svg".
+type Node struct {
+ Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+ Type NodeType
+ DataAtom atom.Atom
+ Data string
+ Namespace string
+ Attr []Attribute
+}
+
+// InsertBefore inserts newChild as a child of n, immediately before oldChild
+// in the sequence of n's children. oldChild may be nil, in which case newChild
+// is appended to the end of n's children.
+//
+// It will panic if newChild already has a parent or siblings.
+func (n *Node) InsertBefore(newChild, oldChild *Node) {
+ if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
+ panic("html: InsertBefore called for an attached child Node")
+ }
+ var prev, next *Node
+ if oldChild != nil {
+ prev, next = oldChild.PrevSibling, oldChild
+ } else {
+ prev = n.LastChild
+ }
+ if prev != nil {
+ prev.NextSibling = newChild
+ } else {
+ n.FirstChild = newChild
+ }
+ if next != nil {
+ next.PrevSibling = newChild
+ } else {
+ n.LastChild = newChild
+ }
+ newChild.Parent = n
+ newChild.PrevSibling = prev
+ newChild.NextSibling = next
+}
+
+// AppendChild adds a node c as a child of n.
+//
+// It will panic if c already has a parent or siblings.
+func (n *Node) AppendChild(c *Node) {
+ if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
+ panic("html: AppendChild called for an attached child Node")
+ }
+ last := n.LastChild
+ if last != nil {
+ last.NextSibling = c
+ } else {
+ n.FirstChild = c
+ }
+ n.LastChild = c
+ c.Parent = n
+ c.PrevSibling = last
+}
+
+// RemoveChild removes a node c that is a child of n. Afterwards, c will have
+// no parent and no siblings.
+//
+// It will panic if c's parent is not n.
+func (n *Node) RemoveChild(c *Node) {
+ if c.Parent != n {
+ panic("html: RemoveChild called for a non-child Node")
+ }
+ if n.FirstChild == c {
+ n.FirstChild = c.NextSibling
+ }
+ if c.NextSibling != nil {
+ c.NextSibling.PrevSibling = c.PrevSibling
+ }
+ if n.LastChild == c {
+ n.LastChild = c.PrevSibling
+ }
+ if c.PrevSibling != nil {
+ c.PrevSibling.NextSibling = c.NextSibling
+ }
+ c.Parent = nil
+ c.PrevSibling = nil
+ c.NextSibling = nil
+}
+
+// reparentChildren reparents all of src's child nodes to dst.
+func reparentChildren(dst, src *Node) {
+ for {
+ child := src.FirstChild
+ if child == nil {
+ break
+ }
+ src.RemoveChild(child)
+ dst.AppendChild(child)
+ }
+}
+
+// clone returns a new node with the same type, data and attributes.
+// The clone has no parent, no siblings and no children.
+func (n *Node) clone() *Node {
+ m := &Node{
+ Type: n.Type,
+ DataAtom: n.DataAtom,
+ Data: n.Data,
+ Attr: make([]Attribute, len(n.Attr)),
+ }
+ copy(m.Attr, n.Attr)
+ return m
+}
+
+// nodeStack is a stack of nodes.
+type nodeStack []*Node
+
+// pop pops the stack. It will panic if s is empty.
+func (s *nodeStack) pop() *Node {
+ i := len(*s)
+ n := (*s)[i-1]
+ *s = (*s)[:i-1]
+ return n
+}
+
+// top returns the most recently pushed node, or nil if s is empty.
+func (s *nodeStack) top() *Node {
+ if i := len(*s); i > 0 {
+ return (*s)[i-1]
+ }
+ return nil
+}
+
+// index returns the index of the top-most occurrence of n in the stack, or -1
+// if n is not present.
+func (s *nodeStack) index(n *Node) int {
+ for i := len(*s) - 1; i >= 0; i-- {
+ if (*s)[i] == n {
+ return i
+ }
+ }
+ return -1
+}
+
+// insert inserts a node at the given index.
+func (s *nodeStack) insert(i int, n *Node) {
+ (*s) = append(*s, nil)
+ copy((*s)[i+1:], (*s)[i:])
+ (*s)[i] = n
+}
+
+// remove removes a node from the stack. It is a no-op if n is not present.
+func (s *nodeStack) remove(n *Node) {
+ i := s.index(n)
+ if i == -1 {
+ return
+ }
+ copy((*s)[i:], (*s)[i+1:])
+ j := len(*s) - 1
+ (*s)[j] = nil
+ *s = (*s)[:j]
+}
diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go
new file mode 100644
index 000000000..471102f3a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node_test.go
@@ -0,0 +1,146 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "fmt"
+)
+
+// checkTreeConsistency checks that a node and its descendants are all
+// consistent in their parent/child/sibling relationships.
+func checkTreeConsistency(n *Node) error {
+ return checkTreeConsistency1(n, 0)
+}
+
+func checkTreeConsistency1(n *Node, depth int) error {
+ if depth == 1e4 {
+ return fmt.Errorf("html: tree looks like it contains a cycle")
+ }
+ if err := checkNodeConsistency(n); err != nil {
+ return err
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := checkTreeConsistency1(c, depth+1); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// checkNodeConsistency checks that a node's parent/child/sibling relationships
+// are consistent.
+func checkNodeConsistency(n *Node) error {
+ if n == nil {
+ return nil
+ }
+
+ nParent := 0
+ for p := n.Parent; p != nil; p = p.Parent {
+ nParent++
+ if nParent == 1e4 {
+ return fmt.Errorf("html: parent list looks like an infinite loop")
+ }
+ }
+
+ nForward := 0
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ nForward++
+ if nForward == 1e6 {
+ return fmt.Errorf("html: forward list of children looks like an infinite loop")
+ }
+ if c.Parent != n {
+ return fmt.Errorf("html: inconsistent child/parent relationship")
+ }
+ }
+
+ nBackward := 0
+ for c := n.LastChild; c != nil; c = c.PrevSibling {
+ nBackward++
+ if nBackward == 1e6 {
+ return fmt.Errorf("html: backward list of children looks like an infinite loop")
+ }
+ if c.Parent != n {
+ return fmt.Errorf("html: inconsistent child/parent relationship")
+ }
+ }
+
+ if n.Parent != nil {
+ if n.Parent == n {
+ return fmt.Errorf("html: inconsistent parent relationship")
+ }
+ if n.Parent == n.FirstChild {
+ return fmt.Errorf("html: inconsistent parent/first relationship")
+ }
+ if n.Parent == n.LastChild {
+ return fmt.Errorf("html: inconsistent parent/last relationship")
+ }
+ if n.Parent == n.PrevSibling {
+ return fmt.Errorf("html: inconsistent parent/prev relationship")
+ }
+ if n.Parent == n.NextSibling {
+ return fmt.Errorf("html: inconsistent parent/next relationship")
+ }
+
+ parentHasNAsAChild := false
+ for c := n.Parent.FirstChild; c != nil; c = c.NextSibling {
+ if c == n {
+ parentHasNAsAChild = true
+ break
+ }
+ }
+ if !parentHasNAsAChild {
+ return fmt.Errorf("html: inconsistent parent/child relationship")
+ }
+ }
+
+ if n.PrevSibling != nil && n.PrevSibling.NextSibling != n {
+ return fmt.Errorf("html: inconsistent prev/next relationship")
+ }
+ if n.NextSibling != nil && n.NextSibling.PrevSibling != n {
+ return fmt.Errorf("html: inconsistent next/prev relationship")
+ }
+
+ if (n.FirstChild == nil) != (n.LastChild == nil) {
+ return fmt.Errorf("html: inconsistent first/last relationship")
+ }
+ if n.FirstChild != nil && n.FirstChild == n.LastChild {
+ // We have a sole child.
+ if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil {
+ return fmt.Errorf("html: inconsistent sole child's sibling relationship")
+ }
+ }
+
+ seen := map[*Node]bool{}
+
+ var last *Node
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if seen[c] {
+ return fmt.Errorf("html: inconsistent repeated child")
+ }
+ seen[c] = true
+ last = c
+ }
+ if last != n.LastChild {
+ return fmt.Errorf("html: inconsistent last relationship")
+ }
+
+ var first *Node
+ for c := n.LastChild; c != nil; c = c.PrevSibling {
+ if !seen[c] {
+ return fmt.Errorf("html: inconsistent missing child")
+ }
+ delete(seen, c)
+ first = c
+ }
+ if first != n.FirstChild {
+ return fmt.Errorf("html: inconsistent first relationship")
+ }
+
+ if len(seen) != 0 {
+ return fmt.Errorf("html: inconsistent forwards/backwards child list")
+ }
+
+ return nil
+}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
new file mode 100644
index 000000000..be4b2bf5a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -0,0 +1,2094 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ a "golang.org/x/net/html/atom"
+)
+
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+ // tokenizer provides the tokens for the parser.
+ tokenizer *Tokenizer
+ // tok is the most recently read token.
+ tok Token
+ // Self-closing tags like <hr/> are treated as start tags, except that
+ // hasSelfClosingToken is set while they are being processed.
+ hasSelfClosingToken bool
+ // doc is the document root element.
+ doc *Node
+ // The stack of open elements (section 12.2.3.2) and active formatting
+ // elements (section 12.2.3.3).
+ oe, afe nodeStack
+ // Element pointers (section 12.2.3.4).
+ head, form *Node
+ // Other parsing state flags (section 12.2.3.5).
+ scripting, framesetOK bool
+ // im is the current insertion mode.
+ im insertionMode
+ // originalIM is the insertion mode to go back to after completing a text
+ // or inTableText insertion mode.
+ originalIM insertionMode
+ // fosterParenting is whether new elements should be inserted according to
+ // the foster parenting rules (section 12.2.5.3).
+ fosterParenting bool
+ // quirks is whether the parser is operating in "quirks mode."
+ quirks bool
+ // fragment is whether the parser is parsing an HTML fragment.
+ fragment bool
+ // context is the context element when parsing an HTML fragment
+ // (section 12.4).
+ context *Node
+}
+
+func (p *parser) top() *Node {
+ if n := p.oe.top(); n != nil {
+ return n
+ }
+ return p.doc
+}
+
+// Stop tags for use in popUntil. These come from section 12.2.3.2.
+var (
+ defaultScopeStopTags = map[string][]a.Atom{
+ "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
+ "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
+ "svg": {a.Desc, a.ForeignObject, a.Title},
+ }
+)
+
+type scope int
+
+const (
+ defaultScope scope = iota
+ listItemScope
+ buttonScope
+ tableScope
+ tableRowScope
+ tableBodyScope
+ selectScope
+)
+
+// popUntil pops the stack of open elements at the highest element whose tag
+// is in matchTags, provided there is no higher element in the scope's stop
+// tags (as defined in section 12.2.3.2). It returns whether or not there was
+// such an element. If there was not, popUntil leaves the stack unchanged.
+//
+// For example, the set of stop tags for table scope is: "html", "table". If
+// the stack was:
+// ["html", "body", "font", "table", "b", "i", "u"]
+// then popUntil(tableScope, "font") would return false, but
+// popUntil(tableScope, "i") would return true and the stack would become:
+// ["html", "body", "font", "table", "b"]
+//
+// If an element's tag is in both the stop tags and matchTags, then the stack
+// will be popped and the function returns true (provided, of course, there was
+// no higher element in the stack that was also in the stop tags). For example,
+// popUntil(tableScope, "table") returns true and leaves:
+// ["html", "body", "font"]
+func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
+ if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
+ p.oe = p.oe[:i]
+ return true
+ }
+ return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element whose
+// tag is in matchTags that is in scope. If no matching element is in scope, it
+// returns -1.
+func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ tagAtom := p.oe[i].DataAtom
+ if p.oe[i].Namespace == "" {
+ for _, t := range matchTags {
+ if t == tagAtom {
+ return i
+ }
+ }
+ switch s {
+ case defaultScope:
+ // No-op.
+ case listItemScope:
+ if tagAtom == a.Ol || tagAtom == a.Ul {
+ return -1
+ }
+ case buttonScope:
+ if tagAtom == a.Button {
+ return -1
+ }
+ case tableScope:
+ if tagAtom == a.Html || tagAtom == a.Table {
+ return -1
+ }
+ case selectScope:
+ if tagAtom != a.Optgroup && tagAtom != a.Option {
+ return -1
+ }
+ default:
+ panic("unreachable")
+ }
+ }
+ switch s {
+ case defaultScope, listItemScope, buttonScope:
+ for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
+ if t == tagAtom {
+ return -1
+ }
+ }
+ }
+ }
+ return -1
+}
+
+// elementInScope is like popUntil, except that it doesn't modify the stack of
+// open elements.
+func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
+ return p.indexOfElementInScope(s, matchTags...) != -1
+}
+
+// clearStackToContext pops elements off the stack of open elements until a
+// scope-defined element is found.
+func (p *parser) clearStackToContext(s scope) {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ tagAtom := p.oe[i].DataAtom
+ switch s {
+ case tableScope:
+ if tagAtom == a.Html || tagAtom == a.Table {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ case tableRowScope:
+ if tagAtom == a.Html || tagAtom == a.Tr {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ case tableBodyScope:
+ if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ default:
+ panic("unreachable")
+ }
+ }
+}
+
+// generateImpliedEndTags pops nodes off the stack of open elements as long as
+// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt.
+// If exceptions are specified, nodes with that name will not be popped off.
+func (p *parser) generateImpliedEndTags(exceptions ...string) {
+ var i int
+loop:
+ for i = len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if n.Type == ElementNode {
+ switch n.DataAtom {
+ case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt:
+ for _, except := range exceptions {
+ if n.Data == except {
+ break loop
+ }
+ }
+ continue
+ }
+ }
+ break
+ }
+
+ p.oe = p.oe[:i+1]
+}
+
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
+func (p *parser) addChild(n *Node) {
+ if p.shouldFosterParent() {
+ p.fosterParent(n)
+ } else {
+ p.top().AppendChild(n)
+ }
+
+ if n.Type == ElementNode {
+ p.oe = append(p.oe, n)
+ }
+}
+
+// shouldFosterParent returns whether the next node to be added should be
+// foster parented.
+func (p *parser) shouldFosterParent() bool {
+ if p.fosterParenting {
+ switch p.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ return true
+ }
+ }
+ return false
+}
+
+// fosterParent adds a child node according to the foster parenting rules.
+// Section 12.2.5.3, "foster parenting".
+func (p *parser) fosterParent(n *Node) {
+ var table, parent, prev *Node
+ var i int
+ for i = len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].DataAtom == a.Table {
+ table = p.oe[i]
+ break
+ }
+ }
+
+ if table == nil {
+ // The foster parent is the html element.
+ parent = p.oe[0]
+ } else {
+ parent = table.Parent
+ }
+ if parent == nil {
+ parent = p.oe[i-1]
+ }
+
+ if table != nil {
+ prev = table.PrevSibling
+ } else {
+ prev = parent.LastChild
+ }
+ if prev != nil && prev.Type == TextNode && n.Type == TextNode {
+ prev.Data += n.Data
+ return
+ }
+
+ parent.InsertBefore(n, table)
+}
+
+// addText adds text to the preceding node if it is a text node, or else it
+// calls addChild with a new text node.
+func (p *parser) addText(text string) {
+ if text == "" {
+ return
+ }
+
+ if p.shouldFosterParent() {
+ p.fosterParent(&Node{
+ Type: TextNode,
+ Data: text,
+ })
+ return
+ }
+
+ t := p.top()
+ if n := t.LastChild; n != nil && n.Type == TextNode {
+ n.Data += text
+ return
+ }
+ p.addChild(&Node{
+ Type: TextNode,
+ Data: text,
+ })
+}
+
+// addElement adds a child element based on the current token.
+func (p *parser) addElement() {
+ p.addChild(&Node{
+ Type: ElementNode,
+ DataAtom: p.tok.DataAtom,
+ Data: p.tok.Data,
+ Attr: p.tok.Attr,
+ })
+}
+
+// Section 12.2.3.3.
+func (p *parser) addFormattingElement() {
+ tagAtom, attr := p.tok.DataAtom, p.tok.Attr
+ p.addElement()
+
+ // Implement the Noah's Ark clause, but with three per family instead of two.
+ identicalElements := 0
+findIdenticalElements:
+ for i := len(p.afe) - 1; i >= 0; i-- {
+ n := p.afe[i]
+ if n.Type == scopeMarkerNode {
+ break
+ }
+ if n.Type != ElementNode {
+ continue
+ }
+ if n.Namespace != "" {
+ continue
+ }
+ if n.DataAtom != tagAtom {
+ continue
+ }
+ if len(n.Attr) != len(attr) {
+ continue
+ }
+ compareAttributes:
+ for _, t0 := range n.Attr {
+ for _, t1 := range attr {
+ if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
+ // Found a match for this attribute, continue with the next attribute.
+ continue compareAttributes
+ }
+ }
+ // If we get here, there is no attribute that matches a.
+ // Therefore the element is not identical to the new one.
+ continue findIdenticalElements
+ }
+
+ identicalElements++
+ if identicalElements >= 3 {
+ p.afe.remove(n)
+ }
+ }
+
+ p.afe = append(p.afe, p.top())
+}
+
+// Section 12.2.3.3.
+func (p *parser) clearActiveFormattingElements() {
+ for {
+ n := p.afe.pop()
+ if len(p.afe) == 0 || n.Type == scopeMarkerNode {
+ return
+ }
+ }
+}
+
+// Section 12.2.3.3.
+func (p *parser) reconstructActiveFormattingElements() {
+ n := p.afe.top()
+ if n == nil {
+ return
+ }
+ if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
+ return
+ }
+ i := len(p.afe) - 1
+ for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
+ if i == 0 {
+ i = -1
+ break
+ }
+ i--
+ n = p.afe[i]
+ }
+ for {
+ i++
+ clone := p.afe[i].clone()
+ p.addChild(clone)
+ p.afe[i] = clone
+ if i == len(p.afe)-1 {
+ break
+ }
+ }
+}
+
+// Section 12.2.4.
+func (p *parser) acknowledgeSelfClosingTag() {
+ p.hasSelfClosingToken = false
+}
+
+// An insertion mode (section 12.2.3.1) is the state transition function from
+// a particular state in the HTML5 parser's state machine. It updates the
+// parser's fields depending on parser.tok (where ErrorToken means EOF).
+// It returns whether the token was consumed.
+type insertionMode func(*parser) bool
+
+// setOriginalIM sets the insertion mode to return to after completing a text or
+// inTableText insertion mode.
+// Section 12.2.3.1, "using the rules for".
+func (p *parser) setOriginalIM() {
+ if p.originalIM != nil {
+ panic("html: bad parser state: originalIM was set twice")
+ }
+ p.originalIM = p.im
+}
+
+// Section 12.2.3.1, "reset the insertion mode".
+func (p *parser) resetInsertionMode() {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if i == 0 && p.context != nil {
+ n = p.context
+ }
+
+ switch n.DataAtom {
+ case a.Select:
+ p.im = inSelectIM
+ case a.Td, a.Th:
+ p.im = inCellIM
+ case a.Tr:
+ p.im = inRowIM
+ case a.Tbody, a.Thead, a.Tfoot:
+ p.im = inTableBodyIM
+ case a.Caption:
+ p.im = inCaptionIM
+ case a.Colgroup:
+ p.im = inColumnGroupIM
+ case a.Table:
+ p.im = inTableIM
+ case a.Head:
+ p.im = inBodyIM
+ case a.Body:
+ p.im = inBodyIM
+ case a.Frameset:
+ p.im = inFramesetIM
+ case a.Html:
+ p.im = beforeHeadIM
+ default:
+ continue
+ }
+ return
+ }
+ p.im = inBodyIM
+}
+
+const whitespace = " \t\r\n\f"
+
+// Section 12.2.5.4.1.
+func initialIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ n, quirks := parseDoctype(p.tok.Data)
+ p.doc.AppendChild(n)
+ p.quirks = quirks
+ p.im = beforeHTMLIM
+ return true
+ }
+ p.quirks = true
+ p.im = beforeHTMLIM
+ return false
+}
+
+// Section 12.2.5.4.2.
+func beforeHTMLIM(p *parser) bool {
+ switch p.tok.Type {
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ p.addElement()
+ p.im = beforeHeadIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head, a.Body, a.Html, a.Br:
+ p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+ p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+ return false
+}
+
+// Section 12.2.5.4.3.
+func beforeHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Head:
+ p.addElement()
+ p.head = p.top()
+ p.im = inHeadIM
+ return true
+ case a.Html:
+ return inBodyIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head, a.Body, a.Html, a.Br:
+ p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+ return false
+}
+
+// Section 12.2.5.4.4.
+func inHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ return true
+ case a.Script, a.Title, a.Noscript, a.Noframes, a.Style:
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ return true
+ case a.Head:
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head:
+ n := p.oe.pop()
+ if n.DataAtom != a.Head {
+ panic("html: bad parser state: <head> element not found, in the in-head insertion mode")
+ }
+ p.im = afterHeadIM
+ return true
+ case a.Body, a.Html, a.Br:
+ p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+ return false
+}
+
+// Section 12.2.5.4.6.
+func afterHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Body:
+ p.addElement()
+ p.framesetOK = false
+ p.im = inBodyIM
+ return true
+ case a.Frameset:
+ p.addElement()
+ p.im = inFramesetIM
+ return true
+ case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+ p.oe = append(p.oe, p.head)
+ defer p.oe.remove(p.head)
+ return inHeadIM(p)
+ case a.Head:
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Body, a.Html, a.Br:
+ // Drop down to creating an implied <body> tag.
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
+ p.framesetOK = true
+ return false
+}
+
+// copyAttributes copies attributes of src not found on dst to dst.
+func copyAttributes(dst *Node, src Token) {
+ if len(src.Attr) == 0 {
+ return
+ }
+ attr := map[string]string{}
+ for _, t := range dst.Attr {
+ attr[t.Key] = t.Val
+ }
+ for _, t := range src.Attr {
+ if _, ok := attr[t.Key]; !ok {
+ dst.Attr = append(dst.Attr, t)
+ attr[t.Key] = t.Val
+ }
+ }
+}
+
+// Section 12.2.5.4.7.
+func inBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ d := p.tok.Data
+ switch n := p.oe.top(); n.DataAtom {
+ case a.Pre, a.Listing:
+ if n.FirstChild == nil {
+ // Ignore a newline at the start of a <pre> block.
+ if d != "" && d[0] == '\r' {
+ d = d[1:]
+ }
+ if d != "" && d[0] == '\n' {
+ d = d[1:]
+ }
+ }
+ }
+ d = strings.Replace(d, "\x00", "", -1)
+ if d == "" {
+ return true
+ }
+ p.reconstructActiveFormattingElements()
+ p.addText(d)
+ if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+ // There were non-whitespace characters inserted.
+ p.framesetOK = false
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ copyAttributes(p.oe[0], p.tok)
+ case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+ return inHeadIM(p)
+ case a.Body:
+ if len(p.oe) >= 2 {
+ body := p.oe[1]
+ if body.Type == ElementNode && body.DataAtom == a.Body {
+ p.framesetOK = false
+ copyAttributes(body, p.tok)
+ }
+ }
+ case a.Frameset:
+ if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+ // Ignore the token.
+ return true
+ }
+ body := p.oe[1]
+ if body.Parent != nil {
+ body.Parent.RemoveChild(body)
+ }
+ p.oe = p.oe[:1]
+ p.addElement()
+ p.im = inFramesetIM
+ return true
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.popUntil(buttonScope, a.P)
+ switch n := p.top(); n.DataAtom {
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Pre, a.Listing:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ // The newline, if any, will be dealt with by the TextToken case.
+ p.framesetOK = false
+ case a.Form:
+ if p.form == nil {
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ p.form = p.top()
+ }
+ case a.Li:
+ p.framesetOK = false
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ node := p.oe[i]
+ switch node.DataAtom {
+ case a.Li:
+ p.oe = p.oe[:i]
+ case a.Address, a.Div, a.P:
+ continue
+ default:
+ if !isSpecialElement(node) {
+ continue
+ }
+ }
+ break
+ }
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Dd, a.Dt:
+ p.framesetOK = false
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ node := p.oe[i]
+ switch node.DataAtom {
+ case a.Dd, a.Dt:
+ p.oe = p.oe[:i]
+ case a.Address, a.Div, a.P:
+ continue
+ default:
+ if !isSpecialElement(node) {
+ continue
+ }
+ }
+ break
+ }
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Plaintext:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Button:
+ p.popUntil(defaultScope, a.Button)
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ case a.A:
+ for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+ if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+ p.inBodyEndTagFormatting(a.A)
+ p.oe.remove(n)
+ p.afe.remove(n)
+ break
+ }
+ }
+ p.reconstructActiveFormattingElements()
+ p.addFormattingElement()
+ case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+ p.reconstructActiveFormattingElements()
+ p.addFormattingElement()
+ case a.Nobr:
+ p.reconstructActiveFormattingElements()
+ if p.elementInScope(defaultScope, a.Nobr) {
+ p.inBodyEndTagFormatting(a.Nobr)
+ p.reconstructActiveFormattingElements()
+ }
+ p.addFormattingElement()
+ case a.Applet, a.Marquee, a.Object:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.afe = append(p.afe, &scopeMarker)
+ p.framesetOK = false
+ case a.Table:
+ if !p.quirks {
+ p.popUntil(buttonScope, a.P)
+ }
+ p.addElement()
+ p.framesetOK = false
+ p.im = inTableIM
+ return true
+ case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ if p.tok.DataAtom == a.Input {
+ for _, t := range p.tok.Attr {
+ if t.Key == "type" {
+ if strings.ToLower(t.Val) == "hidden" {
+ // Skip setting framesetOK = false
+ return true
+ }
+ }
+ }
+ }
+ p.framesetOK = false
+ case a.Param, a.Source, a.Track:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ case a.Hr:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ p.framesetOK = false
+ case a.Image:
+ p.tok.DataAtom = a.Img
+ p.tok.Data = a.Img.String()
+ return false
+ case a.Isindex:
+ if p.form != nil {
+ // Ignore the token.
+ return true
+ }
+ action := ""
+ prompt := "This is a searchable index. Enter search keywords: "
+ attr := []Attribute{{Key: "name", Val: "isindex"}}
+ for _, t := range p.tok.Attr {
+ switch t.Key {
+ case "action":
+ action = t.Val
+ case "name":
+ // Ignore the attribute.
+ case "prompt":
+ prompt = t.Val
+ default:
+ attr = append(attr, t)
+ }
+ }
+ p.acknowledgeSelfClosingTag()
+ p.popUntil(buttonScope, a.P)
+ p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+ if action != "" {
+ p.form.Attr = []Attribute{{Key: "action", Val: action}}
+ }
+ p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+ p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+ p.addText(prompt)
+ p.addChild(&Node{
+ Type: ElementNode,
+ DataAtom: a.Input,
+ Data: a.Input.String(),
+ Attr: attr,
+ })
+ p.oe.pop()
+ p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+ p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+ p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+ case a.Textarea:
+ p.addElement()
+ p.setOriginalIM()
+ p.framesetOK = false
+ p.im = textIM
+ case a.Xmp:
+ p.popUntil(buttonScope, a.P)
+ p.reconstructActiveFormattingElements()
+ p.framesetOK = false
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Iframe:
+ p.framesetOK = false
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Noembed, a.Noscript:
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectIM
+ return true
+ case a.Optgroup, a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ case a.Rp, a.Rt:
+ if p.elementInScope(defaultScope, a.Ruby) {
+ p.generateImpliedEndTags()
+ }
+ p.addElement()
+ case a.Math, a.Svg:
+ p.reconstructActiveFormattingElements()
+ if p.tok.DataAtom == a.Math {
+ adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+ } else {
+ adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+ }
+ adjustForeignAttributes(p.tok.Attr)
+ p.addElement()
+ p.top().Namespace = p.tok.Data
+ if p.hasSelfClosingToken {
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ }
+ return true
+ case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ default:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Body:
+ if p.elementInScope(defaultScope, a.Body) {
+ p.im = afterBodyIM
+ }
+ case a.Html:
+ if p.elementInScope(defaultScope, a.Body) {
+ p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+ return false
+ }
+ return true
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+ p.popUntil(defaultScope, p.tok.DataAtom)
+ case a.Form:
+ node := p.form
+ p.form = nil
+ i := p.indexOfElementInScope(defaultScope, a.Form)
+ if node == nil || i == -1 || p.oe[i] != node {
+ // Ignore the token.
+ return true
+ }
+ p.generateImpliedEndTags()
+ p.oe.remove(node)
+ case a.P:
+ if !p.elementInScope(buttonScope, a.P) {
+ p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+ }
+ p.popUntil(buttonScope, a.P)
+ case a.Li:
+ p.popUntil(listItemScope, a.Li)
+ case a.Dd, a.Dt:
+ p.popUntil(defaultScope, p.tok.DataAtom)
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+ case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+ p.inBodyEndTagFormatting(p.tok.DataAtom)
+ case a.Applet, a.Marquee, a.Object:
+ if p.popUntil(defaultScope, p.tok.DataAtom) {
+ p.clearActiveFormattingElements()
+ }
+ case a.Br:
+ p.tok.Type = StartTagToken
+ return false
+ default:
+ p.inBodyEndTagOther(p.tok.DataAtom)
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ }
+
+ return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+ // This is the "adoption agency" algorithm, described at
+ // https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+ // TODO: this is a fairly literal line-by-line translation of that algorithm.
+ // Once the code successfully parses the comprehensive test suite, we should
+ // refactor this code to be more idiomatic.
+
+ // Steps 1-4. The outer loop.
+ for i := 0; i < 8; i++ {
+ // Step 5. Find the formatting element.
+ var formattingElement *Node
+ for j := len(p.afe) - 1; j >= 0; j-- {
+ if p.afe[j].Type == scopeMarkerNode {
+ break
+ }
+ if p.afe[j].DataAtom == tagAtom {
+ formattingElement = p.afe[j]
+ break
+ }
+ }
+ if formattingElement == nil {
+ p.inBodyEndTagOther(tagAtom)
+ return
+ }
+ feIndex := p.oe.index(formattingElement)
+ if feIndex == -1 {
+ p.afe.remove(formattingElement)
+ return
+ }
+ if !p.elementInScope(defaultScope, tagAtom) {
+ // Ignore the tag.
+ return
+ }
+
+ // Steps 9-10. Find the furthest block.
+ var furthestBlock *Node
+ for _, e := range p.oe[feIndex:] {
+ if isSpecialElement(e) {
+ furthestBlock = e
+ break
+ }
+ }
+ if furthestBlock == nil {
+ e := p.oe.pop()
+ for e != formattingElement {
+ e = p.oe.pop()
+ }
+ p.afe.remove(e)
+ return
+ }
+
+ // Steps 11-12. Find the common ancestor and bookmark node.
+ commonAncestor := p.oe[feIndex-1]
+ bookmark := p.afe.index(formattingElement)
+
+ // Step 13. The inner loop. Find the lastNode to reparent.
+ lastNode := furthestBlock
+ node := furthestBlock
+ x := p.oe.index(node)
+ // Steps 13.1-13.2
+ for j := 0; j < 3; j++ {
+ // Step 13.3.
+ x--
+ node = p.oe[x]
+ // Step 13.4 - 13.5.
+ if p.afe.index(node) == -1 {
+ p.oe.remove(node)
+ continue
+ }
+ // Step 13.6.
+ if node == formattingElement {
+ break
+ }
+ // Step 13.7.
+ clone := node.clone()
+ p.afe[p.afe.index(node)] = clone
+ p.oe[p.oe.index(node)] = clone
+ node = clone
+ // Step 13.8.
+ if lastNode == furthestBlock {
+ bookmark = p.afe.index(node) + 1
+ }
+ // Step 13.9.
+ if lastNode.Parent != nil {
+ lastNode.Parent.RemoveChild(lastNode)
+ }
+ node.AppendChild(lastNode)
+ // Step 13.10.
+ lastNode = node
+ }
+
+ // Step 14. Reparent lastNode to the common ancestor,
+ // or for misnested table nodes, to the foster parent.
+ if lastNode.Parent != nil {
+ lastNode.Parent.RemoveChild(lastNode)
+ }
+ switch commonAncestor.DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ p.fosterParent(lastNode)
+ default:
+ commonAncestor.AppendChild(lastNode)
+ }
+
+ // Steps 15-17. Reparent nodes from the furthest block's children
+ // to a clone of the formatting element.
+ clone := formattingElement.clone()
+ reparentChildren(clone, furthestBlock)
+ furthestBlock.AppendChild(clone)
+
+ // Step 18. Fix up the list of active formatting elements.
+ if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+ // Move the bookmark with the rest of the list.
+ bookmark--
+ }
+ p.afe.remove(formattingElement)
+ p.afe.insert(bookmark, clone)
+
+ // Step 19. Fix up the stack of open elements.
+ p.oe.remove(formattingElement)
+ p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+ }
+}
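For orientation, a minimal sketch (not part of the vendored file) of the kind of misnested markup the adoption agency algorithm repairs; it assumes the package is imported as golang.org/x/net/html:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// The </b> arrives while <i> is still open, so the adoption agency
	// algorithm clones the formatting elements to produce a properly
	// nested tree.
	doc, err := html.Parse(strings.NewReader("<p>1<b>2<i>3</b>4</i>5</p>"))
	if err != nil {
		panic(err)
	}
	// Rendering the repaired tree typically gives roughly:
	// <html><head></head><body><p>1<b>2<i>3</i></b><i>4</i>5</p></body></html>
	html.Render(os.Stdout, doc)
}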
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].DataAtom == tagAtom {
+ p.oe = p.oe[:i]
+ break
+ }
+ if isSpecialElement(p.oe[i]) {
+ break
+ }
+ }
+}
+
+// Section 12.2.5.4.8.
+func textIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ p.oe.pop()
+ case TextToken:
+ d := p.tok.Data
+ if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+ // Ignore a newline at the start of a <textarea> block.
+ if d != "" && d[0] == '\r' {
+ d = d[1:]
+ }
+ if d != "" && d[0] == '\n' {
+ d = d[1:]
+ }
+ }
+ if d == "" {
+ return true
+ }
+ p.addText(d)
+ return true
+ case EndTagToken:
+ p.oe.pop()
+ }
+ p.im = p.originalIM
+ p.originalIM = nil
+ return p.tok.Type == EndTagToken
+}
+
+// Section 12.2.5.4.9.
+func inTableIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
+ switch p.oe.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if strings.Trim(p.tok.Data, whitespace) == "" {
+ p.addText(p.tok.Data)
+ return true
+ }
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption:
+ p.clearStackToContext(tableScope)
+ p.afe = append(p.afe, &scopeMarker)
+ p.addElement()
+ p.im = inCaptionIM
+ return true
+ case a.Colgroup:
+ p.clearStackToContext(tableScope)
+ p.addElement()
+ p.im = inColumnGroupIM
+ return true
+ case a.Col:
+ p.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())
+ return false
+ case a.Tbody, a.Tfoot, a.Thead:
+ p.clearStackToContext(tableScope)
+ p.addElement()
+ p.im = inTableBodyIM
+ return true
+ case a.Td, a.Th, a.Tr:
+ p.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())
+ return false
+ case a.Table:
+ if p.popUntil(tableScope, a.Table) {
+ p.resetInsertionMode()
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Style, a.Script:
+ return inHeadIM(p)
+ case a.Input:
+ for _, t := range p.tok.Attr {
+ if t.Key == "type" && strings.ToLower(t.Val) == "hidden" {
+ p.addElement()
+ p.oe.pop()
+ return true
+ }
+ }
+ // Otherwise drop down to the default action.
+ case a.Form:
+ if p.form != nil {
+ // Ignore the token.
+ return true
+ }
+ p.addElement()
+ p.form = p.oe.pop()
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ switch p.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ p.fosterParenting = true
+ }
+ p.addElement()
+ p.fosterParenting = false
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Table:
+ if p.popUntil(tableScope, a.Table) {
+ p.resetInsertionMode()
+ return true
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.fosterParenting = true
+ defer func() { p.fosterParenting = false }()
+
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.11.
+func inCaptionIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ }
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ }
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.12.
+func inColumnGroupIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Col:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Colgroup:
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ p.im = inTableIM
+ }
+ return true
+ case a.Col:
+ // Ignore the token.
+ return true
+ }
+ }
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ p.im = inTableIM
+ return false
+ }
+ return true
+}
+
+// Section 12.2.5.4.13.
+func inTableBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Tr:
+ p.clearStackToContext(tableBodyScope)
+ p.addElement()
+ p.im = inRowIM
+ return true
+ case a.Td, a.Th:
+ p.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())
+ return false
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
+ if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+ p.im = inTableIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Tbody, a.Tfoot, a.Thead:
+ if p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.clearStackToContext(tableBodyScope)
+ p.oe.pop()
+ p.im = inTableIM
+ }
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+ p.im = inTableIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+
+ return inTableIM(p)
+}
+
+// Section 12.2.5.4.14.
+func inRowIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Td, a.Th:
+ p.clearStackToContext(tableRowScope)
+ p.addElement()
+ p.afe = append(p.afe, &scopeMarker)
+ p.im = inCellIM
+ return true
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Tr:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return true
+ }
+ // Ignore the token.
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Tbody, a.Tfoot, a.Thead:
+ if p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:
+ // Ignore the token.
+ return true
+ }
+ }
+
+ return inTableIM(p)
+}
+
+// Section 12.2.5.4.15.
+func inCellIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Td, a.Th) {
+ // Close the cell and reprocess.
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Td, a.Th:
+ if !p.popUntil(tableScope, p.tok.DataAtom) {
+ // Ignore the token.
+ return true
+ }
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html:
+ // Ignore the token.
+ return true
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if !p.elementInScope(tableScope, p.tok.DataAtom) {
+ // Ignore the token.
+ return true
+ }
+ // Close the cell and reprocess.
+ p.popUntil(tableScope, a.Td, a.Th)
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return false
+ }
+ }
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.16.
+func inSelectIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Optgroup:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ if p.top().DataAtom == a.Optgroup {
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Select:
+ p.tok.Type = EndTagToken
+ return false
+ case a.Input, a.Keygen, a.Textarea:
+ if p.elementInScope(selectScope, a.Select) {
+ p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+ return false
+ }
+ // In order to properly ignore <textarea>, we need to change the tokenizer mode.
+ p.tokenizer.NextIsNotRawText()
+ // Ignore the token.
+ return true
+ case a.Script:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ case a.Optgroup:
+ i := len(p.oe) - 1
+ if p.oe[i].DataAtom == a.Option {
+ i--
+ }
+ if p.oe[i].DataAtom == a.Optgroup {
+ p.oe = p.oe[:i]
+ }
+ case a.Select:
+ if p.popUntil(selectScope, a.Select) {
+ p.resetInsertionMode()
+ }
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ return true
+}
+
+// Section 12.2.5.4.17.
+func inSelectInTableIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken, EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
+ if p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ }
+ }
+ return inSelectIM(p)
+}
+
+// Section 12.2.5.4.18.
+func afterBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) == 0 {
+ // It was all whitespace.
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ return inBodyIM(p)
+ }
+ case EndTagToken:
+ if p.tok.DataAtom == a.Html {
+ if !p.fragment {
+ p.im = afterAfterBodyIM
+ }
+ return true
+ }
+ case CommentToken:
+ // The comment is attached to the <html> element.
+ if len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {
+ panic("html: bad parser state: <html> element not found, in the after-body insertion mode")
+ }
+ p.oe[0].AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+ p.im = inBodyIM
+ return false
+}
+
+// Section 12.2.5.4.19.
+func inFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.addText(s)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Frameset:
+ p.addElement()
+ case a.Frame:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Frameset:
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ if p.oe.top().DataAtom != a.Frameset {
+ p.im = afterFramesetIM
+ return true
+ }
+ }
+ }
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.4.20.
+func afterFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.addText(s)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ p.im = afterAfterFramesetIM
+ return true
+ }
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.4.21.
+func afterAfterBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) == 0 {
+ // It was all whitespace.
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ return inBodyIM(p)
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ return inBodyIM(p)
+ }
+ p.im = inBodyIM
+ return false
+}
+
+// Section 12.2.5.4.22.
+func afterAfterFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.tok.Data = s
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case DoctypeToken:
+ return inBodyIM(p)
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+const whitespaceOrNUL = whitespace + "\x00"
+
+// Section 12.2.5.5.
+func parseForeignContent(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ if p.framesetOK {
+ p.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == ""
+ }
+ p.tok.Data = strings.Replace(p.tok.Data, "\x00", "\ufffd", -1)
+ p.addText(p.tok.Data)
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case StartTagToken:
+ b := breakout[p.tok.Data]
+ if p.tok.DataAtom == a.Font {
+ loop:
+ for _, attr := range p.tok.Attr {
+ switch attr.Key {
+ case "color", "face", "size":
+ b = true
+ break loop
+ }
+ }
+ }
+ if b {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
+ p.oe = p.oe[:i+1]
+ break
+ }
+ }
+ return false
+ }
+ switch p.top().Namespace {
+ case "math":
+ adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+ case "svg":
+ // Adjust SVG tag names. The tokenizer lower-cases tag names, but
+ // SVG wants e.g. "foreignObject" with a capital second "O".
+ if x := svgTagNameAdjustments[p.tok.Data]; x != "" {
+ p.tok.DataAtom = a.Lookup([]byte(x))
+ p.tok.Data = x
+ }
+ adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+ default:
+ panic("html: bad parser state: unexpected namespace")
+ }
+ adjustForeignAttributes(p.tok.Attr)
+ namespace := p.top().Namespace
+ p.addElement()
+ p.top().Namespace = namespace
+ if namespace != "" {
+ // Don't let the tokenizer go into raw text mode in foreign content
+ // (e.g. in an SVG <title> tag).
+ p.tokenizer.NextIsNotRawText()
+ }
+ if p.hasSelfClosingToken {
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ }
+ case EndTagToken:
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].Namespace == "" {
+ return p.im(p)
+ }
+ if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
+ p.oe = p.oe[:i]
+ break
+ }
+ }
+ return true
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.
+func (p *parser) inForeignContent() bool {
+ if len(p.oe) == 0 {
+ return false
+ }
+ n := p.oe[len(p.oe)-1]
+ if n.Namespace == "" {
+ return false
+ }
+ if mathMLTextIntegrationPoint(n) {
+ if p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {
+ return false
+ }
+ if p.tok.Type == TextToken {
+ return false
+ }
+ }
+ if n.Namespace == "math" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {
+ return false
+ }
+ if htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {
+ return false
+ }
+ if p.tok.Type == ErrorToken {
+ return false
+ }
+ return true
+}
+
+// parseImpliedToken parses a token as though it had appeared in the parser's
+// input.
+func (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {
+ realToken, selfClosing := p.tok, p.hasSelfClosingToken
+ p.tok = Token{
+ Type: t,
+ DataAtom: dataAtom,
+ Data: data,
+ }
+ p.hasSelfClosingToken = false
+ p.parseCurrentToken()
+ p.tok, p.hasSelfClosingToken = realToken, selfClosing
+}
+
+// parseCurrentToken runs the current token through the parsing routines
+// until it is consumed.
+func (p *parser) parseCurrentToken() {
+ if p.tok.Type == SelfClosingTagToken {
+ p.hasSelfClosingToken = true
+ p.tok.Type = StartTagToken
+ }
+
+ consumed := false
+ for !consumed {
+ if p.inForeignContent() {
+ consumed = parseForeignContent(p)
+ } else {
+ consumed = p.im(p)
+ }
+ }
+
+ if p.hasSelfClosingToken {
+ // This is a parse error, but ignore it.
+ p.hasSelfClosingToken = false
+ }
+}
+
+func (p *parser) parse() error {
+ // Iterate until EOF. Any other error will cause an early return.
+ var err error
+ for err != io.EOF {
+ // CDATA sections are allowed only in foreign content.
+ n := p.oe.top()
+ p.tokenizer.AllowCDATA(n != nil && n.Namespace != "")
+ // Read and parse the next token.
+ p.tokenizer.Next()
+ p.tok = p.tokenizer.Token()
+ if p.tok.Type == ErrorToken {
+ err = p.tokenizer.Err()
+ if err != nil && err != io.EOF {
+ return err
+ }
+ }
+ p.parseCurrentToken()
+ }
+ return nil
+}
+
+// Parse returns the parse tree for the HTML from the given Reader.
+// The input is assumed to be UTF-8 encoded.
+func Parse(r io.Reader) (*Node, error) {
+ p := &parser{
+ tokenizer: NewTokenizer(r),
+ doc: &Node{
+ Type: DocumentNode,
+ },
+ scripting: true,
+ framesetOK: true,
+ im: initialIM,
+ }
+ err := p.parse()
+ if err != nil {
+ return nil, err
+ }
+ return p.doc, nil
+}
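A minimal usage sketch for Parse (illustrative only, not part of the vendored file):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("<p>Hello, <b>world</b>!</p>"))
	if err != nil {
		panic(err)
	}
	// Walk the tree and print every element name; the parser supplies the
	// implied <html>, <head> and <body> elements.
	var walk func(n *html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode {
			fmt.Println(n.Data)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc) // html, head, body, p, b
}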
+
+// ParseFragment parses a fragment of HTML and returns the nodes that were
+// found. If the fragment is the InnerHTML for an existing element, pass that
+// element in context.
+func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
+ contextTag := ""
+ if context != nil {
+ if context.Type != ElementNode {
+ return nil, errors.New("html: ParseFragment of non-element Node")
+ }
+ // The next check isn't just context.DataAtom.String() == context.Data because
+ // it is valid to pass an element whose tag isn't a known atom. For example,
+ // DataAtom == 0 and Data = "tagfromthefuture" is perfectly consistent.
+ if context.DataAtom != a.Lookup([]byte(context.Data)) {
+ return nil, fmt.Errorf("html: inconsistent Node: DataAtom=%q, Data=%q", context.DataAtom, context.Data)
+ }
+ contextTag = context.DataAtom.String()
+ }
+ p := &parser{
+ tokenizer: NewTokenizerFragment(r, contextTag),
+ doc: &Node{
+ Type: DocumentNode,
+ },
+ scripting: true,
+ fragment: true,
+ context: context,
+ }
+
+ root := &Node{
+ Type: ElementNode,
+ DataAtom: a.Html,
+ Data: a.Html.String(),
+ }
+ p.doc.AppendChild(root)
+ p.oe = nodeStack{root}
+ p.resetInsertionMode()
+
+ for n := context; n != nil; n = n.Parent {
+ if n.Type == ElementNode && n.DataAtom == a.Form {
+ p.form = n
+ break
+ }
+ }
+
+ err := p.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ parent := p.doc
+ if context != nil {
+ parent = root
+ }
+
+ var result []*Node
+ for c := parent.FirstChild; c != nil; {
+ next := c.NextSibling
+ parent.RemoveChild(c)
+ result = append(result, c)
+ c = next
+ }
+ return result, nil
+}
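A small sketch of fragment parsing with a context element (illustrative only, not part of the vendored file); the context node here is a hypothetical <td> cell:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// Parse markup as if it were the InnerHTML of a <td> element.
	td := &html.Node{
		Type:     html.ElementNode,
		DataAtom: atom.Td,
		Data:     "td",
	}
	nodes, err := html.ParseFragment(strings.NewReader("<b>cell</b> text"), td)
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Data) // "b", then " text"
	}
}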
diff --git a/vendor/golang.org/x/net/html/parse_test.go b/vendor/golang.org/x/net/html/parse_test.go
new file mode 100644
index 000000000..7e47d11be
--- /dev/null
+++ b/vendor/golang.org/x/net/html/parse_test.go
@@ -0,0 +1,388 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/html/atom"
+)
+
+// readParseTest reads a single test case from r.
+func readParseTest(r *bufio.Reader) (text, want, context string, err error) {
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return "", "", "", err
+ }
+ var b []byte
+
+ // Read the HTML.
+ if string(line) != "#data\n" {
+ return "", "", "", fmt.Errorf(`got %q want "#data\n"`, line)
+ }
+ for {
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return "", "", "", err
+ }
+ if line[0] == '#' {
+ break
+ }
+ b = append(b, line...)
+ }
+ text = strings.TrimSuffix(string(b), "\n")
+ b = b[:0]
+
+ // Skip the error list.
+ if string(line) != "#errors\n" {
+ return "", "", "", fmt.Errorf(`got %q want "#errors\n"`, line)
+ }
+ for {
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return "", "", "", err
+ }
+ if line[0] == '#' {
+ break
+ }
+ }
+
+ if string(line) == "#document-fragment\n" {
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return "", "", "", err
+ }
+ context = strings.TrimSpace(string(line))
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return "", "", "", err
+ }
+ }
+
+ // Read the dump of what the parse tree should be.
+ if string(line) != "#document\n" {
+ return "", "", "", fmt.Errorf(`got %q want "#document\n"`, line)
+ }
+ inQuote := false
+ for {
+ line, err = r.ReadSlice('\n')
+ if err != nil && err != io.EOF {
+ return "", "", "", err
+ }
+ trimmed := bytes.Trim(line, "| \n")
+ if len(trimmed) > 0 {
+ if line[0] == '|' && trimmed[0] == '"' {
+ inQuote = true
+ }
+ if trimmed[len(trimmed)-1] == '"' && !(line[0] == '|' && len(trimmed) == 1) {
+ inQuote = false
+ }
+ }
+ if len(line) == 0 || len(line) == 1 && line[0] == '\n' && !inQuote {
+ break
+ }
+ b = append(b, line...)
+ }
+ return text, string(b), context, nil
+}
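For reference, a case in the WebKit .dat format that readParseTest consumes looks roughly like the following (a reconstructed example, not copied from any particular test file); the #errors section, which this reader skips, is left empty:

#data
<p>One<p>Two
#errors
#document
| <html>
|   <head>
|   <body>
|     <p>
|       "One"
|     <p>
|       "Two"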
+
+func dumpIndent(w io.Writer, level int) {
+ io.WriteString(w, "| ")
+ for i := 0; i < level; i++ {
+ io.WriteString(w, " ")
+ }
+}
+
+type sortedAttributes []Attribute
+
+func (a sortedAttributes) Len() int {
+ return len(a)
+}
+
+func (a sortedAttributes) Less(i, j int) bool {
+ if a[i].Namespace != a[j].Namespace {
+ return a[i].Namespace < a[j].Namespace
+ }
+ return a[i].Key < a[j].Key
+}
+
+func (a sortedAttributes) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
+
+func dumpLevel(w io.Writer, n *Node, level int) error {
+ dumpIndent(w, level)
+ switch n.Type {
+ case ErrorNode:
+ return errors.New("unexpected ErrorNode")
+ case DocumentNode:
+ return errors.New("unexpected DocumentNode")
+ case ElementNode:
+ if n.Namespace != "" {
+ fmt.Fprintf(w, "<%s %s>", n.Namespace, n.Data)
+ } else {
+ fmt.Fprintf(w, "<%s>", n.Data)
+ }
+ attr := sortedAttributes(n.Attr)
+ sort.Sort(attr)
+ for _, a := range attr {
+ io.WriteString(w, "\n")
+ dumpIndent(w, level+1)
+ if a.Namespace != "" {
+ fmt.Fprintf(w, `%s %s="%s"`, a.Namespace, a.Key, a.Val)
+ } else {
+ fmt.Fprintf(w, `%s="%s"`, a.Key, a.Val)
+ }
+ }
+ case TextNode:
+ fmt.Fprintf(w, `"%s"`, n.Data)
+ case CommentNode:
+ fmt.Fprintf(w, "<!-- %s -->", n.Data)
+ case DoctypeNode:
+ fmt.Fprintf(w, "<!DOCTYPE %s", n.Data)
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" || s != "" {
+ fmt.Fprintf(w, ` "%s"`, p)
+ fmt.Fprintf(w, ` "%s"`, s)
+ }
+ }
+ io.WriteString(w, ">")
+ case scopeMarkerNode:
+ return errors.New("unexpected scopeMarkerNode")
+ default:
+ return errors.New("unknown node type")
+ }
+ io.WriteString(w, "\n")
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := dumpLevel(w, c, level+1); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func dump(n *Node) (string, error) {
+ if n == nil || n.FirstChild == nil {
+ return "", nil
+ }
+ var b bytes.Buffer
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := dumpLevel(&b, c, 0); err != nil {
+ return "", err
+ }
+ }
+ return b.String(), nil
+}
+
+const testDataDir = "testdata/webkit/"
+
+func TestParser(t *testing.T) {
+ testFiles, err := filepath.Glob(testDataDir + "*.dat")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tf := range testFiles {
+ f, err := os.Open(tf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ r := bufio.NewReader(f)
+
+ for i := 0; ; i++ {
+ text, want, context, err := readParseTest(r)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = testParseCase(text, want, context)
+
+ if err != nil {
+ t.Errorf("%s test #%d %q, %s", tf, i, text, err)
+ }
+ }
+ }
+}
+
+// testParseCase tests one test case from the test files. If the test does not
+// pass, it returns an error that explains the failure.
+// text is the HTML to be parsed, want is a dump of the correct parse tree,
+// and context is the name of the context node, if any.
+func testParseCase(text, want, context string) (err error) {
+ defer func() {
+ if x := recover(); x != nil {
+ switch e := x.(type) {
+ case error:
+ err = e
+ default:
+ err = fmt.Errorf("%v", e)
+ }
+ }
+ }()
+
+ var doc *Node
+ if context == "" {
+ doc, err = Parse(strings.NewReader(text))
+ if err != nil {
+ return err
+ }
+ } else {
+ contextNode := &Node{
+ Type: ElementNode,
+ DataAtom: atom.Lookup([]byte(context)),
+ Data: context,
+ }
+ nodes, err := ParseFragment(strings.NewReader(text), contextNode)
+ if err != nil {
+ return err
+ }
+ doc = &Node{
+ Type: DocumentNode,
+ }
+ for _, n := range nodes {
+ doc.AppendChild(n)
+ }
+ }
+
+ if err := checkTreeConsistency(doc); err != nil {
+ return err
+ }
+
+ got, err := dump(doc)
+ if err != nil {
+ return err
+ }
+ // Compare the parsed tree to the #document section.
+ if got != want {
+ return fmt.Errorf("got vs want:\n----\n%s----\n%s----", got, want)
+ }
+
+ if renderTestBlacklist[text] || context != "" {
+ return nil
+ }
+
+ // Check that rendering and re-parsing results in an identical tree.
+ pr, pw := io.Pipe()
+ go func() {
+ pw.CloseWithError(Render(pw, doc))
+ }()
+ doc1, err := Parse(pr)
+ if err != nil {
+ return err
+ }
+ got1, err := dump(doc1)
+ if err != nil {
+ return err
+ }
+ if got != got1 {
+ return fmt.Errorf("got vs got1:\n----\n%s----\n%s----", got, got1)
+ }
+
+ return nil
+}
+
+// Some test inputs result in parse trees that are not 'well-formed' despite
+// following the HTML5 recovery algorithms. Rendering and re-parsing such a
+// tree will not result in an exact clone of that tree. We blacklist such
+// inputs from the render test.
+var renderTestBlacklist = map[string]bool{
+ // The second <a> will be reparented to the first <table>'s parent. This
+ // results in an <a> whose parent is an <a>, which is not 'well-formed'.
+ `<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,
+ // The same thing with a <p>:
+ `<p><table></p>`: true,
+ // More cases of <a> being reparented:
+ `<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe`: true,
+ `<a><table><a></table><p><a><div><a>`: true,
+ `<a><table><td><a><table></table><a></tr><a></table><a>`: true,
+ // A similar reparenting situation involving <nobr>:
+ `<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3`: true,
+ // A <plaintext> element is reparented, putting it before a table.
+ // A <plaintext> element can't have anything after it in HTML.
+ `<table><plaintext><td>`: true,
+ `<!doctype html><table><plaintext></plaintext>`: true,
+ `<!doctype html><table><tbody><plaintext></plaintext>`: true,
+ `<!doctype html><table><tbody><tr><plaintext></plaintext>`: true,
+ // A form inside a table inside a form doesn't work either.
+ `<!doctype html><form><table></form><form></table></form>`: true,
+ // A script that ends at EOF may escape its own closing tag when rendered.
+ `<!doctype html><script><!--<script `: true,
+ `<!doctype html><script><!--<script <`: true,
+ `<!doctype html><script><!--<script <a`: true,
+ `<!doctype html><script><!--<script </`: true,
+ `<!doctype html><script><!--<script </s`: true,
+ `<!doctype html><script><!--<script </script`: true,
+ `<!doctype html><script><!--<script </scripta`: true,
+ `<!doctype html><script><!--<script -`: true,
+ `<!doctype html><script><!--<script -a`: true,
+ `<!doctype html><script><!--<script -<`: true,
+ `<!doctype html><script><!--<script --`: true,
+ `<!doctype html><script><!--<script --a`: true,
+ `<!doctype html><script><!--<script --<`: true,
+ `<script><!--<script `: true,
+ `<script><!--<script <a`: true,
+ `<script><!--<script </script`: true,
+ `<script><!--<script </scripta`: true,
+ `<script><!--<script -`: true,
+ `<script><!--<script -a`: true,
+ `<script><!--<script --`: true,
+ `<script><!--<script --a`: true,
+ `<script><!--<script <`: true,
+ `<script><!--<script </`: true,
+ `<script><!--<script </s`: true,
+ // Reconstructing the active formatting elements results in a <plaintext>
+ // element that contains an <a> element.
+ `<!doctype html><p><a><plaintext>b`: true,
+}
+
+func TestNodeConsistency(t *testing.T) {
+ // inconsistentNode is a Node whose DataAtom and Data do not agree.
+ inconsistentNode := &Node{
+ Type: ElementNode,
+ DataAtom: atom.Frameset,
+ Data: "table",
+ }
+ _, err := ParseFragment(strings.NewReader("<p>hello</p>"), inconsistentNode)
+ if err == nil {
+ t.Errorf("got nil error, want non-nil")
+ }
+}
+
+func BenchmarkParser(b *testing.B) {
+ buf, err := ioutil.ReadFile("testdata/go1.html")
+ if err != nil {
+ b.Fatalf("could not read testdata/go1.html: %v", err)
+ }
+ b.SetBytes(int64(len(buf)))
+ runtime.GC()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Parse(bytes.NewBuffer(buf))
+ }
+}
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
new file mode 100644
index 000000000..d34564f49
--- /dev/null
+++ b/vendor/golang.org/x/net/html/render.go
@@ -0,0 +1,271 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ WriteString(string) (int, error)
+}
+
+// Render renders the parse tree n to the given writer.
+//
+// Rendering is done on a 'best effort' basis: calling Parse on the output of
+// Render will always result in something similar to the original tree, but it
+// is not necessarily an exact clone unless the original tree was 'well-formed'.
+// 'Well-formed' is not easily specified; the HTML5 specification is
+// complicated.
+//
+// Calling Parse on arbitrary input typically results in a 'well-formed' parse
+// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
+// For example, in a 'well-formed' parse tree, no <a> element is a child of
+// another <a> element: parsing "<a><a>" results in two sibling elements.
+// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
+// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
+// children; the <a> is reparented to the <table>'s parent. However, calling
+// Parse on "<a><table><a>" does not return an error, but the result has an <a>
+// element with an <a> child, and is therefore not 'well-formed'.
+//
+// Programmatically constructed trees are typically also 'well-formed', but it
+// is possible to construct a tree that looks innocuous but, when rendered and
+// re-parsed, results in a different tree. A simple example is that a solitary
+// text node would become a tree containing <html>, <head> and <body> elements.
+// Another example is that the programmatic equivalent of "a<head>b</head>c"
+// becomes "<html><head><head/><body>abc</body></html>".
+func Render(w io.Writer, n *Node) error {
+ if x, ok := w.(writer); ok {
+ return render(x, n)
+ }
+ buf := bufio.NewWriter(w)
+ if err := render(buf, n); err != nil {
+ return err
+ }
+ return buf.Flush()
+}
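A minimal sketch of the parse/render round trip described above (illustrative only, not part of the vendored file):

package main

import (
	"bytes"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// "<a><table><a>" parses without error but yields a tree that is not
	// 'well-formed', so rendering and re-parsing may not reproduce it exactly.
	doc, err := html.Parse(strings.NewReader("<a><table><a>"))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	// The rendered output is itself valid input for html.Parse.
	if _, err := html.Parse(&buf); err != nil {
		panic(err)
	}
}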
+
+// plaintextAbort is returned from render1 when a <plaintext> element
+// has been rendered. No more end tags should be rendered after that.
+var plaintextAbort = errors.New("html: internal error (plaintext abort)")
+
+func render(w writer, n *Node) error {
+ err := render1(w, n)
+ if err == plaintextAbort {
+ err = nil
+ }
+ return err
+}
+
+func render1(w writer, n *Node) error {
+ // Render non-element nodes; these are the easy cases.
+ switch n.Type {
+ case ErrorNode:
+ return errors.New("html: cannot render an ErrorNode node")
+ case TextNode:
+ return escape(w, n.Data)
+ case DocumentNode:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ return nil
+ case ElementNode:
+ // No-op.
+ case CommentNode:
+ if _, err := w.WriteString("<!--"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if _, err := w.WriteString("-->"); err != nil {
+ return err
+ }
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" {
+ if _, err := w.WriteString(" PUBLIC "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, p); err != nil {
+ return err
+ }
+ if s != "" {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ } else if s != "" {
+ if _, err := w.WriteString(" SYSTEM "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ }
+ return w.WriteByte('>')
+ default:
+ return errors.New("html: unknown node type")
+ }
+
+ // Render the <xxx> opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if a.Namespace != "" {
+ if _, err := w.WriteString(a.Namespace); err != nil {
+ return err
+ }
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if n.FirstChild != nil {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+	// Add initial newline where there is danger of a newline being ignored.
+ if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+ switch n.Data {
+ case "pre", "listing", "textarea":
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render any child nodes.
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == TextNode {
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ } else {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+ if n.Data == "plaintext" {
+ // Don't render anything else. <plaintext> must be the
+ // last element in the file, with no closing tag.
+ return plaintextAbort
+ }
+ default:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the </xxx> closing tag.
+ if _, err := w.WriteString("</"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
+
+// writeQuoted writes s to w surrounded by quotes. Normally it will use double
+// quotes, but if s contains a double quote, it will use single quotes.
+// It is used for writing the identifiers in a doctype declaration.
+// In valid HTML, they can't contain both types of quotes.
+func writeQuoted(w writer, s string) error {
+ var q byte = '"'
+ if strings.Contains(s, `"`) {
+ q = '\''
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(s); err != nil {
+ return err
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "command": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true,
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
diff --git a/vendor/golang.org/x/net/html/render_test.go b/vendor/golang.org/x/net/html/render_test.go
new file mode 100644
index 000000000..11da54b31
--- /dev/null
+++ b/vendor/golang.org/x/net/html/render_test.go
@@ -0,0 +1,156 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestRenderer(t *testing.T) {
+ nodes := [...]*Node{
+ 0: {
+ Type: ElementNode,
+ Data: "html",
+ },
+ 1: {
+ Type: ElementNode,
+ Data: "head",
+ },
+ 2: {
+ Type: ElementNode,
+ Data: "body",
+ },
+ 3: {
+ Type: TextNode,
+ Data: "0<1",
+ },
+ 4: {
+ Type: ElementNode,
+ Data: "p",
+ Attr: []Attribute{
+ {
+ Key: "id",
+ Val: "A",
+ },
+ {
+ Key: "foo",
+ Val: `abc"def`,
+ },
+ },
+ },
+ 5: {
+ Type: TextNode,
+ Data: "2",
+ },
+ 6: {
+ Type: ElementNode,
+ Data: "b",
+ Attr: []Attribute{
+ {
+ Key: "empty",
+ Val: "",
+ },
+ },
+ },
+ 7: {
+ Type: TextNode,
+ Data: "3",
+ },
+ 8: {
+ Type: ElementNode,
+ Data: "i",
+ Attr: []Attribute{
+ {
+ Key: "backslash",
+ Val: `\`,
+ },
+ },
+ },
+ 9: {
+ Type: TextNode,
+ Data: "&4",
+ },
+ 10: {
+ Type: TextNode,
+ Data: "5",
+ },
+ 11: {
+ Type: ElementNode,
+ Data: "blockquote",
+ },
+ 12: {
+ Type: ElementNode,
+ Data: "br",
+ },
+ 13: {
+ Type: TextNode,
+ Data: "6",
+ },
+ }
+
+ // Build a tree out of those nodes, based on a textual representation.
+ // Only the ".\t"s are significant. The trailing HTML-like text is
+ // just commentary. The "0:" prefixes are for easy cross-reference with
+ // the nodes array.
+ treeAsText := [...]string{
+ 0: `<html>`,
+ 1: `. <head>`,
+ 2: `. <body>`,
+ 3: `. . "0&lt;1"`,
+ 4: `. . <p id="A" foo="abc&#34;def">`,
+ 5: `. . . "2"`,
+ 6: `. . . <b empty="">`,
+ 7: `. . . . "3"`,
+ 8: `. . . <i backslash="\">`,
+ 9: `. . . . "&amp;4"`,
+ 10: `. . "5"`,
+ 11: `. . <blockquote>`,
+ 12: `. . <br>`,
+ 13: `. . "6"`,
+ }
+ if len(nodes) != len(treeAsText) {
+ t.Fatal("len(nodes) != len(treeAsText)")
+ }
+ var stack [8]*Node
+ for i, line := range treeAsText {
+ level := 0
+ for line[0] == '.' {
+ // Strip a leading ".\t".
+ line = line[2:]
+ level++
+ }
+ n := nodes[i]
+ if level == 0 {
+ if stack[0] != nil {
+ t.Fatal("multiple root nodes")
+ }
+ stack[0] = n
+ } else {
+ stack[level-1].AppendChild(n)
+ stack[level] = n
+ for i := level + 1; i < len(stack); i++ {
+ stack[i] = nil
+ }
+ }
+ // At each stage of tree construction, we check all nodes for consistency.
+ for j, m := range nodes {
+ if err := checkNodeConsistency(m); err != nil {
+ t.Fatalf("i=%d, j=%d: %v", i, j, err)
+ }
+ }
+ }
+
+ want := `<html><head></head><body>0&lt;1<p id="A" foo="abc&#34;def">` +
+ `2<b empty="">3</b><i backslash="\">&amp;4</i></p>` +
+ `5<blockquote></blockquote><br/>6</body></html>`
+ b := new(bytes.Buffer)
+ if err := Render(b, nodes[0]); err != nil {
+ t.Fatal(err)
+ }
+ if got := b.String(); got != want {
+ t.Errorf("got vs want:\n%s\n%s\n", got, want)
+ }
+}
diff --git a/vendor/golang.org/x/net/html/testdata/go1.html b/vendor/golang.org/x/net/html/testdata/go1.html
new file mode 100644
index 000000000..d238257c3
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/go1.html
@@ -0,0 +1,2237 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+
+ <title>Go 1 Release Notes - The Go Programming Language</title>
+
+<link type="text/css" rel="stylesheet" href="/doc/style.css">
+<script type="text/javascript" src="/doc/godocs.js"></script>
+
+<link rel="search" type="application/opensearchdescription+xml" title="godoc" href="/opensearch.xml" />
+
+<script type="text/javascript">
+var _gaq = _gaq || [];
+_gaq.push(["_setAccount", "UA-11222381-2"]);
+_gaq.push(["_trackPageview"]);
+</script>
+</head>
+<body>
+
+<div id="topbar"><div class="container wide">
+
+<form method="GET" action="/search">
+<div id="menu">
+<a href="/doc/">Documents</a>
+<a href="/ref/">References</a>
+<a href="/pkg/">Packages</a>
+<a href="/project/">The Project</a>
+<a href="/help/">Help</a>
+<input type="text" id="search" name="q" class="inactive" value="Search">
+</div>
+<div id="heading"><a href="/">The Go Programming Language</a></div>
+</form>
+
+</div></div>
+
+<div id="page" class="wide">
+
+
+ <div id="plusone"><g:plusone size="small" annotation="none"></g:plusone></div>
+ <h1>Go 1 Release Notes</h1>
+
+
+
+
+<div id="nav"></div>
+
+
+
+
+<h2 id="introduction">Introduction to Go 1</h2>
+
+<p>
+Go version 1, Go 1 for short, defines a language and a set of core libraries
+that provide a stable foundation for creating reliable products, projects, and
+publications.
+</p>
+
+<p>
+The driving motivation for Go 1 is stability for its users. People should be able to
+write Go programs and expect that they will continue to compile and run without
+change, on a time scale of years, including in production environments such as
+Google App Engine. Similarly, people should be able to write books about Go, be
+able to say which version of Go the book is describing, and have that version
+number still be meaningful much later.
+</p>
+
+<p>
+Code that compiles in Go 1 should, with few exceptions, continue to compile and
+run throughout the lifetime of that version, even as we issue updates and bug
+fixes such as Go version 1.1, 1.2, and so on. Other than critical fixes, changes
+made to the language and library for subsequent releases of Go 1 may
+add functionality but will not break existing Go 1 programs.
+<a href="go1compat.html">The Go 1 compatibility document</a>
+explains the compatibility guidelines in more detail.
+</p>
+
+<p>
+Go 1 is a representation of Go as it is used today, not a wholesale rethinking of
+the language. We avoided designing new features and instead focused on cleaning
+up problems and inconsistencies and improving portability. There are a number of
+changes to the Go language and packages that we had considered for some time and
+prototyped but not released primarily because they are significant and
+backwards-incompatible. Go 1 was an opportunity to get them out, which is
+helpful for the long term, but also means that Go 1 introduces incompatibilities
+for old programs. Fortunately, the <code>go</code> <code>fix</code> tool can
+automate much of the work needed to bring programs up to the Go 1 standard.
+</p>
+
+<p>
+This document outlines the major changes in Go 1 that will affect programmers
+updating existing code; its reference point is the prior release, r60 (tagged as
+r60.3). It also explains how to update code from r60 to run under Go 1.
+</p>
+
+<h2 id="language">Changes to the language</h2>
+
+<h3 id="append">Append</h3>
+
+<p>
+The <code>append</code> predeclared variadic function makes it easy to grow a slice
+by adding elements to the end.
+A common use is to add bytes to the end of a byte slice when generating output.
+However, <code>append</code> did not provide a way to append a string to a <code>[]byte</code>,
+which is another common case.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/greeting := ..byte/` `/append.*hello/`}}
+--> greeting := []byte{}
+ greeting = append(greeting, []byte(&#34;hello &#34;)...)</pre>
+
+<p>
+By analogy with the similar property of <code>copy</code>, Go 1
+permits a string to be appended (byte-wise) directly to a byte
+slice, reducing the friction between strings and byte slices.
+The conversion is no longer necessary:
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/append.*world/`}}
+--> greeting = append(greeting, &#34;world&#34;...)</pre>
+
+<p>
+<em>Updating</em>:
+This is a new feature, so existing code needs no changes.
+</p>
+
+<h3 id="close">Close</h3>
+
+<p>
+The <code>close</code> predeclared function provides a mechanism
+for a sender to signal that no more values will be sent.
+It is important to the implementation of <code>for</code> <code>range</code>
+loops over channels and is helpful in other situations.
+Partly by design and partly because of race conditions that can occur otherwise,
+it is intended for use only by the goroutine sending on the channel,
+not by the goroutine receiving data.
+However, before Go 1 there was no compile-time checking that <code>close</code>
+was being used correctly.
+</p>
+
+<p>
+To close this gap, at least in part, Go 1 disallows <code>close</code> on receive-only channels.
+Attempting to close such a channel is a compile-time error.
+</p>
+
+<pre>
+ var c chan int
+ var csend chan&lt;- int = c
+ var crecv &lt;-chan int = c
+ close(c) // legal
+ close(csend) // legal
+ close(crecv) // illegal
+</pre>
+
+<p>
+<em>Updating</em>:
+Existing code that attempts to close a receive-only channel was
+erroneous even before Go 1 and should be fixed. The compiler will
+now reject such code.
+</p>
+
+<h3 id="literals">Composite literals</h3>
+
+<p>
+In Go 1, a composite literal of array, slice, or map type can elide the
+type specification for the elements' initializers if they are of pointer type.
+All four of the initializations in this example are legal; the last one was illegal before Go 1.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/type Date struct/` `/STOP/`}}
+--> type Date struct {
+ month string
+ day int
+ }
+ <span class="comment">// Struct values, fully qualified; always legal.</span>
+ holiday1 := []Date{
+ Date{&#34;Feb&#34;, 14},
+ Date{&#34;Nov&#34;, 11},
+ Date{&#34;Dec&#34;, 25},
+ }
+ <span class="comment">// Struct values, type name elided; always legal.</span>
+ holiday2 := []Date{
+ {&#34;Feb&#34;, 14},
+ {&#34;Nov&#34;, 11},
+ {&#34;Dec&#34;, 25},
+ }
+ <span class="comment">// Pointers, fully qualified, always legal.</span>
+ holiday3 := []*Date{
+ &amp;Date{&#34;Feb&#34;, 14},
+ &amp;Date{&#34;Nov&#34;, 11},
+ &amp;Date{&#34;Dec&#34;, 25},
+ }
+ <span class="comment">// Pointers, type name elided; legal in Go 1.</span>
+ holiday4 := []*Date{
+ {&#34;Feb&#34;, 14},
+ {&#34;Nov&#34;, 11},
+ {&#34;Dec&#34;, 25},
+ }</pre>
+
+<p>
+<em>Updating</em>:
+This change has no effect on existing code, but the command
+<code>gofmt</code> <code>-s</code> applied to existing source
+will, among other things, elide explicit element types wherever permitted.
+</p>
+
+
+<h3 id="init">Goroutines during init</h3>
+
+<p>
+The old language defined that <code>go</code> statements executed during initialization created goroutines but that they did not begin to run until initialization of the entire program was complete.
+This introduced clumsiness in many places and, in effect, limited the utility
+of the <code>init</code> construct:
+if it was possible for another package to use the library during initialization, the library
+was forced to avoid goroutines.
+This design was done for reasons of simplicity and safety but,
+as our confidence in the language grew, it seemed unnecessary.
+Running goroutines during initialization is no more complex or unsafe than running them during normal execution.
+</p>
+
+<p>
+In Go 1, code that uses goroutines can be called from
+<code>init</code> routines and global initialization expressions
+without introducing a deadlock.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/PackageGlobal/` `/^}/`}}
+-->var PackageGlobal int
+
+func init() {
+ c := make(chan int)
+ go initializationFunction(c)
+ PackageGlobal = &lt;-c
+}</pre>
+
+<p>
+<em>Updating</em>:
+This is a new feature, so existing code needs no changes,
+although it's possible that code that depends on goroutines not starting before <code>main</code> will break.
+There was no such code in the standard repository.
+</p>
+
+<h3 id="rune">The rune type</h3>
+
+<p>
+The language spec allows the <code>int</code> type to be 32 or 64 bits wide, but current implementations set <code>int</code> to 32 bits even on 64-bit platforms.
+It would be preferable to have <code>int</code> be 64 bits on 64-bit platforms.
+(There are important consequences for indexing large slices.)
+However, this change would waste space when processing Unicode characters with
+the old language because the <code>int</code> type was also used to hold Unicode code points: each code point would waste an extra 32 bits of storage if <code>int</code> grew from 32 bits to 64.
+</p>
+
+<p>
+To make changing to 64-bit <code>int</code> feasible,
+Go 1 introduces a new basic type, <code>rune</code>, to represent
+individual Unicode code points.
+It is an alias for <code>int32</code>, analogous to <code>byte</code>
+as an alias for <code>uint8</code>.
+</p>
+
+<p>
+Character literals such as <code>'a'</code>, <code>'語'</code>, and <code>'\u0345'</code>
+now have default type <code>rune</code>,
+analogous to <code>1.0</code> having default type <code>float64</code>.
+A variable initialized to a character constant will therefore
+have type <code>rune</code> unless otherwise specified.
+</p>
+
+<p>
+Libraries have been updated to use <code>rune</code> rather than <code>int</code>
+when appropriate. For instance, the functions <code>unicode.ToLower</code> and
+relatives now take and return a <code>rune</code>.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/STARTRUNE/` `/ENDRUNE/`}}
+--> delta := &#39;δ&#39; <span class="comment">// delta has type rune.</span>
+ var DELTA rune
+ DELTA = unicode.ToUpper(delta)
+ epsilon := unicode.ToLower(DELTA + 1)
+ if epsilon != &#39;δ&#39;+1 {
+ log.Fatal(&#34;inconsistent casing for Greek&#34;)
+ }</pre>
+
+<p>
+<em>Updating</em>:
+Most source code will be unaffected by this because the type inference from
+<code>:=</code> initializers introduces the new type silently, and it propagates
+from there.
+Some code may get type errors that a trivial conversion will resolve.
+</p>
+
+<h3 id="error">The error type</h3>
+
+<p>
+Go 1 introduces a new built-in type, <code>error</code>, which has the following definition:
+</p>
+
+<pre>
+ type error interface {
+ Error() string
+ }
+</pre>
+
+<p>
+Since the consequences of this type are all in the package library,
+it is discussed <a href="#errors">below</a>.
+</p>
+
+<h3 id="delete">Deleting from maps</h3>
+
+<p>
+In the old language, to delete the entry with key <code>k</code> from map <code>m</code>, one wrote the statement,
+</p>
+
+<pre>
+ m[k] = value, false
+</pre>
+
+<p>
+This syntax was a peculiar special case, the only two-to-one assignment.
+It required passing a value (usually ignored) that is evaluated but discarded,
+plus a boolean that was nearly always the constant <code>false</code>.
+It did the job but was odd and a point of contention.
+</p>
+
+<p>
+In Go 1, that syntax has gone; instead there is a new built-in
+function, <code>delete</code>. The call
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/delete\(m, k\)/`}}
+--> delete(m, k)</pre>
+
+<p>
+will delete the map entry retrieved by the expression <code>m[k]</code>.
+There is no return value. Deleting a non-existent entry is a no-op.
+</p>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will convert expressions of the form <code>m[k] = value,
+false</code> into <code>delete(m, k)</code> when it is clear that
+the ignored value can be safely discarded from the program and
+<code>false</code> refers to the predefined boolean constant.
+The fix tool
+will flag other uses of the syntax for inspection by the programmer.
+</p>
+
+<h3 id="iteration">Iterating in maps</h3>
+
+<p>
+The old language specification did not define the order of iteration for maps,
+and in practice it differed across hardware platforms.
+This caused tests that iterated over maps to be fragile and non-portable, with the
+unpleasant property that a test might always pass on one machine but break on another.
+</p>
+
+<p>
+In Go 1, the order in which elements are visited when iterating
+over a map using a <code>for</code> <code>range</code> statement
+is defined to be unpredictable, even if the same loop is run multiple
+times with the same map.
+Code should not assume that the elements are visited in any particular order.
+</p>
+
+<p>
+This change means that code that depends on iteration order is very likely to break early and be fixed long before it becomes a problem.
+Just as important, it allows the map implementation to ensure better map balancing even when programs are using range loops to select an element from a map.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/Sunday/` `/^ }/`}}
+--> m := map[string]int{&#34;Sunday&#34;: 0, &#34;Monday&#34;: 1}
+ for name, value := range m {
+ <span class="comment">// This loop should not assume Sunday will be visited first.</span>
+ f(name, value)
+ }</pre>
+
+<p>
+<em>Updating</em>:
+This is one change where tools cannot help. Most existing code
+will be unaffected, but some programs may break or misbehave; we
+recommend manual checking of all range statements over maps to
+verify they do not depend on iteration order. There were a few such
+examples in the standard repository; they have been fixed.
+Note that it was already incorrect to depend on the iteration order, which
+was unspecified. This change codifies the unpredictability.
+</p>
+
+<h3 id="multiple_assignment">Multiple assignment</h3>
+
+<p>
+The language specification has long guaranteed that in assignments
+the right-hand-side expressions are all evaluated before any left-hand-side expressions are assigned.
+To guarantee predictable behavior,
+Go 1 refines the specification further.
+</p>
+
+<p>
+If the left-hand side of the assignment
+statement contains expressions that require evaluation, such as
+function calls or array indexing operations, these will all be done
+using the usual left-to-right rule before any variables are assigned
+their value. Once everything is evaluated, the actual assignments
+proceed in left-to-right order.
+</p>
+
+<p>
+These examples illustrate the behavior.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/sa :=/` `/then sc.0. = 2/`}}
+--> sa := []int{1, 2, 3}
+ i := 0
+ i, sa[i] = 1, 2 <span class="comment">// sets i = 1, sa[0] = 2</span>
+
+ sb := []int{1, 2, 3}
+ j := 0
+ sb[j], j = 2, 1 <span class="comment">// sets sb[0] = 2, j = 1</span>
+
+ sc := []int{1, 2, 3}
+ sc[0], sc[0] = 1, 2 <span class="comment">// sets sc[0] = 1, then sc[0] = 2 (so sc[0] = 2 at end)</span></pre>
+
+<p>
+<em>Updating</em>:
+This is one change where tools cannot help, but breakage is unlikely.
+No code in the standard repository was broken by this change, and code
+that depended on the previous unspecified behavior was already incorrect.
+</p>
+
+<h3 id="shadowing">Returns and shadowed variables</h3>
+
+<p>
+A common mistake is to use <code>return</code> (without arguments) after an assignment to a variable that has the same name as a result variable but is not the same variable.
+This situation is called <em>shadowing</em>: the result variable has been shadowed by another variable with the same name declared in an inner scope.
+</p>
+
+<p>
+In functions with named return values,
+the Go 1 compilers disallow return statements without arguments if any of the named return values is shadowed at the point of the return statement.
+(It isn't part of the specification, because this is one area we are still exploring;
+the situation is analogous to the compilers rejecting functions that do not end with an explicit return statement.)
+</p>
+
+<p>
+This function implicitly returns a shadowed return value and will be rejected by the compiler:
+</p>
+
+<pre>
+ func Bug() (i, j, k int) {
+ for i = 0; i &lt; 5; i++ {
+ for j := 0; j &lt; 5; j++ { // Redeclares j.
+ k += i*j
+ if k > 100 {
+ return // Rejected: j is shadowed here.
+ }
+ }
+ }
+ return // OK: j is not shadowed here.
+ }
+</pre>
+
+<p>
+<em>Updating</em>:
+Code that shadows return values in this way will be rejected by the compiler and will need to be fixed by hand.
+The few cases that arose in the standard repository were mostly bugs.
+</p>
+
+<h3 id="unexported">Copying structs with unexported fields</h3>
+
+<p>
+The old language did not allow a package to make a copy of a struct value containing unexported fields belonging to a different package.
+There was, however, a required exception for a method receiver;
+also, the implementations of <code>copy</code> and <code>append</code> have never honored the restriction.
+</p>
+
+<p>
+Go 1 will allow packages to copy struct values containing unexported fields from other packages.
+Besides resolving the inconsistency,
+this change admits a new kind of API: a package can return an opaque value without resorting to a pointer or interface.
+The new implementations of <code>time.Time</code> and
+<code>reflect.Value</code> are examples of types taking advantage of this new property.
+</p>
+
+<p>
+As an example, if package <code>p</code> includes the definitions,
+</p>
+
+<pre>
+ type Struct struct {
+ Public int
+ secret int
+ }
+ func NewStruct(a int) Struct { // Note: not a pointer.
+ return Struct{a, f(a)}
+ }
+ func (s Struct) String() string {
+ return fmt.Sprintf("{%d (secret %d)}", s.Public, s.secret)
+ }
+</pre>
+
+<p>
+a package that imports <code>p</code> can assign and copy values of type
+<code>p.Struct</code> at will.
+Behind the scenes the unexported fields will be assigned and copied just
+as if they were exported,
+but the client code will never be aware of them. The code
+</p>
+
+<pre>
+ import "p"
+
+ myStruct := p.NewStruct(23)
+ copyOfMyStruct := myStruct
+ fmt.Println(myStruct, copyOfMyStruct)
+</pre>
+
+<p>
+will show that the secret field of the struct has been copied to the new value.
+</p>
+
+<p>
+<em>Updating</em>:
+This is a new feature, so existing code needs no changes.
+</p>
+
+<h3 id="equality">Equality</h3>
+
+<p>
+Before Go 1, the language did not define equality on struct and array values.
+This meant,
+among other things, that structs and arrays could not be used as map keys.
+On the other hand, Go did define equality on function and map values.
+Function equality was problematic in the presence of closures
+(when are two closures equal?)
+while map equality compared pointers, not the maps' content, which was usually
+not what the user would want.
+</p>
+
+<p>
+Go 1 addressed these issues.
+First, structs and arrays can be compared for equality and inequality
+(<code>==</code> and <code>!=</code>),
+and therefore be used as map keys,
+provided they are composed from elements for which equality is also defined,
+using element-wise comparison.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/type Day struct/` `/Printf/`}}
+--> type Day struct {
+ long string
+ short string
+ }
+ Christmas := Day{&#34;Christmas&#34;, &#34;XMas&#34;}
+ Thanksgiving := Day{&#34;Thanksgiving&#34;, &#34;Turkey&#34;}
+ holiday := map[Day]bool{
+ Christmas: true,
+ Thanksgiving: true,
+ }
+ fmt.Printf(&#34;Christmas is a holiday: %t\n&#34;, holiday[Christmas])</pre>
+
+<p>
+Second, Go 1 removes the definition of equality for function values,
+except for comparison with <code>nil</code>.
+Finally, map equality is gone too, also except for comparison with <code>nil</code>.
+</p>
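+
+<p>
+As an illustrative sketch (not one of the programs in the documentation tree),
+comparison with <code>nil</code> remains legal for function and map values,
+while comparing two such values directly no longer compiles:
+</p>
+
+<pre>
+var f func(int) int
+var m map[string]int
+fmt.Println(f == nil, m == nil) // still allowed
+// Comparing f to another function value, or m to another map, is now a compile error.
+</pre>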
+
+<p>
+Note that equality is still undefined for slices, for which the
+calculation is in general infeasible. Also note that the ordered
+comparison operators (<code>&lt;</code> <code>&lt;=</code>
+<code>&gt;</code> <code>&gt;=</code>) are still undefined for
+structs and arrays.
+</p>
+
+<p>
+<em>Updating</em>:
+Struct and array equality is a new feature, so existing code needs no changes.
+Existing code that depends on function or map equality will be
+rejected by the compiler and will need to be fixed by hand.
+Few programs will be affected, but the fix may require some
+redesign.
+</p>
+
+<h2 id="packages">The package hierarchy</h2>
+
+<p>
+Go 1 addresses many deficiencies in the old standard library and
+cleans up a number of packages, making them more internally consistent
+and portable.
+</p>
+
+<p>
+This section describes how the packages have been rearranged in Go 1.
+Some have moved, some have been renamed, some have been deleted.
+New packages are described in later sections.
+</p>
+
+<h3 id="hierarchy">The package hierarchy</h3>
+
+<p>
+Go 1 has a rearranged package hierarchy that groups related items
+into subdirectories. For instance, <code>utf8</code> and
+<code>utf16</code> now occupy subdirectories of <code>unicode</code>.
+Also, <a href="#subrepo">some packages</a> have moved into
+subrepositories of
+<a href="http://code.google.com/p/go"><code>code.google.com/p/go</code></a>
+while <a href="#deleted">others</a> have been deleted outright.
+</p>
+
+<table class="codetable" frame="border" summary="Moved packages">
+<colgroup align="left" width="60%"></colgroup>
+<colgroup align="left" width="40%"></colgroup>
+<tr>
+<th align="left">Old path</th>
+<th align="left">New path</th>
+</tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>asn1</td> <td>encoding/asn1</td></tr>
+<tr><td>csv</td> <td>encoding/csv</td></tr>
+<tr><td>gob</td> <td>encoding/gob</td></tr>
+<tr><td>json</td> <td>encoding/json</td></tr>
+<tr><td>xml</td> <td>encoding/xml</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>exp/template/html</td> <td>html/template</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>big</td> <td>math/big</td></tr>
+<tr><td>cmath</td> <td>math/cmplx</td></tr>
+<tr><td>rand</td> <td>math/rand</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>http</td> <td>net/http</td></tr>
+<tr><td>http/cgi</td> <td>net/http/cgi</td></tr>
+<tr><td>http/fcgi</td> <td>net/http/fcgi</td></tr>
+<tr><td>http/httptest</td> <td>net/http/httptest</td></tr>
+<tr><td>http/pprof</td> <td>net/http/pprof</td></tr>
+<tr><td>mail</td> <td>net/mail</td></tr>
+<tr><td>rpc</td> <td>net/rpc</td></tr>
+<tr><td>rpc/jsonrpc</td> <td>net/rpc/jsonrpc</td></tr>
+<tr><td>smtp</td> <td>net/smtp</td></tr>
+<tr><td>url</td> <td>net/url</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>exec</td> <td>os/exec</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>scanner</td> <td>text/scanner</td></tr>
+<tr><td>tabwriter</td> <td>text/tabwriter</td></tr>
+<tr><td>template</td> <td>text/template</td></tr>
+<tr><td>template/parse</td> <td>text/template/parse</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>utf8</td> <td>unicode/utf8</td></tr>
+<tr><td>utf16</td> <td>unicode/utf16</td></tr>
+</table>
+
+<p>
+Note that the package names for the old <code>cmath</code> and
+<code>exp/template/html</code> packages have changed to <code>cmplx</code>
+and <code>template</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update all imports and package renames for packages that
+remain inside the standard repository. Programs that import packages
+that are no longer in the standard repository will need to be edited
+by hand.
+</p>
+
+<h3 id="exp">The package tree exp</h3>
+
+<p>
+Because they are not standardized, the packages under the <code>exp</code> directory will not be available in the
+standard Go 1 release distributions, although they will be available in source code form
+in <a href="http://code.google.com/p/go/">the repository</a> for
+developers who wish to use them.
+</p>
+
+<p>
+Several packages have moved under <code>exp</code> at the time of Go 1's release:
+</p>
+
+<ul>
+<li><code>ebnf</code></li>
+<li><code>html</code><sup>&#8224;</sup></li>
+<li><code>go/types</code></li>
+</ul>
+
+<p>
+(<sup>&#8224;</sup>The <code>EscapeString</code> and <code>UnescapeString</code> functions remain
+in package <code>html</code>.)
+</p>
+
+<p>
+All these packages are available under the same names, with the prefix <code>exp/</code>: <code>exp/ebnf</code> etc.
+</p>
+
+<p>
+Also, the <code>utf8.String</code> type has been moved to its own package, <code>exp/utf8string</code>.
+</p>
+
+<p>
+Finally, the <code>gotype</code> command now resides in <code>exp/gotype</code>, while
+<code>ebnflint</code> is now in <code>exp/ebnflint</code>.
+If they are installed, they now reside in <code>$GOROOT/bin/tool</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses packages in <code>exp</code> will need to be updated by hand,
+or else compiled from an installation that has <code>exp</code> available.
+The <code>go</code> <code>fix</code> tool or the compiler will complain about such uses.
+</p>
+
+<h3 id="old">The package tree old</h3>
+
+<p>
+Because they are deprecated, the packages under the <code>old</code> directory will not be available in the
+standard Go 1 release distributions, although they will be available in source code form for
+developers who wish to use them.
+</p>
+
+<p>
+The packages in their new locations are:
+</p>
+
+<ul>
+<li><code>old/netchan</code></li>
+<li><code>old/regexp</code></li>
+<li><code>old/template</code></li>
+</ul>
+
+<p>
+<em>Updating</em>:
+Code that uses packages now in <code>old</code> will need to be updated by hand,
+or else compiled from an installation that has <code>old</code> available.
+The <code>go</code> <code>fix</code> tool will warn about such uses.
+</p>
+
+<h3 id="deleted">Deleted packages</h3>
+
+<p>
+Go 1 deletes several packages outright:
+</p>
+
+<ul>
+<li><code>container/vector</code></li>
+<li><code>exp/datafmt</code></li>
+<li><code>go/typechecker</code></li>
+<li><code>try</code></li>
+</ul>
+
+<p>
+and also the command <code>gotry</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses <code>container/vector</code> should be updated to use
+slices directly. See
+<a href="http://code.google.com/p/go-wiki/wiki/SliceTricks">the Go
+Language Community Wiki</a> for some suggestions.
+Code that uses the other packages (there should be almost zero) will need to be rethought.
+</p>
+
+<h3 id="subrepo">Packages moving to subrepositories</h3>
+
+<p>
+Go 1 has moved a number of packages into other repositories, usually sub-repositories of
+<a href="http://code.google.com/p/go/">the main Go repository</a>.
+This table lists the old and new import paths:
+</p>
+
+<table class="codetable" frame="border" summary="Sub-repositories">
+<colgroup align="left" width="40%"></colgroup>
+<colgroup align="left" width="60%"></colgroup>
+<tr>
+<th align="left">Old</th>
+<th align="left">New</th>
+</tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>crypto/bcrypt</td> <td>code.google.com/p/go.crypto/bcrypt</td></tr>
+<tr><td>crypto/blowfish</td> <td>code.google.com/p/go.crypto/blowfish</td></tr>
+<tr><td>crypto/cast5</td> <td>code.google.com/p/go.crypto/cast5</td></tr>
+<tr><td>crypto/md4</td> <td>code.google.com/p/go.crypto/md4</td></tr>
+<tr><td>crypto/ocsp</td> <td>code.google.com/p/go.crypto/ocsp</td></tr>
+<tr><td>crypto/openpgp</td> <td>code.google.com/p/go.crypto/openpgp</td></tr>
+<tr><td>crypto/openpgp/armor</td> <td>code.google.com/p/go.crypto/openpgp/armor</td></tr>
+<tr><td>crypto/openpgp/elgamal</td> <td>code.google.com/p/go.crypto/openpgp/elgamal</td></tr>
+<tr><td>crypto/openpgp/errors</td> <td>code.google.com/p/go.crypto/openpgp/errors</td></tr>
+<tr><td>crypto/openpgp/packet</td> <td>code.google.com/p/go.crypto/openpgp/packet</td></tr>
+<tr><td>crypto/openpgp/s2k</td> <td>code.google.com/p/go.crypto/openpgp/s2k</td></tr>
+<tr><td>crypto/ripemd160</td> <td>code.google.com/p/go.crypto/ripemd160</td></tr>
+<tr><td>crypto/twofish</td> <td>code.google.com/p/go.crypto/twofish</td></tr>
+<tr><td>crypto/xtea</td> <td>code.google.com/p/go.crypto/xtea</td></tr>
+<tr><td>exp/ssh</td> <td>code.google.com/p/go.crypto/ssh</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>image/bmp</td> <td>code.google.com/p/go.image/bmp</td></tr>
+<tr><td>image/tiff</td> <td>code.google.com/p/go.image/tiff</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>net/dict</td> <td>code.google.com/p/go.net/dict</td></tr>
+<tr><td>net/websocket</td> <td>code.google.com/p/go.net/websocket</td></tr>
+<tr><td>exp/spdy</td> <td>code.google.com/p/go.net/spdy</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>encoding/git85</td> <td>code.google.com/p/go.codereview/git85</td></tr>
+<tr><td>patch</td> <td>code.google.com/p/go.codereview/patch</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>exp/wingui</td> <td>code.google.com/p/gowingui</td></tr>
+</table>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update imports of these packages to use the new import paths.
+Installations that depend on these packages will need to install them using
+a <code>go get</code> command.
+</p>
+
+<h2 id="major">Major changes to the library</h2>
+
+<p>
+This section describes significant changes to the core libraries, the ones that
+affect the most programs.
+</p>
+
+<h3 id="errors">The error type and errors package</h3>
+
+<p>
+The placement of <code>os.Error</code> in package <code>os</code> is mostly historical: errors first came up when implementing package <code>os</code>, and they seemed system-related at the time.
+Since then it has become clear that errors are more fundamental than the operating system. For example, it would be nice to use errors in packages that <code>os</code> depends on, like <code>syscall</code>.
+Also, having <code>Error</code> in <code>os</code> introduces many dependencies on <code>os</code> that would otherwise not exist.
+</p>
+
+<p>
+Go 1 solves these problems by introducing a built-in <code>error</code> interface type and a separate <code>errors</code> package (analogous to <code>bytes</code> and <code>strings</code>) that contains utility functions.
+It replaces <code>os.NewError</code> with
+<a href="/pkg/errors/#New"><code>errors.New</code></a>,
+giving errors a more central place in the environment.
+</p>
+
+<p>
+So that the widely used <code>String</code> method does not cause accidental satisfaction
+of the <code>error</code> interface, the <code>error</code> interface instead uses
+the name <code>Error</code> for that method:
+</p>
+
+<pre>
+ type error interface {
+ Error() string
+ }
+</pre>
+
+<p>
+The <code>fmt</code> library automatically invokes <code>Error</code>, as it already
+does for <code>String</code>, for easy printing of error values.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/START ERROR EXAMPLE/` `/END ERROR EXAMPLE/`}}
+-->type SyntaxError struct {
+ File string
+ Line int
+ Message string
+}
+
+func (se *SyntaxError) Error() string {
+ return fmt.Sprintf(&#34;%s:%d: %s&#34;, se.File, se.Line, se.Message)
+}</pre>
+
+<p>
+All standard packages have been updated to use the new interface; the old <code>os.Error</code> is gone.
+</p>
+
+<p>
+A new package, <a href="/pkg/errors/"><code>errors</code></a>, contains the function
+</p>
+
+<pre>
+func New(text string) error
+</pre>
+
+<p>
+to turn a string into an error. It replaces the old <code>os.NewError</code>.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/ErrSyntax/`}}
+--> var ErrSyntax = errors.New(&#34;syntax error&#34;)</pre>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
+Code that defines error types with a <code>String</code> method will need to be updated
+by hand to rename the methods to <code>Error</code>.
+</p>
+
+<h3 id="errno">System call errors</h3>
+
+<p>
+The old <code>syscall</code> package, which predated <code>os.Error</code>
+(and just about everything else),
+returned errors as <code>int</code> values.
+In turn, the <code>os</code> package forwarded many of these errors, such
+as <code>EINVAL</code>, but using a different set of errors on each platform.
+This behavior was unpleasant and unportable.
+</p>
+
+<p>
+In Go 1, the
+<a href="/pkg/syscall/"><code>syscall</code></a>
+package instead returns an <code>error</code> for system call errors.
+On Unix, the implementation is done by a
+<a href="/pkg/syscall/#Errno"><code>syscall.Errno</code></a> type
+that satisfies <code>error</code> and replaces the old <code>os.Errno</code>.
+</p>
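+
+<p>
+A brief Unix-only sketch (the path and flags here are purely illustrative) of testing
+for a particular <code>syscall.Errno</code> value:
+</p>
+
+<pre>
+_, err := syscall.Open("/no/such/file", syscall.O_RDONLY, 0)
+if errno, ok := err.(syscall.Errno); ok {
+    // errno satisfies the error interface and compares like the old integer code.
+    fmt.Println(errno == syscall.ENOENT, errno)
+}
+</pre>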
+
+<p>
+The changes affecting <code>os.EINVAL</code> and relatives are
+described <a href="#os">elsewhere</a>.
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
+Regardless, most code should use the <code>os</code> package
+rather than <code>syscall</code> and so will be unaffected.
+</p>
+
+<h3 id="time">Time</h3>
+
+<p>
+Time is always a challenge to support well in a programming language.
+The old Go <code>time</code> package had <code>int64</code> units, no
+real type safety,
+and no distinction between absolute times and durations.
+</p>
+
+<p>
+One of the most sweeping changes in the Go 1 library is therefore a
+complete redesign of the
+<a href="/pkg/time/"><code>time</code></a> package.
+Instead of an integer number of nanoseconds as an <code>int64</code>,
+and a separate <code>*time.Time</code> type to deal with human
+units such as hours and years,
+there are now two fundamental types:
+<a href="/pkg/time/#Time"><code>time.Time</code></a>
+(a value, so the <code>*</code> is gone), which represents a moment in time;
+and <a href="/pkg/time/#Duration"><code>time.Duration</code></a>,
+which represents an interval.
+Both have nanosecond resolution.
+A <code>Time</code> can represent any time into the ancient
+past and remote future, while a <code>Duration</code> can
+span plus or minus only about 290 years.
+There are methods on these types, plus a number of helpful
+predefined constant durations such as <code>time.Second</code>.
+</p>
+
+<p>
+Among the new methods are things like
+<a href="/pkg/time/#Time.Add"><code>Time.Add</code></a>,
+which adds a <code>Duration</code> to a <code>Time</code>, and
+<a href="/pkg/time/#Time.Sub"><code>Time.Sub</code></a>,
+which subtracts two <code>Times</code> to yield a <code>Duration</code>.
+</p>
+
+<p>
+The most important semantic change is that the Unix epoch (Jan 1, 1970) is now
+relevant only for those functions and methods that mention Unix:
+<a href="/pkg/time/#Unix"><code>time.Unix</code></a>
+and the <a href="/pkg/time/#Time.Unix"><code>Unix</code></a>
+and <a href="/pkg/time/#Time.UnixNano"><code>UnixNano</code></a> methods
+of the <code>Time</code> type.
+In particular,
+<a href="/pkg/time/#Now"><code>time.Now</code></a>
+returns a <code>time.Time</code> value rather than, in the old
+API, an integer nanosecond count since the Unix epoch.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/sleepUntil/` `/^}/`}}
+--><span class="comment">// sleepUntil sleeps until the specified time. It returns immediately if it&#39;s too late.</span>
+func sleepUntil(wakeup time.Time) {
+ now := time.Now() <span class="comment">// A Time.</span>
+ if !wakeup.After(now) {
+ return
+ }
+ delta := wakeup.Sub(now) <span class="comment">// A Duration.</span>
+ fmt.Printf(&#34;Sleeping for %.3fs\n&#34;, delta.Seconds())
+ time.Sleep(delta)
+}</pre>
+
+<p>
+The new types, methods, and constants have been propagated through
+all the standard packages that use time, such as <code>os</code> and
+its representation of file time stamps.
+</p>
+
+<p>
+<em>Updating</em>:
+The <code>go</code> <code>fix</code> tool will update many uses of the old <code>time</code> package to use the new
+types and methods, although it does not replace values such as <code>1e9</code>
+representing nanoseconds per second.
+Also, because of type changes in some of the values that arise,
+some of the expressions rewritten by the fix tool may require
+further hand editing; in such cases the rewrite will include
+the correct function or method for the old functionality, but
+may have the wrong type or require further analysis.
+</p>
+
+<h2 id="minor">Minor changes to the library</h2>
+
+<p>
+This section describes smaller changes, such as those to less commonly
+used packages or that affect
+few programs beyond the need to run <code>go</code> <code>fix</code>.
+This category includes packages that are new in Go 1.
+Collectively they improve portability, regularize behavior, and
+make the interfaces more modern and Go-like.
+</p>
+
+<h3 id="archive_zip">The archive/zip package</h3>
+
+<p>
+In Go 1, <a href="/pkg/archive/zip/#Writer"><code>*zip.Writer</code></a> no
+longer has a <code>Write</code> method. Its presence was a mistake.
+</p>
+
+<p>
+<em>Updating</em>:
+What little code is affected will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="bufio">The bufio package</h3>
+
+<p>
+In Go 1, <a href="/pkg/bufio/#NewReaderSize"><code>bufio.NewReaderSize</code></a>
+and
+<a href="/pkg/bufio/#NewWriterSize"><code>bufio.NewWriterSize</code></a>
+functions no longer return an error for invalid sizes.
+If the argument size is too small or invalid, it is adjusted.
+</p>
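+
+<p>
+A minimal sketch of the new call site (the one-byte size is deliberately too small
+and will be adjusted; under the old API a second, error result had to be checked):
+</p>
+
+<pre>
+r := bufio.NewReaderSize(os.Stdin, 1) // size adjusted upward; no error returned
+line, err := r.ReadString('\n')
+if err != nil {
+    log.Fatal(err)
+}
+fmt.Print(line)
+</pre>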
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update calls that assign the error to _.
+Calls that aren't fixed will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="compress">The compress/flate, compress/gzip and compress/zlib packages</h3>
+
+<p>
+In Go 1, the <code>NewWriterXxx</code> functions in
+<a href="/pkg/compress/flate"><code>compress/flate</code></a>,
+<a href="/pkg/compress/gzip"><code>compress/gzip</code></a> and
+<a href="/pkg/compress/zlib"><code>compress/zlib</code></a>
+all return <code>(*Writer, error)</code> if they take a compression level,
+and <code>*Writer</code> otherwise. Package <code>gzip</code>'s
+<code>Compressor</code> and <code>Decompressor</code> types have been renamed
+to <code>Writer</code> and <code>Reader</code>. Package <code>flate</code>'s
+<code>WrongValueError</code> type has been removed.
+</p>
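+
+<p>
+A hedged sketch of the two constructor shapes, writing to <code>ioutil.Discard</code>
+purely for illustration:
+</p>
+
+<pre>
+zw := zlib.NewWriter(ioutil.Discard)                        // no level argument: *Writer only
+fw, err := flate.NewWriter(ioutil.Discard, flate.BestSpeed) // level argument: (*Writer, error)
+if err != nil {
+    log.Fatal(err)
+}
+zw.Close()
+fw.Close()
+</pre>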
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update old names and calls that assign the error to _.
+Calls that aren't fixed will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="crypto_aes_des">The crypto/aes and crypto/des packages</h3>
+
+<p>
+In Go 1, the <code>Reset</code> method has been removed. Go does not guarantee
+that memory is not copied and therefore this method was misleading.
+</p>
+
+<p>
+The cipher-specific types <code>*aes.Cipher</code>, <code>*des.Cipher</code>,
+and <code>*des.TripleDESCipher</code> have been removed in favor of
+<code>cipher.Block</code>.
+</p>
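+
+<p>
+A small sketch, assuming <code>key</code> is a byte slice of valid AES length
+(16, 24, or 32 bytes):
+</p>
+
+<pre>
+block, err := aes.NewCipher(key) // block now has the interface type cipher.Block
+if err != nil {
+    log.Fatal(err)
+}
+fmt.Println("block size:", block.BlockSize())
+</pre>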
+
+<p>
+<em>Updating</em>:
+Remove the calls to <code>Reset</code>. Replace uses of the specific cipher types with
+<code>cipher.Block</code>.
+</p>
+
+<h3 id="crypto_elliptic">The crypto/elliptic package</h3>
+
+<p>
+In Go 1, <a href="/pkg/crypto/elliptic/#Curve"><code>elliptic.Curve</code></a>
+has been made an interface to permit alternative implementations. The curve
+parameters have been moved to the
+<a href="/pkg/crypto/elliptic/#CurveParams"><code>elliptic.CurveParams</code></a>
+structure.
+</p>
+
+<p>
+<em>Updating</em>:
+Existing users of <code>*elliptic.Curve</code> will need to change to
+simply <code>elliptic.Curve</code>. Calls to <code>Marshal</code>,
+<code>Unmarshal</code> and <code>GenerateKey</code> are now functions
+in <code>crypto/elliptic</code> that take an <code>elliptic.Curve</code>
+as their first argument.
+</p>
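+
+<p>
+For illustration only, the function-style calls look roughly like this (using the
+P-256 curve and <code>crypto/rand</code> as the source of randomness):
+</p>
+
+<pre>
+curve := elliptic.P256()
+priv, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
+if err != nil {
+    log.Fatal(err)
+}
+pub := elliptic.Marshal(curve, x, y)
+fmt.Printf("%d-byte private key, %d-byte marshaled public point\n", len(priv), len(pub))
+</pre>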
+
+<h3 id="crypto_hmac">The crypto/hmac package</h3>
+
+<p>
+In Go 1, the hash-specific functions, such as <code>hmac.NewMD5</code>, have
+been removed from <code>crypto/hmac</code>. Instead, <code>hmac.New</code> takes
+a function that returns a <code>hash.Hash</code>, such as <code>md5.New</code>.
+</p>
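+
+<p>
+A short sketch of the new constructor (the key and message here are placeholders):
+</p>
+
+<pre>
+mac := hmac.New(md5.New, []byte("key")) // formerly hmac.NewMD5([]byte("key"))
+mac.Write([]byte("message"))
+fmt.Printf("%x\n", mac.Sum(nil))
+</pre>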
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will perform the needed changes.
+</p>
+
+<h3 id="crypto_x509">The crypto/x509 package</h3>
+
+<p>
+In Go 1, the
+<a href="/pkg/crypto/x509/#CreateCertificate"><code>CreateCertificate</code></a>
+and
+<a href="/pkg/crypto/x509/#CreateCRL"><code>CreateCRL</code></a>
+functions in <code>crypto/x509</code> have been altered to take an
+<code>interface{}</code> where they previously took a <code>*rsa.PublicKey</code>
+or <code>*rsa.PrivateKey</code>. This will allow other public key algorithms
+to be implemented in the future.
+</p>
+
+<p>
+<em>Updating</em>:
+No changes will be needed.
+</p>
+
+<h3 id="encoding_binary">The encoding/binary package</h3>
+
+<p>
+In Go 1, the <code>binary.TotalSize</code> function has been replaced by
+<a href="/pkg/encoding/binary/#Size"><code>Size</code></a>,
+which takes an <code>interface{}</code> argument rather than
+a <code>reflect.Value</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+What little code is affected will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="encoding_xml">The encoding/xml package</h3>
+
+<p>
+In Go 1, the <a href="/pkg/encoding/xml/"><code>xml</code></a> package
+has been brought closer in design to the other marshaling packages such
+as <a href="/pkg/encoding/gob/"><code>encoding/gob</code></a>.
+</p>
+
+<p>
+The old <code>Parser</code> type is renamed
+<a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> and has a new
+<a href="/pkg/encoding/xml/#Decoder.Decode"><code>Decode</code></a> method. An
+<a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a> type was also introduced.
+</p>
+
+<p>
+The functions <a href="/pkg/encoding/xml/#Marshal"><code>Marshal</code></a>
+and <a href="/pkg/encoding/xml/#Unmarshal"><code>Unmarshal</code></a>
+work with <code>[]byte</code> values now. To work with streams,
+use the new <a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a>
+and <a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> types.
+</p>
+
+<p>
+When marshaling or unmarshaling values, the format of supported flags in
+field tags has changed to be closer to the
+<a href="/pkg/encoding/json"><code>json</code></a> package
+(<code>`xml:"name,flag"`</code>). The matching done between field tags, field
+names, and the XML attribute and element names is now case-sensitive.
+The <code>XMLName</code> field tag, if present, must also match the name
+of the XML element being marshaled.
+</p>
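+
+<p>
+A hedged sketch of the new tag syntax and the renamed <code>Decoder</code>;
+the <code>Person</code> type and the <code>data</code> string are invented for
+illustration:
+</p>
+
+<pre>
+type Person struct {
+    XMLName xml.Name `xml:"person"`
+    Name    string   `xml:"name,attr"` // note the comma; a plain "attr" now has a different meaning
+    Email   string   `xml:"email"`
+}
+
+var p Person
+dec := xml.NewDecoder(strings.NewReader(data)) // Decoder is the old Parser, renamed
+if err := dec.Decode(&amp;p); err != nil {
+    log.Fatal(err)
+}
+</pre>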
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update most uses of the package except for some calls to
+<code>Unmarshal</code>. Special care must be taken with field tags,
+since the fix tool will not update them and if not fixed by hand they will
+misbehave silently in some cases. For example, the old
+<code>"attr"</code> is now written <code>",attr"</code> while plain
+<code>"attr"</code> remains valid but with a different meaning.
+</p>
+
+<h3 id="expvar">The expvar package</h3>
+
+<p>
+In Go 1, the <code>RemoveAll</code> function has been removed.
+The <code>Iter</code> function and <code>Iter</code> method on <code>*Map</code> have
+been replaced by
+<a href="/pkg/expvar/#Do"><code>Do</code></a>
+and
+<a href="/pkg/expvar/#Map.Do"><code>(*Map).Do</code></a>.
+</p>
+
+<p>
+<em>Updating</em>:
+Most code using <code>expvar</code> will not need changing. The rare code that used
+<code>Iter</code> can be updated to pass a closure to <code>Do</code> to achieve the same effect.
+</p>
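+
+<p>
+A minimal sketch of the replacement idiom:
+</p>
+
+<pre>
+expvar.Do(func(kv expvar.KeyValue) { // formerly a loop over the values from Iter
+    fmt.Printf("%s = %v\n", kv.Key, kv.Value)
+})
+</pre>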
+
+<h3 id="flag">The flag package</h3>
+
+<p>
+In Go 1, the interface <a href="/pkg/flag/#Value"><code>flag.Value</code></a> has changed slightly.
+The <code>Set</code> method now returns an <code>error</code> instead of
+a <code>bool</code> to indicate success or failure.
+</p>
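+
+<p>
+A sketch of a user-defined flag under the revised interface; the
+<code>hostFlag</code> type is invented for illustration:
+</p>
+
+<pre>
+type hostFlag struct{ name string }
+
+func (h *hostFlag) String() string { return h.name }
+
+// Set now reports failure with an error rather than a bool.
+func (h *hostFlag) Set(value string) error {
+    if value == "" {
+        return errors.New("host must not be empty")
+    }
+    h.name = value
+    return nil
+}
+</pre>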
+
+<p>
+There is also a new kind of flag, <code>Duration</code>, to support argument
+values specifying time intervals.
+Values for such flags must be given units, just as <code>time.Duration</code>
+formats them: <code>10s</code>, <code>1h30m</code>, etc.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/timeout/`}}
+-->var timeout = flag.Duration(&#34;timeout&#34;, 30*time.Second, &#34;how long to wait for completion&#34;)</pre>
+
+<p>
+<em>Updating</em>:
+Programs that implement their own flags will need minor manual fixes to update their
+<code>Set</code> methods.
+The <code>Duration</code> flag is new and affects no existing code.
+</p>
+
+
+<h3 id="go">The go/* packages</h3>
+
+<p>
+Several packages under <code>go</code> have slightly revised APIs.
+</p>
+
+<p>
+A concrete <code>Mode</code> type was introduced for configuration mode flags
+in the packages
+<a href="/pkg/go/scanner/"><code>go/scanner</code></a>,
+<a href="/pkg/go/parser/"><code>go/parser</code></a>,
+<a href="/pkg/go/printer/"><code>go/printer</code></a>, and
+<a href="/pkg/go/doc/"><code>go/doc</code></a>.
+</p>
+
+<p>
+The modes <code>AllowIllegalChars</code> and <code>InsertSemis</code> have been removed
+from the <a href="/pkg/go/scanner/"><code>go/scanner</code></a> package. They were mostly
+useful for scanning text other than Go source files. Instead, the
+<a href="/pkg/text/scanner/"><code>text/scanner</code></a> package should be used
+for that purpose.
+</p>
+
+<p>
+The <a href="/pkg/go/scanner/#ErrorHandler"><code>ErrorHandler</code></a> provided
+to the scanner's <a href="/pkg/go/scanner/#Scanner.Init"><code>Init</code></a> method is
+now simply a function rather than an interface. The <code>ErrorVector</code> type has
+been removed in favor of the (existing) <a href="/pkg/go/scanner/#ErrorList"><code>ErrorList</code></a>
+type, and the <code>ErrorVector</code> methods have been migrated. Instead of embedding
+an <code>ErrorVector</code> in a client of the scanner, now a client should maintain
+an <code>ErrorList</code>.
+</p>
+
+<p>
+The set of parse functions provided by the <a href="/pkg/go/parser/"><code>go/parser</code></a>
+package has been reduced to the primary parse function
+<a href="/pkg/go/parser/#ParseFile"><code>ParseFile</code></a>, and a couple of
+convenience functions <a href="/pkg/go/parser/#ParseDir"><code>ParseDir</code></a>
+and <a href="/pkg/go/parser/#ParseExpr"><code>ParseExpr</code></a>.
+</p>
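+
+<p>
+A brief sketch of the primary entry point (the file name is illustrative):
+</p>
+
+<pre>
+fset := token.NewFileSet()
+file, err := parser.ParseFile(fset, "hello.go", nil, parser.ParseComments)
+if err != nil {
+    log.Fatal(err)
+}
+ast.Print(fset, file)
+</pre>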
+
+<p>
+The <a href="/pkg/go/printer/"><code>go/printer</code></a> package supports an additional
+configuration mode <a href="/pkg/go/printer/#Mode"><code>SourcePos</code></a>;
+if set, the printer will emit <code>//line</code> comments such that the generated
+output contains the original source code position information. The new type
+<a href="/pkg/go/printer/#CommentedNode"><code>CommentedNode</code></a> can be
+used to provide comments associated with an arbitrary
+<a href="/pkg/go/ast/#Node"><code>ast.Node</code></a> (until now only
+<a href="/pkg/go/ast/#File"><code>ast.File</code></a> carried comment information).
+</p>
+
+<p>
+The type names of the <a href="/pkg/go/doc/"><code>go/doc</code></a> package have been
+streamlined by removing the <code>Doc</code> suffix: <code>PackageDoc</code>
+is now <code>Package</code>, <code>ValueDoc</code> is <code>Value</code>, etc.
+Also, all types now consistently have a <code>Name</code> field (or <code>Names</code>,
+in the case of type <code>Value</code>) and <code>Type.Factories</code> has become
+<code>Type.Funcs</code>.
+Instead of calling <code>doc.NewPackageDoc(pkg, importpath)</code>,
+documentation for a package is created with:
+</p>
+
+<pre>
+ doc.New(pkg, importpath, mode)
+</pre>
+
+<p>
+where the new <code>mode</code> parameter specifies the operation mode:
+if set to <a href="/pkg/go/doc/#AllDecls"><code>AllDecls</code></a>, all declarations
+(not just exported ones) are considered.
+The function <code>NewFileDoc</code> was removed, and the function
+<code>CommentText</code> has become the method
+<a href="/pkg/go/ast/#Text"><code>Text</code></a> of
+<a href="/pkg/go/ast/#CommentGroup"><code>ast.CommentGroup</code></a>.
+</p>
+
+<p>
+In package <a href="/pkg/go/token/"><code>go/token</code></a>, the
+<a href="/pkg/go/token/#FileSet"><code>token.FileSet</code></a> method <code>Files</code>
+(which originally returned a channel of <code>*token.File</code>s) has been replaced
+with the iterator <a href="/pkg/go/token/#FileSet.Iterate"><code>Iterate</code></a> that
+accepts a function argument instead.
+</p>
+
+<p>
+In package <a href="/pkg/go/build/"><code>go/build</code></a>, the API
+has been nearly completely replaced.
+The package still computes Go package information
+but it does not run the build: the <code>Cmd</code> and <code>Script</code>
+types are gone.
+(To build code, use the new
+<a href="/cmd/go/"><code>go</code></a> command instead.)
+The <code>DirInfo</code> type is now named
+<a href="/pkg/go/build/#Package"><code>Package</code></a>.
+<code>FindTree</code> and <code>ScanDir</code> are replaced by
+<a href="/pkg/go/build/#Import"><code>Import</code></a>
+and
+<a href="/pkg/go/build/#ImportDir"><code>ImportDir</code></a>.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses packages in <code>go</code> will have to be updated by hand; the
+compiler will reject incorrect uses. Templates used in conjunction with any of the
+<code>go/doc</code> types may need manual fixes; the renamed fields will lead
+to run-time errors.
+</p>
+
+<h3 id="hash">The hash package</h3>
+
+<p>
+In Go 1, the definition of <a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> includes
+a new method, <code>BlockSize</code>. This new method is used primarily in the
+cryptographic libraries.
+</p>
+
+<p>
+The <code>Sum</code> method of the
+<a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> interface now takes a
+<code>[]byte</code> argument, to which the hash value will be appended.
+The previous behavior can be recreated by adding a <code>nil</code> argument to the call.
+</p>
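+
+<p>
+For instance, with a SHA-1 hash (assuming <code>data</code> is a byte slice),
+passing <code>nil</code> reproduces the old behavior:
+</p>
+
+<pre>
+h := sha1.New()
+h.Write(data)
+fmt.Printf("%x\n", h.Sum(nil)) // a non-nil argument would have the hash appended to it
+</pre>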
+
+<p>
+<em>Updating</em>:
+Existing implementations of <code>hash.Hash</code> will need to add a
+<code>BlockSize</code> method. Hashes that process the input one byte at
+a time can implement <code>BlockSize</code> to return 1.
+Running <code>go</code> <code>fix</code> will update calls to the <code>Sum</code> methods of the various
+implementations of <code>hash.Hash</code>.
+</p>
+
+<h3 id="http">The http package</h3>
+
+<p>
+In Go 1 the <a href="/pkg/net/http/"><code>http</code></a> package is refactored,
+putting some of the utilities into a
+<a href="/pkg/net/http/httputil/"><code>httputil</code></a> subdirectory.
+These pieces are only rarely needed by HTTP clients.
+The affected items are:
+</p>
+
+<ul>
+<li>ClientConn</li>
+<li>DumpRequest</li>
+<li>DumpRequestOut</li>
+<li>DumpResponse</li>
+<li>NewChunkedReader</li>
+<li>NewChunkedWriter</li>
+<li>NewClientConn</li>
+<li>NewProxyClientConn</li>
+<li>NewServerConn</li>
+<li>NewSingleHostReverseProxy</li>
+<li>ReverseProxy</li>
+<li>ServerConn</li>
+</ul>
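+
+<p>
+For instance, a reverse proxy now comes from the new subdirectory; the target
+address below is a placeholder:
+</p>
+
+<pre>
+target, err := url.Parse("http://127.0.0.1:8080")
+if err != nil {
+    log.Fatal(err)
+}
+proxy := httputil.NewSingleHostReverseProxy(target) // formerly in package http
+http.Handle("/", proxy)
+</pre>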
+
+<p>
+The <code>Request.RawURL</code> field has been removed; it was a
+historical artifact.
+</p>
+
+<p>
+The <code>Handle</code> and <code>HandleFunc</code>
+functions, and the similarly-named methods of <code>ServeMux</code>,
+now panic if an attempt is made to register the same pattern twice.
+</p>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update the few programs that are affected except for
+uses of <code>RawURL</code>, which must be fixed by hand.
+</p>
+
+<h3 id="image">The image package</h3>
+
+<p>
+The <a href="/pkg/image/"><code>image</code></a> package has had a number of
+minor changes, rearrangements and renamings.
+</p>
+
+<p>
+Most of the color handling code has been moved into its own package,
+<a href="/pkg/image/color/"><code>image/color</code></a>.
+For the elements that moved, a symmetry arises; for instance,
+each pixel of an
+<a href="/pkg/image/#RGBA"><code>image.RGBA</code></a>
+is a
+<a href="/pkg/image/color/#RGBA"><code>color.RGBA</code></a>.
+</p>
+
+<p>
+The old <code>image/ycbcr</code> package has been folded, with some
+renamings, into the
+<a href="/pkg/image/"><code>image</code></a>
+and
+<a href="/pkg/image/color/"><code>image/color</code></a>
+packages.
+</p>
+
+<p>
+The old <code>image.ColorImage</code> type is still in the <code>image</code>
+package but has been renamed
+<a href="/pkg/image/#Uniform"><code>image.Uniform</code></a>,
+while <code>image.Tiled</code> has been removed.
+</p>
+
+<p>
+This table lists the renamings.
+</p>
+
+<table class="codetable" frame="border" summary="image renames">
+<colgroup align="left" width="50%"></colgroup>
+<colgroup align="left" width="50%"></colgroup>
+<tr>
+<th align="left">Old</th>
+<th align="left">New</th>
+</tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>image.Color</td> <td>color.Color</td></tr>
+<tr><td>image.ColorModel</td> <td>color.Model</td></tr>
+<tr><td>image.ColorModelFunc</td> <td>color.ModelFunc</td></tr>
+<tr><td>image.PalettedColorModel</td> <td>color.Palette</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>image.RGBAColor</td> <td>color.RGBA</td></tr>
+<tr><td>image.RGBA64Color</td> <td>color.RGBA64</td></tr>
+<tr><td>image.NRGBAColor</td> <td>color.NRGBA</td></tr>
+<tr><td>image.NRGBA64Color</td> <td>color.NRGBA64</td></tr>
+<tr><td>image.AlphaColor</td> <td>color.Alpha</td></tr>
+<tr><td>image.Alpha16Color</td> <td>color.Alpha16</td></tr>
+<tr><td>image.GrayColor</td> <td>color.Gray</td></tr>
+<tr><td>image.Gray16Color</td> <td>color.Gray16</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>image.RGBAColorModel</td> <td>color.RGBAModel</td></tr>
+<tr><td>image.RGBA64ColorModel</td> <td>color.RGBA64Model</td></tr>
+<tr><td>image.NRGBAColorModel</td> <td>color.NRGBAModel</td></tr>
+<tr><td>image.NRGBA64ColorModel</td> <td>color.NRGBA64Model</td></tr>
+<tr><td>image.AlphaColorModel</td> <td>color.AlphaModel</td></tr>
+<tr><td>image.Alpha16ColorModel</td> <td>color.Alpha16Model</td></tr>
+<tr><td>image.GrayColorModel</td> <td>color.GrayModel</td></tr>
+<tr><td>image.Gray16ColorModel</td> <td>color.Gray16Model</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>ycbcr.RGBToYCbCr</td> <td>color.RGBToYCbCr</td></tr>
+<tr><td>ycbcr.YCbCrToRGB</td> <td>color.YCbCrToRGB</td></tr>
+<tr><td>ycbcr.YCbCrColorModel</td> <td>color.YCbCrModel</td></tr>
+<tr><td>ycbcr.YCbCrColor</td> <td>color.YCbCr</td></tr>
+<tr><td>ycbcr.YCbCr</td> <td>image.YCbCr</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>ycbcr.SubsampleRatio444</td> <td>image.YCbCrSubsampleRatio444</td></tr>
+<tr><td>ycbcr.SubsampleRatio422</td> <td>image.YCbCrSubsampleRatio422</td></tr>
+<tr><td>ycbcr.SubsampleRatio420</td> <td>image.YCbCrSubsampleRatio420</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>image.ColorImage</td> <td>image.Uniform</td></tr>
+</table>
+
+<p>
+The image package's <code>New</code> functions
+(<a href="/pkg/image/#NewRGBA"><code>NewRGBA</code></a>,
+<a href="/pkg/image/#NewRGBA64"><code>NewRGBA64</code></a>, etc.)
+take an <a href="/pkg/image/#Rectangle"><code>image.Rectangle</code></a> as an argument
+instead of four integers.
+</p>
+
+<p>
+Finally, there are new predefined <code>color.Color</code> variables
+<a href="/pkg/image/color/#Black"><code>color.Black</code></a>,
+<a href="/pkg/image/color/#White"><code>color.White</code></a>,
+<a href="/pkg/image/color/#Opaque"><code>color.Opaque</code></a>
+and
+<a href="/pkg/image/color/#Transparent"><code>color.Transparent</code></a>.
+</p>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
+</p>
+
+<h3 id="log_syslog">The log/syslog package</h3>
+
+<p>
+In Go 1, the <a href="/pkg/log/syslog/#NewLogger"><code>syslog.NewLogger</code></a>
+function returns an error as well as a <code>log.Logger</code>.
+</p>
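+
+<p>
+A minimal sketch of the new call site:
+</p>
+
+<pre>
+logger, err := syslog.NewLogger(syslog.LOG_INFO, log.LstdFlags)
+if err != nil {
+    log.Fatal(err)
+}
+logger.Print("connected to the system log")
+</pre>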
+
+<p>
+<em>Updating</em>:
+What little code is affected will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="mime">The mime package</h3>
+
+<p>
+In Go 1, the <a href="/pkg/mime/#FormatMediaType"><code>FormatMediaType</code></a> function
+of the <code>mime</code> package has been simplified to make it
+consistent with
+<a href="/pkg/mime/#ParseMediaType"><code>ParseMediaType</code></a>.
+It now takes <code>"text/html"</code> rather than <code>"text"</code> and <code>"html"</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+What little code is affected will be caught by the compiler and must be updated by hand.
+</p>
+
+<h3 id="net">The net package</h3>
+
+<p>
+In Go 1, the various <code>SetTimeout</code>,
+<code>SetReadTimeout</code>, and <code>SetWriteTimeout</code> methods
+have been replaced with
+<a href="/pkg/net/#IPConn.SetDeadline"><code>SetDeadline</code></a>,
+<a href="/pkg/net/#IPConn.SetReadDeadline"><code>SetReadDeadline</code></a>, and
+<a href="/pkg/net/#IPConn.SetWriteDeadline"><code>SetWriteDeadline</code></a>,
+respectively. Rather than taking a timeout value in nanoseconds that
+applies to any activity on the connection, the new methods set an
+absolute deadline (as a <code>time.Time</code> value) after which
+reads and writes will time out and no longer block.
+</p>
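+
+<p>
+A sketch of the new style; the address is a placeholder:
+</p>
+
+<pre>
+conn, err := net.Dial("tcp", "example.com:80")
+if err != nil {
+    log.Fatal(err)
+}
+// Formerly a relative timeout in nanoseconds; now an absolute moment.
+conn.SetReadDeadline(time.Now().Add(5 * time.Second))
+</pre>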
+
+<p>
+There are also new functions
+<a href="/pkg/net/#DialTimeout"><code>net.DialTimeout</code></a>
+to simplify timing out dialing a network address and
+<a href="/pkg/net/#ListenMulticastUDP"><code>net.ListenMulticastUDP</code></a>
+to allow multicast UDP to listen concurrently across multiple listeners.
+The <code>net.ListenMulticastUDP</code> function replaces the old
+<code>JoinGroup</code> and <code>LeaveGroup</code> methods.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses the old methods will fail to compile and must be updated by hand.
+The semantic change makes it difficult for the fix tool to update automatically.
+</p>
+
+<h3 id="os">The os package</h3>
+
+<p>
+The <code>Time</code> function has been removed; callers should use
+the <a href="/pkg/time/#Time"><code>Time</code></a> type from the
+<code>time</code> package.
+</p>
+
+<p>
+The <code>Exec</code> function has been removed; callers should use
+<code>Exec</code> from the <code>syscall</code> package, where available.
+</p>
+
+<p>
+The <code>ShellExpand</code> function has been renamed to <a
+href="/pkg/os/#ExpandEnv"><code>ExpandEnv</code></a>.
+</p>
+
+<p>
+The <a href="/pkg/os/#NewFile"><code>NewFile</code></a> function
+now takes a <code>uintptr</code> fd, instead of an <code>int</code>.
+The <a href="/pkg/os/#File.Fd"><code>Fd</code></a> method on files now
+also returns a <code>uintptr</code>.
+</p>
+
+<p>
+There are no longer error constants such as <code>EINVAL</code>
+in the <code>os</code> package, since the set of values varied with
+the underlying operating system. There are new portable functions like
+<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>
+to test common error properties, plus a few new error values
+with more Go-like names, such as
+<a href="/pkg/os/#ErrPermission"><code>ErrPermission</code></a>
+and
+<a href="/pkg/os/#ErrNoEnv"><code>ErrNoEnv</code></a>.
+</p>
+
+<p>
+The <code>Getenverror</code> function has been removed. To distinguish
+between a non-existent environment variable and an empty string,
+use <a href="/pkg/os/#Environ"><code>os.Environ</code></a> or
+<a href="/pkg/syscall/#Getenv"><code>syscall.Getenv</code></a>.
+</p>
+
+
+<p>
+The <a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a> method has
+dropped its option argument and the associated constants are gone
+from the package.
+Also, the function <code>Wait</code> is gone; only the method of
+the <code>Process</code> type persists.
+</p>
+
+<p>
+The <code>Waitmsg</code> type returned by
+<a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a>
+has been replaced with a more portable
+<a href="/pkg/os/#ProcessState"><code>ProcessState</code></a>
+type with accessor methods to recover information about the
+process.
+Because of changes to <code>Wait</code>, the <code>ProcessState</code>
+value always describes an exited process.
+Portability concerns simplified the interface in other ways, but the values returned by the
+<a href="/pkg/os/#ProcessState.Sys"><code>ProcessState.Sys</code></a> and
+<a href="/pkg/os/#ProcessState.SysUsage"><code>ProcessState.SysUsage</code></a>
+methods can be type-asserted to underlying system-specific data structures such as
+<a href="/pkg/syscall/#WaitStatus"><code>syscall.WaitStatus</code></a> and
+<a href="/pkg/syscall/#Rusage"><code>syscall.Rusage</code></a> on Unix.
+</p>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will drop a zero argument to <code>Process.Wait</code>.
+All other changes will be caught by the compiler and must be updated by hand.
+</p>
+
+<h4 id="os_fileinfo">The os.FileInfo type</h4>
+
+<p>
+Go 1 redefines the <a href="/pkg/os/#FileInfo"><code>os.FileInfo</code></a> type,
+changing it from a struct to an interface:
+</p>
+
+<pre>
+ type FileInfo interface {
+ Name() string // base name of the file
+ Size() int64 // length in bytes
+ Mode() FileMode // file mode bits
+ ModTime() time.Time // modification time
+ IsDir() bool // abbreviation for Mode().IsDir()
+ Sys() interface{} // underlying data source (can return nil)
+ }
+</pre>
+
+<p>
+The file mode information has been moved into a subtype called
+<a href="/pkg/os/#FileMode"><code>os.FileMode</code></a>,
+a simple integer type with <code>IsDir</code>, <code>Perm</code>, and <code>String</code>
+methods.
+</p>
+
+<p>
+The system-specific details of file modes and properties such as (on Unix)
+i-number have been removed from <code>FileInfo</code> altogether.
+Instead, each operating system's <code>os</code> package provides an
+implementation of the <code>FileInfo</code> interface, which
+has a <code>Sys</code> method that returns the
+system-specific representation of file metadata.
+For instance, to discover the i-number of a file on a Unix system, unpack
+the <code>FileInfo</code> like this:
+</p>
+
+<pre>
+ fi, err := os.Stat("hello.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Check that it's a Unix file.
+ unixStat, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ log.Fatal("hello.go: not a Unix file")
+ }
+ fmt.Printf("file i-number: %d\n", unixStat.Ino)
+</pre>
+
+<p>
+Assuming (which is unwise) that <code>"hello.go"</code> is a Unix file,
+the i-number expression could be contracted to
+</p>
+
+<pre>
+ fi.Sys().(*syscall.Stat_t).Ino
+</pre>
+
+<p>
+The vast majority of uses of <code>FileInfo</code> need only the methods
+of the standard interface.
+</p>
+
+<p>
+The <code>os</code> package no longer contains wrappers for the POSIX errors
+such as <code>ENOENT</code>.
+For the few programs that need to verify particular error conditions, there are
+now the boolean functions
+<a href="/pkg/os/#IsExist"><code>IsExist</code></a>,
+<a href="/pkg/os/#IsNotExist"><code>IsNotExist</code></a>
+and
+<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/os\.Open/` `/}/`}}
+--> f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ log.Printf(&#34;%s already exists&#34;, name)
+ }</pre>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update code that uses the old equivalent of the current <code>os.FileInfo</code>
+and <code>os.FileMode</code> API.
+Code that needs system-specific file details will need to be updated by hand.
+Code that uses the old POSIX error values from the <code>os</code> package
+will fail to compile and will also need to be updated by hand.
+</p>
+
+<h3 id="os_signal">The os/signal package</h3>
+
+<p>
+The <code>os/signal</code> package in Go 1 replaces the
+<code>Incoming</code> function, which returned a channel
+that received all incoming signals,
+with the selective <code>Notify</code> function, which asks
+for delivery of specific signals on an existing channel.
+</p>
+
+<p>
+<em>Updating</em>:
+Code must be updated by hand.
+A literal translation of
+</p>
+<pre>
+c := signal.Incoming()
+</pre>
+<p>
+is
+</p>
+<pre>
+c := make(chan os.Signal)
+signal.Notify(c) // ask for all signals
+</pre>
+<p>
+but most code should list the specific signals it wants to handle instead:
+</p>
+<pre>
+c := make(chan os.Signal)
+signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT)
+</pre>
+
+<h3 id="path_filepath">The path/filepath package</h3>
+
+<p>
+In Go 1, the <a href="/pkg/path/filepath/#Walk"><code>Walk</code></a> function of the
+<code>path/filepath</code> package
+has been changed to take a function value of type
+<a href="/pkg/path/filepath/#WalkFunc"><code>WalkFunc</code></a>
+instead of a <code>Visitor</code> interface value.
+<code>WalkFunc</code> unifies the handling of both files and directories.
+</p>
+
+<pre>
+ type WalkFunc func(path string, info os.FileInfo, err error) error
+</pre>
+
+<p>
+The <code>WalkFunc</code> function will be called even for files or directories that could not be opened;
+in such cases the error argument will describe the failure.
+If a directory's contents are to be skipped,
+the function should return the value <a href="/pkg/path/filepath/#variables"><code>filepath.SkipDir</code></a>.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/STARTWALK/` `/ENDWALK/`}}
+--> markFn := func(path string, info os.FileInfo, err error) error {
+ if path == &#34;pictures&#34; { <span class="comment">// Will skip walking of directory pictures and its contents.</span>
+ return filepath.SkipDir
+ }
+ if err != nil {
+ return err
+ }
+ log.Println(path)
+ return nil
+ }
+ err := filepath.Walk(&#34;.&#34;, markFn)
+ if err != nil {
+ log.Fatal(err)
+ }</pre>
+
+<p>
+<em>Updating</em>:
+The change simplifies most code but has subtle consequences, so affected programs
+will need to be updated by hand.
+The compiler will catch code using the old interface.
+</p>
+
+<h3 id="regexp">The regexp package</h3>
+
+<p>
+The <a href="/pkg/regexp/"><code>regexp</code></a> package has been rewritten.
+It has the same interface but the specification of the regular expressions
+it supports has changed from the old "egrep" form to that of
+<a href="http://code.google.com/p/re2/">RE2</a>.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses the package should have its regular expressions checked by hand.
+</p>
+
+<h3 id="runtime">The runtime package</h3>
+
+<p>
+In Go 1, much of the API exported by package
+<code>runtime</code> has been removed in favor of
+functionality provided by other packages.
+Code using the <code>runtime.Type</code> interface
+or its specific concrete type implementations should
+now use package <a href="/pkg/reflect/"><code>reflect</code></a>.
+Code using <code>runtime.Semacquire</code> or <code>runtime.Semrelease</code>
+should use channels or the abstractions in package <a href="/pkg/sync/"><code>sync</code></a>.
+The <code>runtime.Alloc</code>, <code>runtime.Free</code>,
+and <code>runtime.Lookup</code> functions, an unsafe API created for
+debugging the memory allocator, have no replacement.
+</p>
+
+<p>
+Before, <code>runtime.MemStats</code> was a global variable holding
+statistics about memory allocation, and calls to <code>runtime.UpdateMemStats</code>
+ensured that it was up to date.
+In Go 1, <code>runtime.MemStats</code> is a struct type, and code should use
+<a href="/pkg/runtime/#ReadMemStats"><code>runtime.ReadMemStats</code></a>
+to obtain the current statistics.
+</p>
+
+<p>
+The package adds a new function,
+<a href="/pkg/runtime/#NumCPU"><code>runtime.NumCPU</code></a>, that returns the number of CPUs available
+for parallel execution, as reported by the operating system kernel.
+Its value can inform the setting of <code>GOMAXPROCS</code>.
+The <code>runtime.Cgocalls</code> and <code>runtime.Goroutines</code> functions
+have been renamed to <code>runtime.NumCgoCall</code> and <code>runtime.NumGoroutine</code>.
+</p>
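+
+<p>
+As a brief sketch of the new calls:
+</p>
+
+<pre>
+runtime.GOMAXPROCS(runtime.NumCPU()) // use however many CPUs the kernel reports
+
+var ms runtime.MemStats
+runtime.ReadMemStats(&amp;ms) // MemStats is now a type, not a global variable
+fmt.Println("bytes allocated:", ms.Alloc)
+</pre>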
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update code for the function renamings.
+Other code will need to be updated by hand.
+</p>
+
+<h3 id="strconv">The strconv package</h3>
+
+<p>
+In Go 1, the
+<a href="/pkg/strconv/"><code>strconv</code></a>
+package has been significantly reworked to make it more Go-like and less C-like,
+although <code>Atoi</code> lives on (it's equivalent to
+<code>int(ParseInt(x, 10, 0))</code>), as does
+<code>Itoa(x)</code> (equivalent to <code>FormatInt(int64(x), 10)</code>).
+There are also new variants of some of the functions that append to byte slices rather than
+return strings, to allow control over allocation.
+</p>
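+
+<p>
+A small sketch of the new names (see the table below for the full mapping):
+</p>
+
+<pre>
+i, err := strconv.ParseInt("-42", 10, 64) // formerly Atoi64("-42")
+if err != nil {
+    log.Fatal(err)
+}
+fmt.Println(strconv.FormatInt(i, 16))              // formerly Itob64(i, 16)
+fmt.Println(string(strconv.AppendInt(nil, i, 10))) // new append variant; avoids an allocation
+</pre>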
+
+<p>
+This table summarizes the renamings; see the
+<a href="/pkg/strconv/">package documentation</a>
+for full details.
+</p>
+
+<table class="codetable" frame="border" summary="strconv renames">
+<colgroup align="left" width="50%"></colgroup>
+<colgroup align="left" width="50%"></colgroup>
+<tr>
+<th align="left">Old call</th>
+<th align="left">New call</th>
+</tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Atob(x)</td> <td>ParseBool(x)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Atof32(x)</td> <td>ParseFloat(x, 32)§</td></tr>
+<tr><td>Atof64(x)</td> <td>ParseFloat(x, 64)</td></tr>
+<tr><td>AtofN(x, n)</td> <td>ParseFloat(x, n)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Atoi(x)</td> <td>Atoi(x)</td></tr>
+<tr><td>Atoi(x)</td> <td>ParseInt(x, 10, 0)§</td></tr>
+<tr><td>Atoi64(x)</td> <td>ParseInt(x, 10, 64)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Atoui(x)</td> <td>ParseUint(x, 10, 0)§</td></tr>
+<tr><td>Atoui64(x)</td> <td>ParseUint(x, 10, 64)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Btoi64(x, b)</td> <td>ParseInt(x, b, 64)</td></tr>
+<tr><td>Btoui64(x, b)</td> <td>ParseUint(x, b, 64)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Btoa(x)</td> <td>FormatBool(x)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Ftoa32(x, f, p)</td> <td>FormatFloat(float64(x), f, p, 32)</td></tr>
+<tr><td>Ftoa64(x, f, p)</td> <td>FormatFloat(x, f, p, 64)</td></tr>
+<tr><td>FtoaN(x, f, p, n)</td> <td>FormatFloat(x, f, p, n)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Itoa(x)</td> <td>Itoa(x)</td></tr>
+<tr><td>Itoa(x)</td> <td>FormatInt(int64(x), 10)</td></tr>
+<tr><td>Itoa64(x)</td> <td>FormatInt(x, 10)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Itob(x, b)</td> <td>FormatInt(int64(x), b)</td></tr>
+<tr><td>Itob64(x, b)</td> <td>FormatInt(x, b)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Uitoa(x)</td> <td>FormatUint(uint64(x), 10)</td></tr>
+<tr><td>Uitoa64(x)</td> <td>FormatUint(x, 10)</td></tr>
+<tr>
+<td colspan="2"><hr></td>
+</tr>
+<tr><td>Uitob(x, b)</td> <td>FormatUint(uint64(x), b)</td></tr>
+<tr><td>Uitob64(x, b)</td> <td>FormatUint(x, b)</td></tr>
+</table>
+
+<p>
+<em>Updating</em>:
+Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
+<br>
+§ <code>Atoi</code> persists but <code>Atoui</code> and <code>Atof32</code> do not, so
+they may require
+a cast that must be added by hand; the <code>go</code> <code>fix</code> tool will warn about it.
+</p>
+
+
+<h3 id="templates">The template packages</h3>
+
+<p>
+The <code>template</code> and <code>exp/template/html</code> packages have moved to
+<a href="/pkg/text/template/"><code>text/template</code></a> and
+<a href="/pkg/html/template/"><code>html/template</code></a>.
+More significant, the interface to these packages has been simplified.
+The template language is the same, but the concept of "template set" is gone
+and the functions and methods of the packages have changed accordingly,
+often by elimination.
+</p>
+
+<p>
+Instead of sets, a <code>Template</code> object
+may contain multiple named template definitions,
+in effect constructing
+name spaces for template invocation.
+A template can invoke any other template associated with it, but only those
+templates associated with it.
+The simplest way to associate templates is to parse them together, something
+made easier with the new structure of the packages.
+</p>
+
+<p>
+<em>Updating</em>:
+The imports will be updated by the fix tool.
+Single-template uses will otherwise be largely unaffected.
+Code that uses multiple templates in concert will need to be updated by hand.
+The <a href="/pkg/text/template/#examples">examples</a> in
+the documentation for <code>text/template</code> can provide guidance.
+</p>
+
+<h3 id="testing">The testing package</h3>
+
+<p>
+The testing package has a type, <code>B</code>, passed as an argument to benchmark functions.
+In Go 1, <code>B</code> has new methods, analogous to those of <code>T</code>, enabling
+logging and failure reporting.
+</p>
+
+<pre><!--{{code "/doc/progs/go1.go" `/func.*Benchmark/` `/^}/`}}
+-->func BenchmarkSprintf(b *testing.B) {
+ <span class="comment">// Verify correctness before running benchmark.</span>
+ b.StopTimer()
+ got := fmt.Sprintf(&#34;%x&#34;, 23)
+ const expect = &#34;17&#34;
+ if expect != got {
+ b.Fatalf(&#34;expected %q; got %q&#34;, expect, got)
+ }
+ b.StartTimer()
+ for i := 0; i &lt; b.N; i++ {
+ fmt.Sprintf(&#34;%x&#34;, 23)
+ }
+}</pre>
+
+<p>
+<em>Updating</em>:
+Existing code is unaffected, although benchmarks that use <code>println</code>
+or <code>panic</code> should be updated to use the new methods.
+</p>
+
+<h3 id="testing_script">The testing/script package</h3>
+
+<p>
+The testing/script package has been deleted. It was a dreg.
+</p>
+
+<p>
+<em>Updating</em>:
+No code is likely to be affected.
+</p>
+
+<h3 id="unsafe">The unsafe package</h3>
+
+<p>
+In Go 1, the functions
+<code>unsafe.Typeof</code>, <code>unsafe.Reflect</code>,
+<code>unsafe.Unreflect</code>, <code>unsafe.New</code>, and
+<code>unsafe.NewArray</code> have been removed;
+they duplicated safer functionality provided by
+package <a href="/pkg/reflect/"><code>reflect</code></a>.
+</p>
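+
+<p>
+A hedged sketch (not from the original text), assuming the usual mapping of
+<code>unsafe.Typeof</code> to <code>reflect.TypeOf</code> and
+<code>unsafe.New</code> to <code>reflect.New</code>:
+</p>
+
+<pre>
+t := reflect.TypeOf(0)      // was unsafe.Typeof(0)
+p := reflect.New(t)         // was unsafe.New(t); yields a reflect.Value
+p.Elem().SetInt(42)
+fmt.Println(p.Elem().Int()) // 42
+</pre>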
+
+<p>
+<em>Updating</em>:
+Code using these functions must be rewritten to use
+package <a href="/pkg/reflect/"><code>reflect</code></a>.
+The changes to <a href="http://code.google.com/p/go/source/detail?r=2646dc956207">encoding/gob</a> and the <a href="http://code.google.com/p/goprotobuf/source/detail?r=5340ad310031">protocol buffer library</a>
+may be helpful as examples.
+</p>
+
+<h3 id="url">The url package</h3>
+
+<p>
+In Go 1 several fields from the <a href="/pkg/net/url/#URL"><code>url.URL</code></a> type
+were removed or replaced.
+</p>
+
+<p>
+The <a href="/pkg/net/url/#URL.String"><code>String</code></a> method now
+predictably rebuilds an encoded URL string using all of <code>URL</code>'s
+fields as necessary. The resulting string will also no longer have
+passwords escaped.
+</p>
+
+<p>
+The <code>Raw</code> field has been removed. In most cases the <code>String</code>
+method may be used in its place.
+</p>
+
+<p>
+The old <code>RawUserinfo</code> field is replaced by the <code>User</code>
+field, of type <a href="/pkg/net/url/#Userinfo"><code>*url.Userinfo</code></a>.
+Values of this type may be created using the new <a href="/pkg/net/url/#User"><code>url.User</code></a>
+and <a href="/pkg/net/url/#UserPassword"><code>url.UserPassword</code></a>
+functions. The <code>EscapeUserinfo</code> and <code>UnescapeUserinfo</code>
+functions are also gone.
+</p>
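+
+<p>
+A small sketch (added for illustration; the host and credentials are
+invented) of building a URL with the new <code>User</code> field:
+</p>
+
+<pre>
+u := &amp;url.URL{
+    Scheme: "http",
+    User:   url.UserPassword("me", "secret"),
+    Host:   "example.com",
+    Path:   "/",
+}
+fmt.Println(u.String()) // typically http://me:secret@example.com/
+</pre>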
+
+<p>
+The <code>RawAuthority</code> field has been removed. The same information is
+available in the <code>Host</code> and <code>User</code> fields.
+</p>
+
+<p>
+The <code>RawPath</code> field and the <code>EncodedPath</code> method have
+been removed. The path information in rooted URLs (with a slash following the
+scheme) is now available only in decoded form in the <code>Path</code> field.
+Occasionally, the encoded data may be required to obtain information that
+was lost in the decoding process. These cases must be handled by accessing
+the data the URL was built from.
+</p>
+
+<p>
+URLs with non-rooted paths, such as <code>"mailto:dev@golang.org?subject=Hi"</code>,
+are also handled differently. The <code>OpaquePath</code> boolean field has been
+removed and a new <code>Opaque</code> string field introduced to hold the encoded
+path for such URLs. In Go 1, the cited URL parses as:
+</p>
+
+<pre>
+ URL{
+ Scheme: "mailto",
+ Opaque: "dev@golang.org",
+ RawQuery: "subject=Hi",
+ }
+</pre>
+
+<p>
+A new <a href="/pkg/net/url/#URL.RequestURI"><code>RequestURI</code></a> method was
+added to <code>URL</code>.
+</p>
+
+<p>
+The <code>ParseWithReference</code> function has been renamed to <code>ParseWithFragment</code>.
+</p>
+
+<p>
+<em>Updating</em>:
+Code that uses the old fields will fail to compile and must be updated by hand.
+The semantic changes make it difficult for the fix tool to update automatically.
+</p>
+
+<h2 id="cmd_go">The go command</h2>
+
+<p>
+Go 1 introduces the <a href="/cmd/go/">go command</a>, a tool for fetching,
+building, and installing Go packages and commands. The <code>go</code> command
+does away with makefiles, instead using Go source code to find dependencies and
+determine build conditions. Most existing Go programs will no longer require
+makefiles to be built.
+</p>
+
+<p>
+See <a href="/doc/code.html">How to Write Go Code</a> for a primer on the
+<code>go</code> command and the <a href="/cmd/go/">go command documentation</a>
+for the full details.
+</p>
+
+<p>
+<em>Updating</em>:
+Projects that depend on the Go project's old makefile-based build
+infrastructure (<code>Make.pkg</code>, <code>Make.cmd</code>, and so on) should
+switch to using the <code>go</code> command for building Go code and, if
+necessary, rewrite their makefiles to perform any auxiliary build tasks.
+</p>
+
+<h2 id="cmd_cgo">The cgo command</h2>
+
+<p>
+In Go 1, the <a href="/cmd/cgo">cgo command</a>
+uses a different <code>_cgo_export.h</code>
+file, which is generated for packages containing <code>//export</code> lines.
+The <code>_cgo_export.h</code> file now begins with the C preamble comment,
+so that exported function definitions can use types defined there.
+This has the effect of compiling the preamble multiple times, so a
+package using <code>//export</code> must not put function definitions
+or variable initializations in the C preamble.
+</p>
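+
+<p>
+A hedged sketch (the package and function names are invented) of a package
+that uses <code>//export</code> and keeps only declarations in its preamble,
+since the preamble is now compiled wherever <code>_cgo_export.h</code> is
+included:
+</p>
+
+<pre>
+package echo
+
+/*
+// Declarations only: definitions placed here would now be
+// compiled more than once and cause duplicate symbols.
+extern void log_line(const char* s);
+*/
+import "C"
+
+//export Answer
+func Answer() C.int { return 42 }
+</pre>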
+
+<h2 id="releases">Packaged releases</h2>
+
+<p>
+One of the most significant changes associated with Go 1 is the availability
+of prepackaged, downloadable distributions.
+They are available for many combinations of architecture and operating system
+(including Windows) and the list will grow.
+Installation details are described on the
+<a href="/doc/install">Getting Started</a> page, while
+the distributions themselves are listed on the
+<a href="http://code.google.com/p/go/downloads/list">downloads page</a>.
+
+
+</div>
+
+<div id="footer">
+Build version go1.0.1.<br>
+A link <a href="http://code.google.com/policies.html#restrictions">noted</a>,
+and then, coming up on the very next line, we will
+find yet another link, link 3.0 if you will,
+after a few more words <a href="/LINK">link text</a>.<br>
+<a href="/doc/tos.html">Terms of Service</a> |
+<a href="http://www.google.com/intl/en/privacy/privacy-policy.html">Privacy Policy</a>
+</div>
+
+<script type="text/javascript">
+(function() {
+ var ga = document.createElement("script"); ga.type = "text/javascript"; ga.async = true;
+ ga.src = ("https:" == document.location.protocol ? "https://ssl" : "http://www") + ".google-analytics.com/ga.js";
+ var s = document.getElementsByTagName("script")[0]; s.parentNode.insertBefore(ga, s);
+})();
+</script>
+</body>
+<script type="text/javascript">
+ (function() {
+ var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;
+ po.src = 'https://apis.google.com/js/plusone.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
+ })();
+</script>
+</html>
+
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/README b/vendor/golang.org/x/net/html/testdata/webkit/README
new file mode 100644
index 000000000..9b4c2d8be
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/README
@@ -0,0 +1,28 @@
+The *.dat files in this directory are copied from The WebKit Open Source
+Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources.
+WebKit is licensed under a BSD style license.
+http://webkit.org/coding/bsd-license.html says:
+
+Copyright (C) 2009 Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat b/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat
new file mode 100644
index 000000000..787e1b01e
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat
@@ -0,0 +1,194 @@
+#data
+<a><p></a></p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+
+#data
+<a>1<p>2</a>3</p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <p>
+| <a>
+| "2"
+| "3"
+
+#data
+<a>1<button>2</a>3</button>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <button>
+| <a>
+| "2"
+| "3"
+
+#data
+<a>1<b>2</a>3</b>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <b>
+| "2"
+| <b>
+| "3"
+
+#data
+<a>1<div>2<div>3</a>4</div>5</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <div>
+| <a>
+| "2"
+| <div>
+| <a>
+| "3"
+| "4"
+| "5"
+
+#data
+<table><a>1<p>2</a>3</p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <p>
+| <a>
+| "2"
+| "3"
+| <table>
+
+#data
+<b><b><a><p></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <b>
+| <a>
+| <p>
+| <a>
+
+#data
+<b><a><b><p></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <a>
+| <b>
+| <b>
+| <p>
+| <a>
+
+#data
+<a><b><b><p></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <b>
+| <b>
+| <p>
+| <a>
+
+#data
+<p>1<s id="A">2<b id="B">3</p>4</s>5</b>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "1"
+| <s>
+| id="A"
+| "2"
+| <b>
+| id="B"
+| "3"
+| <s>
+| id="A"
+| <b>
+| id="B"
+| "4"
+| <b>
+| id="B"
+| "5"
+
+#data
+<table><a>1<td>2</td>3</table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <a>
+| "3"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "2"
+
+#data
+<table>A<td>B</td>C</table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "AC"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "B"
+
+#data
+<a><svg><tr><input></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <svg svg>
+| <svg tr>
+| <svg input>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat b/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat
new file mode 100644
index 000000000..d18151b44
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat
@@ -0,0 +1,31 @@
+#data
+<b>1<i>2<p>3</b>4
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "1"
+| <i>
+| "2"
+| <i>
+| <p>
+| <b>
+| "3"
+| "4"
+
+#data
+<a><div><style></style><address><a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <div>
+| <a>
+| <style>
+| <address>
+| <a>
+| <a>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat b/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat
new file mode 100644
index 000000000..44f187683
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat
@@ -0,0 +1,135 @@
+#data
+FOO<!-- BAR -->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -->
+| "BAZ"
+
+#data
+FOO<!-- BAR --!>BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- >BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- >BAZ -->
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX -->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX --!>BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX -- >BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -- >BAZ -->
+
+#data
+FOO<!---->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+FOO<!--->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+FOO<!-->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+<?xml version="1.0">Hi
+#errors
+#document
+| <!-- ?xml version="1.0" -->
+| <html>
+| <head>
+| <body>
+| "Hi"
+
+#data
+<?xml version="1.0">
+#errors
+#document
+| <!-- ?xml version="1.0" -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?xml version
+#errors
+#document
+| <!-- ?xml version -->
+| <html>
+| <head>
+| <body>
+
+#data
+FOO<!----->BAZ
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- - -->
+| "BAZ"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat b/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat
new file mode 100644
index 000000000..ae457328a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat
@@ -0,0 +1,370 @@
+#data
+<!DOCTYPE html>Hello
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!dOctYpE HtMl>Hello
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPEhtml>Hello
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE>Hello
+#errors
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE >Hello
+#errors
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato >Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato taco>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato taco "ddd>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM >Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM ggg>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM taco >Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM 'taco"'>Hello
+#errors
+#document
+| <!DOCTYPE potato "" "taco"">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM "taco">Hello
+#errors
+#document
+| <!DOCTYPE potato "" "taco">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM "tai'co">Hello
+#errors
+#document
+| <!DOCTYPE potato "" "tai'co">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEMtaco "ddd">Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato grass SYSTEM taco>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIc>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIc >Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIcgoof>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC goof>Hello
+#errors
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC "go'of">Hello
+#errors
+#document
+| <!DOCTYPE potato "go'of" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC 'go'of'>Hello
+#errors
+#document
+| <!DOCTYPE potato "go" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC 'go:hh of' >Hello
+#errors
+#document
+| <!DOCTYPE potato "go:hh of" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC "W3C-//dfdf" SYSTEM ggg>Hello
+#errors
+#document
+| <!DOCTYPE potato "W3C-//dfdf" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">Hello
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE ...>Hello
+#errors
+#document
+| <!DOCTYPE ...>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE root-element [SYSTEM OR PUBLIC FPI] "uri" [
+<!-- internal declarations -->
+]>
+#errors
+#document
+| <!DOCTYPE root-element>
+| <html>
+| <head>
+| <body>
+| "]>"
+
+#data
+<!DOCTYPE html PUBLIC
+ "-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
+ "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
+#errors
+#document
+| <!DOCTYPE html "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML SYSTEM "http://www.w3.org/DTD/HTML4-strict.dtd"><body><b>Mine!</b></body>
+#errors
+#document
+| <!DOCTYPE html "" "http://www.w3.org/DTD/HTML4-strict.dtd">
+| <html>
+| <head>
+| <body>
+| <b>
+| "Mine!"
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN""http://www.w3.org/TR/html4/strict.dtd">
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC"-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC'-//W3C//DTD HTML 4.01//EN''http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat b/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat
new file mode 100644
index 000000000..c8073b781
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat
@@ -0,0 +1,603 @@
+#data
+FOO&gt;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>BAR"
+
+#data
+FOO&gtBAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>BAR"
+
+#data
+FOO&gt BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO> BAR"
+
+#data
+FOO&gt;;;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>;;BAR"
+
+#data
+I'm &notit; I tell you
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "I'm ¬it; I tell you"
+
+#data
+I'm &notin; I tell you
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "I'm ∉ I tell you"
+
+#data
+FOO& BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO& BAR"
+
+#data
+FOO&<BAR>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&"
+| <bar>
+
+#data
+FOO&&&&gt;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&&&>BAR"
+
+#data
+FOO&#41;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO)BAR"
+
+#data
+FOO&#x41;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOABAR"
+
+#data
+FOO&#X41;BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOABAR"
+
+#data
+FOO&#BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#BAR"
+
+#data
+FOO&#ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#ZOO"
+
+#data
+FOO&#xBAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOºR"
+
+#data
+FOO&#xZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#xZOO"
+
+#data
+FOO&#XZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#XZOO"
+
+#data
+FOO&#41BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO)BAR"
+
+#data
+FOO&#x41BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO䆺R"
+
+#data
+FOO&#x41ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOAZOO"
+
+#data
+FOO&#x0000;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#x0078;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOxZOO"
+
+#data
+FOO&#x0079;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOyZOO"
+
+#data
+FOO&#x0080;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO€ZOO"
+
+#data
+FOO&#x0081;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0082;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‚ZOO"
+
+#data
+FOO&#x0083;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÆ’ZOO"
+
+#data
+FOO&#x0084;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO„ZOO"
+
+#data
+FOO&#x0085;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO…ZOO"
+
+#data
+FOO&#x0086;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO†ZOO"
+
+#data
+FOO&#x0087;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‡ZOO"
+
+#data
+FOO&#x0088;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOˆZOO"
+
+#data
+FOO&#x0089;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‰ZOO"
+
+#data
+FOO&#x008A;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ ZOO"
+
+#data
+FOO&#x008B;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‹ZOO"
+
+#data
+FOO&#x008C;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ’ZOO"
+
+#data
+FOO&#x008D;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x008E;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOŽZOO"
+
+#data
+FOO&#x008F;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0090;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0091;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‘ZOO"
+
+#data
+FOO&#x0092;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO’ZOO"
+
+#data
+FOO&#x0093;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO“ZOO"
+
+#data
+FOO&#x0094;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOâ€ZOO"
+
+#data
+FOO&#x0095;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO•ZOO"
+
+#data
+FOO&#x0096;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO–ZOO"
+
+#data
+FOO&#x0097;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO—ZOO"
+
+#data
+FOO&#x0098;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOËœZOO"
+
+#data
+FOO&#x0099;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOâ„¢ZOO"
+
+#data
+FOO&#x009A;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ¡ZOO"
+
+#data
+FOO&#x009B;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO›ZOO"
+
+#data
+FOO&#x009C;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ“ZOO"
+
+#data
+FOO&#x009D;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x009E;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOžZOO"
+
+#data
+FOO&#x009F;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOŸZOO"
+
+#data
+FOO&#x00A0;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO ZOO"
+
+#data
+FOO&#xD7FF;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO퟿ZOO"
+
+#data
+FOO&#xD800;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xD801;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xDFFE;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xDFFF;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xE000;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOZOO"
+
+#data
+FOO&#x10FFFE;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOô¿¾ZOO"
+
+#data
+FOO&#x1087D4;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO􈟔ZOO"
+
+#data
+FOO&#x10FFFF;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOOô¿¿ZOO"
+
+#data
+FOO&#x110000;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xFFFFFF;ZOO
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat b/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat
new file mode 100644
index 000000000..e2fb42a07
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat
@@ -0,0 +1,249 @@
+#data
+<div bar="ZZ&gt;YY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>YY"
+
+#data
+<div bar="ZZ&"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar='ZZ&'></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar=ZZ&></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar="ZZ&gt=YY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt=YY"
+
+#data
+<div bar="ZZ&gt0YY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt0YY"
+
+#data
+<div bar="ZZ&gt9YY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt9YY"
+
+#data
+<div bar="ZZ&gtaYY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gtaYY"
+
+#data
+<div bar="ZZ&gtZYY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gtZYY"
+
+#data
+<div bar="ZZ&gt YY"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ> YY"
+
+#data
+<div bar="ZZ&gt"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar='ZZ&gt'></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar=ZZ&gt></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar="ZZ&pound_id=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ£_id=23"
+
+#data
+<div bar="ZZ&prod_id=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&prod_id=23"
+
+#data
+<div bar="ZZ&pound;_id=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ£_id=23"
+
+#data
+<div bar="ZZ&prod;_id=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZâˆ_id=23"
+
+#data
+<div bar="ZZ&pound=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&pound=23"
+
+#data
+<div bar="ZZ&prod=23"></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&prod=23"
+
+#data
+<div>ZZ&pound_id=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£_id=23"
+
+#data
+<div>ZZ&prod_id=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ&prod_id=23"
+
+#data
+<div>ZZ&pound;_id=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£_id=23"
+
+#data
+<div>ZZ&prod;_id=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZâˆ_id=23"
+
+#data
+<div>ZZ&pound=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£=23"
+
+#data
+<div>ZZ&prod=23</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ&prod=23"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat b/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat
new file mode 100644
index 000000000..d7cb71db0
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat
@@ -0,0 +1,246 @@
+#data
+<div<div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div<div>
+
+#data
+<div foo<bar=''>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo<bar=""
+
+#data
+<div foo=`bar`>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo="`bar`"
+
+#data
+<div \"foo=''>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| \"foo=""
+
+#data
+<a href='\nbar'></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="\nbar"
+
+#data
+<!DOCTYPE html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+&lang;&rang;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "⟨⟩"
+
+#data
+&apos;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "'"
+
+#data
+&ImaginaryI;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "â…ˆ"
+
+#data
+&Kopf;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "ð•‚"
+
+#data
+&notinva;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "∉"
+
+#data
+<?import namespace="foo" implementation="#bar">
+#errors
+#document
+| <!-- ?import namespace="foo" implementation="#bar" -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!--foo--bar-->
+#errors
+#document
+| <!-- foo--bar -->
+| <html>
+| <head>
+| <body>
+
+#data
+<![CDATA[x]]>
+#errors
+#document
+| <!-- [CDATA[x]] -->
+| <html>
+| <head>
+| <body>
+
+#data
+<textarea><!--</textarea>--></textarea>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--"
+| "-->"
+
+#data
+<textarea><!--</textarea>-->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--"
+| "-->"
+
+#data
+<style><!--</style>--></style>
+#errors
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "-->"
+
+#data
+<style><!--</style>-->
+#errors
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "-->"
+
+#data
+<ul><li>A </li> <li>B</li></ul>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| "A "
+| " "
+| <li>
+| "B"
+
+#data
+<table><form><input type=hidden><input></form><div></div></table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <input>
+| <div>
+| <table>
+| <form>
+| <input>
+| type="hidden"
+
+#data
+<i>A<b>B<p></i>C</b>D
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <i>
+| "A"
+| <b>
+| "B"
+| <b>
+| <p>
+| <b>
+| <i>
+| "C"
+| "D"
+
+#data
+<div></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<svg></svg>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<math></math>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat b/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat
new file mode 100644
index 000000000..3f2bd374c
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat
@@ -0,0 +1,43 @@
+#data
+<button>1</foo>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <button>
+| "1"
+
+#data
+<foo>1<p>2</foo>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| "1"
+| <p>
+| "2"
+
+#data
+<dd>1</foo>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <dd>
+| "1"
+
+#data
+<foo>1<dd>2</foo>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| "1"
+| <dd>
+| "2"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat b/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat
new file mode 100644
index 000000000..88325ffe6
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat
@@ -0,0 +1,40 @@
+#data
+<isindex>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<isindex name="A" action="B" prompt="C" foo="D">
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| action="B"
+| <hr>
+| <label>
+| "C"
+| <input>
+| foo="D"
+| name="isindex"
+| <hr>
+
+#data
+<form><isindex>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <form>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat
new file mode 100644
index 000000000..a5ebb1eb2
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat
Binary files differ
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
new file mode 100644
index 000000000..5a9208465
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
@@ -0,0 +1,52 @@
+#data
+<input type="hidden"><frameset>
+#errors
+21: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+31: “frameset” start tag seen.
+31: End of file seen and there were open elements.
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><table><caption><svg>foo</table>bar
+#errors
+47: End tag “table” did not match the name of the current open element (“svg”).
+47: “table” closed but “caption” was still open.
+47: End tag “table” seen, but there were open elements.
+36: Unclosed element “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| "foo"
+| "bar"
+
+#data
+<table><tr><td><svg><desc><td></desc><circle>
+#errors
+7: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+30: A table cell was implicitly closed, but there were open elements.
+26: Unclosed element “desc”.
+20: Unclosed element “svg”.
+37: Stray end tag “desc”.
+45: End of file seen and there were open elements.
+45: Unclosed element “circle”.
+7: Unclosed element “table”.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg desc>
+| <td>
+| <circle>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat b/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat
new file mode 100644
index 000000000..04cc11fb9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat
Binary files differ
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
new file mode 100644
index 000000000..76b67f4ba
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
@@ -0,0 +1,308 @@
+#data
+FOO<script>'Hello'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'Hello'"
+| "BAR"
+
+#data
+FOO<script></script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script >BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script/>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script/ >BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script type="text/plain"></scriptx>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "</scriptx>BAR"
+
+#data
+FOO<script></script foo=">" dd>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script>'<'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<'"
+| "BAR"
+
+#data
+FOO<script>'<!'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!'"
+| "BAR"
+
+#data
+FOO<script>'<!-'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-'"
+| "BAR"
+
+#data
+FOO<script>'<!--'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!--'"
+| "BAR"
+
+#data
+FOO<script>'<!---'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!---'"
+| "BAR"
+
+#data
+FOO<script>'<!-->'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-->'"
+| "BAR"
+
+#data
+FOO<script>'<!-->'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-->'"
+| "BAR"
+
+#data
+FOO<script>'<!-- potato'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- potato'"
+| "BAR"
+
+#data
+FOO<script>'<!-- <sCrIpt'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- <sCrIpt'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt>'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt>'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> -'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> -'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> --'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> --'</script>BAR"
+
+#data
+FOO<script>'<!-- <sCrIpt> -->'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- <sCrIpt> -->'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> --!>'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> --!>'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> -- >'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> -- >'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt '</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt '</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt/'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt\'</script>BAR
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt\'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR</script>QUX
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt/'</script>BAR"
+| "QUX"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
new file mode 100644
index 000000000..4e08d0e84
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
@@ -0,0 +1,15 @@
+#data
+<p><b id="A"><script>document.getElementById("A").id = "B"</script></p>TEXT</b>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| id="B"
+| <script>
+| "document.getElementById("A").id = "B""
+| <b>
+| id="A"
+| "TEXT"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
new file mode 100644
index 000000000..ef4a41ca0
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
@@ -0,0 +1,28 @@
+#data
+1<script>document.write("2")</script>3
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "1"
+| <script>
+| "document.write("2")"
+| "23"
+
+#data
+1<script>document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")</script>4
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "1"
+| <script>
+| "document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")"
+| <script>
+| "document.write('2')"
+| "2"
+| <script>
+| "document.write('3')"
+| "34"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat b/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat
new file mode 100644
index 000000000..c4b47e48a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat
@@ -0,0 +1,212 @@
+#data
+<table><th>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <th>
+
+#data
+<table><td>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><col foo='bar'>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| foo="bar"
+
+#data
+<table><colgroup></html>foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "foo"
+| <table>
+| <colgroup>
+
+#data
+<table></table><p>foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <p>
+| "foo"
+
+#data
+<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr><td>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><select><option>3</select></table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "3"
+| <table>
+
+#data
+<table><select><table></table></select></table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <table>
+
+#data
+<table><select></table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+
+#data
+<table><select><option>A<tr><td>B</td></tr></table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "A"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "B"
+
+#data
+<table><td></body></caption></col></colgroup></html>foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "foo"
+
+#data
+<table><td>A</table>B
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+| "B"
+
+#data
+<table><tr><caption>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <caption>
+
+#data
+<table><tr></body></caption></col></colgroup></html></td></th><td>foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "foo"
+
+#data
+<table><td><tr>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <tr>
+
+#data
+<table><td><button><td>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <button>
+| <td>
+
+#data
+<table><tr><td><svg><desc><td>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg desc>
+| <td>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat
new file mode 100644
index 000000000..cbf8bdda6
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat
@@ -0,0 +1,1952 @@
+#data
+Test
+#errors
+Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<p>One<p>Two
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "One"
+| <p>
+| "Two"
+
+#data
+Line1<br>Line2<br>Line3<br>Line4
+#errors
+Line: 1 Col: 5 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "Line1"
+| <br>
+| "Line2"
+| <br>
+| "Line3"
+| <br>
+| "Line4"
+
+#data
+<html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<head>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<body>
+#errors
+Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head><body>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head><body></body>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head><body></body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+Line: 1 Col: 19 Unexpected end tag (body).
+Line: 1 Col: 26 Unexpected end tag (html).
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head><body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<head></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end tag (html). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</head>
+#errors
+Line: 1 Col: 7 Unexpected end tag (head). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</body>
+#errors
+Line: 1 Col: 7 Unexpected end tag (body). Expected DOCTYPE.
+Line: 1 Col: 7 Unexpected end tag (body) after the (implied) root element.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</html>
+#errors
+Line: 1 Col: 7 Unexpected end tag (html). Expected DOCTYPE.
+Line: 1 Col: 7 Unexpected end tag (html) after the (implied) root element.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<b><table><td><i></table>
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<b><table><td></b><i></table>X
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 30 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+| "X"
+
+#data
+<h1>Hello<h2>World
+#errors
+4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+13: Heading cannot be a child of another heading.
+18: End of file seen and there were open elements.
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| "Hello"
+| <h2>
+| "World"
+
+#data
+<a><p>X<a>Y</a>Z</p></a>
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 10 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 10 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 24 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+| "X"
+| <a>
+| "Y"
+| "Z"
+
+#data
+<b><button>foo</b>bar
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <button>
+| <b>
+| "foo"
+| "bar"
+
+#data
+<!DOCTYPE html><span><button>foo</span>bar
+#errors
+39: End tag “span” seen but there were unclosed elements.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <span>
+| <button>
+| "foobar"
+
+#data
+<p><b><div><marquee></p></b></div>X
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end tag (p). Ignored.
+Line: 1 Col: 24 Unexpected end tag (p). Ignored.
+Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <div>
+| <b>
+| <marquee>
+| <p>
+| "X"
+
+#data
+<script><div></script></div><title><p></title><p><p>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 28 Unexpected end tag (div). Ignored.
+#document
+| <html>
+| <head>
+| <script>
+| "<div>"
+| <title>
+| "<p>"
+| <body>
+| <p>
+| <p>
+
+#data
+<!--><div>--<!-->
+#errors
+Line: 1 Col: 5 Incorrect comment.
+Line: 1 Col: 10 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 17 Incorrect comment.
+Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
+#document
+| <!-- -->
+| <html>
+| <head>
+| <body>
+| <div>
+| "--"
+| <!-- -->
+
+#data
+<p><hr></p>
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end tag (p). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <hr>
+| <p>
+
+#data
+<select><b><option><select><option></b></select>X
+#errors
+Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
+Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
+Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 48 Unexpected end tag (select). Ignored.
+Line: 1 Col: 49 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+| "X"
+
+#data
+<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 63 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 64 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| <table>
+| <a>
+| <a>
+| <b>
+| "X"
+| "C"
+| <a>
+| "Y"
+
+#data
+<a X>0<b>1<a Y>2
+#errors
+Line: 1 Col: 5 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 15 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| x=""
+| "0"
+| <b>
+| "1"
+| <b>
+| <a>
+| y=""
+| "2"
+
+#data
+<!-----><font><div>hello<table>excite!<b>me!<th><i>please!</tr><!--X-->
+#errors
+Line: 1 Col: 7 Unexpected '-' after '--' found in comment.
+Line: 1 Col: 14 Unexpected start tag (font). Expected DOCTYPE.
+Line: 1 Col: 38 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 41 Unexpected start tag (b) in table context caused voodoo mode.
+Line: 1 Col: 48 Unexpected implied end tag (b) in the table phase.
+Line: 1 Col: 48 Unexpected table cell start tag (th) in the table body phase.
+Line: 1 Col: 63 Got table cell end tag (th) while required end tags are missing.
+Line: 1 Col: 71 Unexpected end of file. Expected table content.
+#document
+| <!-- - -->
+| <html>
+| <head>
+| <body>
+| <font>
+| <div>
+| "helloexcite!"
+| <b>
+| "me!"
+| <table>
+| <tbody>
+| <tr>
+| <th>
+| <i>
+| "please!"
+| <!-- X -->
+
+#data
+<!DOCTYPE html><li>hello<li>world<ul>how<li>do</ul>you</body><!--do-->
+#errors
+Line: 1 Col: 61 Unexpected end tag (li). Missing end tag (body).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <li>
+| "hello"
+| <li>
+| "world"
+| <ul>
+| "how"
+| <li>
+| "do"
+| "you"
+| <!-- do -->
+
+#data
+<!DOCTYPE html>A<option>B<optgroup>C<select>D</option>E
+#errors
+Line: 1 Col: 54 Unexpected end tag (option) in the select phase. Ignored.
+Line: 1 Col: 55 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+| <option>
+| "B"
+| <optgroup>
+| "C"
+| <select>
+| "DE"
+
+#data
+<
+#errors
+Line: 1 Col: 1 Expected tag name. Got something else instead
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "<"
+
+#data
+<#
+#errors
+Line: 1 Col: 1 Expected tag name. Got something else instead
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "<#"
+
+#data
+</
+#errors
+Line: 1 Col: 2 Expected closing tag. Unexpected end of file.
+Line: 1 Col: 2 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "</"
+
+#data
+</#
+#errors
+Line: 1 Col: 2 Expected closing tag. Unexpected character '#' found.
+Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- # -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?
+#errors
+Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
+Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- ? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?#
+#errors
+Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
+Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- ?# -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!
+#errors
+Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
+Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!#
+#errors
+Line: 1 Col: 3 Expected '--' or 'DOCTYPE'. Not found.
+Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- # -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?COMMENT?>
+#errors
+Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
+Line: 1 Col: 11 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- ?COMMENT? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!COMMENT>
+#errors
+Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
+Line: 1 Col: 10 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- COMMENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+</ COMMENT >
+#errors
+Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
+Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- COMMENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?COM--MENT?>
+#errors
+Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
+Line: 1 Col: 13 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- ?COM--MENT? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!COM--MENT>
+#errors
+Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
+Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- COM--MENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+</ COM--MENT >
+#errors
+Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
+Line: 1 Col: 14 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- COM--MENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><style> EOF
+#errors
+Line: 1 Col: 26 Unexpected end of file. Expected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| " EOF"
+| <body>
+
+#data
+<!DOCTYPE html><script> <!-- </script> --> </script> EOF
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| " <!-- "
+| " "
+| <body>
+| "--> EOF"
+
+#data
+<b><p></b>TEST
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 10 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <p>
+| <b>
+| "TEST"
+
+#data
+<p id=a><b><p id=b></b>TEST
+#errors
+Line: 1 Col: 8 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 19 Unexpected end tag (p). Ignored.
+Line: 1 Col: 23 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| id="a"
+| <b>
+| <p>
+| id="b"
+| "TEST"
+
+#data
+<b id=a><p><b id=b></p></b>TEST
+#errors
+Line: 1 Col: 8 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end tag (p). Ignored.
+Line: 1 Col: 27 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
+Line: 1 Col: 31 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| id="a"
+| <p>
+| <b>
+| id="b"
+| "TEST"
+
+#data
+<!DOCTYPE html><title>U-test</title><body><div><p>Test<u></p></div></body>
+#errors
+Line: 1 Col: 61 Unexpected end tag (p). Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "U-test"
+| <body>
+| <div>
+| <p>
+| "Test"
+| <u>
+
+#data
+<!DOCTYPE html><font><table></font></table></font>
+#errors
+Line: 1 Col: 35 Unexpected end tag (font) in table context caused voodoo mode.
+Line: 1 Col: 35 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <table>
+
+#data
+<font><p>hello<b>cruel</font>world
+#errors
+Line: 1 Col: 6 Unexpected start tag (font). Expected DOCTYPE.
+Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <font>
+| "hello"
+| <b>
+| "cruel"
+| <b>
+| "world"
+
+#data
+<b>Test</i>Test
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 11 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "TestTest"
+
+#data
+<b>A<cite>B<div>C
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| "C"
+
+#data
+<b>A<cite>B<div>C</cite>D
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 24 Unexpected end tag (cite). Ignored.
+Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| "CD"
+
+#data
+<b>A<cite>B<div>C</b>D
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 21 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 22 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| <b>
+| "C"
+| "D"
+
+#data
+
+#errors
+Line: 1 Col: 0 Unexpected End of file. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<DIV>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 5 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<DIV> abc
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 9 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc"
+
+#data
+<DIV> abc <B>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 13 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+
+#data
+<DIV> abc <B> def
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def"
+
+#data
+<DIV> abc <B> def <I>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 21 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+
+#data
+<DIV> abc <B> def <I> ghi
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi"
+
+#data
+<DIV> abc <B> def <I> ghi <P>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <p>
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <p>
+| " jkl"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 38 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <b>
+| " jkl "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 42 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <b>
+| " jkl "
+| " mno"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P>
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 56 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P> stu
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 60 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr "
+| " stu"
+
+#data
+<test attribute---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
+#errors
+Line: 1 Col: 1040 Unexpected start tag (test). Expected DOCTYPE.
+Line: 1 Col: 1040 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <test>
+| attribute----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------=""
+
+#data
+<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe
+#errors
+Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 39 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 39 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 39 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 45 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
+
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "aba"
+| <a>
+| href="foo"
+| "br"
+| <a>
+| href="foo"
+| "x"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "aoe"
+
+#data
+<a href="blah">aba<table><tr><td><a href="foo">br</td></tr>x</table>aoe
+#errors
+Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 60 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "abax"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "br"
+| "aoe"
+
+#data
+<table><a href="blah">aba<tr><td><a href="foo">br</td></tr>x</table>aoe
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 29 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "aba"
+| <a>
+| href="blah"
+| "x"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "br"
+| <a>
+| href="blah"
+| "aoe"
+
+#data
+<a href=a>aa<marquee>aa<a href=b>bb</marquee>aa
+#errors
+Line: 1 Col: 10 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 45 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="a"
+| "aa"
+| <marquee>
+| "aa"
+| <a>
+| href="b"
+| "bb"
+| "aa"
+
+#data
+<wbr><strike><code></strike><code><strike></code>
+#errors
+Line: 1 Col: 5 Unexpected start tag (wbr). Expected DOCTYPE.
+Line: 1 Col: 28 End tag (strike) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 49 Unexpected end tag (code). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <wbr>
+| <strike>
+| <code>
+| <code>
+| <code>
+| <strike>
+
+#data
+<!DOCTYPE html><spacer>foo
+#errors
+26: End of file seen and there were open elements.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <spacer>
+| "foo"
+
+#data
+<title><meta></title><link><title><meta></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| "<meta>"
+| <link>
+| <title>
+| "<meta>"
+| <body>
+
+#data
+<style><!--</style><meta><script>--><link></script>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 51 Unexpected end of file. Expected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <meta>
+| <script>
+| "--><link>"
+| <body>
+
+#data
+<head><meta></head><link>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 25 Unexpected start tag (link) that can be in head. Moved.
+#document
+| <html>
+| <head>
+| <meta>
+| <link>
+| <body>
+
+#data
+<table><tr><tr><td><td><span><th><span>X</table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 33 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 48 Got table cell end tag (th) while required end tags are missing.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <tr>
+| <td>
+| <td>
+| <span>
+| <th>
+| <span>
+| "X"
+
+#data
+<body><body><base><link><meta><title><p></title><body><p></body>
+#errors
+Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected start tag (body).
+Line: 1 Col: 54 Unexpected start tag (body).
+Line: 1 Col: 64 Unexpected end tag (p). Missing end tag (body).
+#document
+| <html>
+| <head>
+| <body>
+| <base>
+| <link>
+| <meta>
+| <title>
+| "<p>"
+| <p>
+
+#data
+<textarea><p></textarea>
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<p>"
+
+#data
+<p><image></p>
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 10 Unexpected start tag (image). Treated as img.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <img>
+
+#data
+<a><table><a></table><p><a><div><a>
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 13 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 13 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 21 Unexpected end tag (table). Expected end tag (a).
+Line: 1 Col: 27 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 27 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
+Line: 1 Col: 32 Unexpected end tag (p). Ignored.
+Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 35 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
+Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <p>
+| <a>
+| <div>
+| <a>
+
+#data
+<head></p><meta><p>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 10 Unexpected end tag (p). Ignored.
+#document
+| <html>
+| <head>
+| <meta>
+| <body>
+| <p>
+
+#data
+<head></html><meta><p>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 19 Unexpected start tag (meta).
+#document
+| <html>
+| <head>
+| <body>
+| <meta>
+| <p>
+
+#data
+<b><table><td><i></table>
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<b><table><td></b><i></table>
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<h1><h2>
+#errors
+4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+8: Heading cannot be a child of another heading.
+8: End of file seen and there were open elements.
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| <h2>
+
+#data
+<a><p><a></a></p></a>
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 9 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 21 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+| <a>
+
+#data
+<b><button></b></button></b>
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <button>
+| <b>
+
+#data
+<p><b><div><marquee></p></b></div>
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end tag (p). Ignored.
+Line: 1 Col: 24 Unexpected end tag (p). Ignored.
+Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <div>
+| <b>
+| <marquee>
+| <p>
+
+#data
+<script></script></div><title></title><p><p>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end tag (div). Ignored.
+#document
+| <html>
+| <head>
+| <script>
+| <title>
+| <body>
+| <p>
+| <p>
+
+#data
+<p><hr></p>
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end tag (p). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <hr>
+| <p>
+
+#data
+<select><b><option><select><option></b></select>
+#errors
+Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
+Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
+Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 48 Unexpected end tag (select). Ignored.
+Line: 1 Col: 48 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+
+#data
+<html><head><title></title><body></body></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| <body>
+
+#data
+<a><table><td><a><table></table><a></tr><a></table><a>
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
+Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
+Line: 1 Col: 54 Unexpected start tag (a) implies end tag (a).
+Line: 1 Col: 54 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
+Line: 1 Col: 54 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| <table>
+| <a>
+| <a>
+
+#data
+<ul><li></li><div><li></div><li><li><div><li><address><li><b><em></b><li></ul>
+#errors
+Line: 1 Col: 4 Unexpected start tag (ul). Expected DOCTYPE.
+Line: 1 Col: 45 Missing end tag (div, li).
+Line: 1 Col: 58 Missing end tag (address, li).
+Line: 1 Col: 69 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| <li>
+| <li>
+| <li>
+| <div>
+| <li>
+| <address>
+| <li>
+| <b>
+| <em>
+| <li>
+
+#data
+<ul><li><ul></li><li>a</li></ul></li></ul>
+#errors
+XXX: fix me
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <ul>
+| <li>
+| "a"
+
+#data
+<frameset><frame><frameset><frame></frameset><noframes></noframes></frameset>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <frameset>
+| <frame>
+| <frameset>
+| <frame>
+| <noframes>
+
+#data
+<h1><table><td><h3></table><h3></h1>
+#errors
+4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+15: “td” start tag in table body.
+27: Unclosed elements.
+31: Heading cannot be a child of another heading.
+36: End tag “h1” seen but there were unclosed elements.
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <h3>
+| <h3>
+
+#data
+<table><colgroup><col><colgroup><col><col><col><colgroup><col><col><thead><tr><td></table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| <colgroup>
+| <col>
+| <col>
+| <col>
+| <colgroup>
+| <col>
+| <col>
+| <thead>
+| <tr>
+| <td>
+
+#data
+<table><col><tbody><col><tr><col><td><col></table><col>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 55 Unexpected start tag col. Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <td>
+| <colgroup>
+| <col>
+
+#data
+<table><colgroup><tbody><colgroup><tr><colgroup><td><colgroup></table><colgroup>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 52 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 80 Unexpected start tag colgroup. Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <tbody>
+| <colgroup>
+| <tbody>
+| <tr>
+| <colgroup>
+| <tbody>
+| <tr>
+| <td>
+| <colgroup>
+
+#data
+</strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
+Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
+Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
+Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
+Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
+Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 140 This element (img) has no end tag.
+Line: 1 Col: 148 Unexpected end tag (title). Ignored.
+Line: 1 Col: 155 Unexpected end tag (span). Ignored.
+Line: 1 Col: 163 Unexpected end tag (style). Ignored.
+Line: 1 Col: 172 Unexpected end tag (script). Ignored.
+Line: 1 Col: 180 Unexpected end tag (table). Ignored.
+Line: 1 Col: 185 Unexpected end tag (th). Ignored.
+Line: 1 Col: 190 Unexpected end tag (td). Ignored.
+Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 203 This element (frame) has no end tag.
+Line: 1 Col: 210 This element (area) has no end tag.
+Line: 1 Col: 217 Unexpected end tag (link). Ignored.
+Line: 1 Col: 225 This element (param) has no end tag.
+Line: 1 Col: 230 This element (hr) has no end tag.
+Line: 1 Col: 238 This element (input) has no end tag.
+Line: 1 Col: 244 Unexpected end tag (col). Ignored.
+Line: 1 Col: 251 Unexpected end tag (base). Ignored.
+Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 269 This element (basefont) has no end tag.
+Line: 1 Col: 279 This element (bgsound) has no end tag.
+Line: 1 Col: 287 This element (embed) has no end tag.
+Line: 1 Col: 296 This element (spacer) has no end tag.
+Line: 1 Col: 300 Unexpected end tag (p). Ignored.
+Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 460 This element (wbr) has no end tag.
+Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 513 Unexpected end tag (html). Ignored.
+Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 520 Unexpected end tag (head). Ignored.
+Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 537 This element (image) has no end tag.
+Line: 1 Col: 547 This element (isindex) has no end tag.
+Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 599 Unexpected end tag (option). Ignored.
+Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+| <p>
+
+#data
+<table><tr></strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode.
+Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode.
+Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode.
+Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode.
+Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode.
+Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode.
+Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode.
+Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode.
+Line: 1 Col: 58 Unexpected end tag (blink). Ignored.
+Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode.
+Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode.
+Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag.
+Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode.
+Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode.
+Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode.
+Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode.
+Line: 1 Col: 99 Unexpected end tag (select). Ignored.
+Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode.
+Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag.
+Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode.
+Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag.
+Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode.
+Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag.
+Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode.
+Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag.
+Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode.
+Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag.
+Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode.
+Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag.
+Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored.
+Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode.
+Line: 1 Col: 141 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode.
+Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode.
+Line: 1 Col: 151 This element (img) has no end tag.
+Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode.
+Line: 1 Col: 159 Unexpected end tag (title). Ignored.
+Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode.
+Line: 1 Col: 166 Unexpected end tag (span). Ignored.
+Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode.
+Line: 1 Col: 174 Unexpected end tag (style). Ignored.
+Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode.
+Line: 1 Col: 183 Unexpected end tag (script). Ignored.
+Line: 1 Col: 196 Unexpected end tag (th). Ignored.
+Line: 1 Col: 201 Unexpected end tag (td). Ignored.
+Line: 1 Col: 206 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 214 This element (frame) has no end tag.
+Line: 1 Col: 221 This element (area) has no end tag.
+Line: 1 Col: 228 Unexpected end tag (link). Ignored.
+Line: 1 Col: 236 This element (param) has no end tag.
+Line: 1 Col: 241 This element (hr) has no end tag.
+Line: 1 Col: 249 This element (input) has no end tag.
+Line: 1 Col: 255 Unexpected end tag (col). Ignored.
+Line: 1 Col: 262 Unexpected end tag (base). Ignored.
+Line: 1 Col: 269 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 280 This element (basefont) has no end tag.
+Line: 1 Col: 290 This element (bgsound) has no end tag.
+Line: 1 Col: 298 This element (embed) has no end tag.
+Line: 1 Col: 307 This element (spacer) has no end tag.
+Line: 1 Col: 311 Unexpected end tag (p). Ignored.
+Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 331 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 350 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 366 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 404 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 471 This element (wbr) has no end tag.
+Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 524 Unexpected end tag (html). Ignored.
+Line: 1 Col: 524 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 531 Unexpected end tag (head). Ignored.
+Line: 1 Col: 540 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 548 This element (image) has no end tag.
+Line: 1 Col: 558 This element (isindex) has no end tag.
+Line: 1 Col: 568 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 590 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 610 Unexpected end tag (option). Ignored.
+Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 633 Unexpected end tag (textarea). Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+| <table>
+| <tbody>
+| <tr>
+| <p>
+
+#data
+<frameset>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 1 Col: 10 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <frameset>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat
new file mode 100644
index 000000000..4f8df86f2
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat
@@ -0,0 +1,799 @@
+#data
+<!DOCTYPE html><svg></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!DOCTYPE html><svg></svg><![CDATA[a]]>
+#errors
+29: Bogus comment
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <!-- [CDATA[a]] -->
+
+#data
+<!DOCTYPE html><body><svg></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!DOCTYPE html><body><select><svg></svg></select>
+#errors
+35: Stray “svg” start tag.
+42: Stray end tag “svg”
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!DOCTYPE html><body><select><option><svg></svg></option></select>
+#errors
+43: Stray “svg” start tag.
+50: Stray end tag “svg”
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!DOCTYPE html><body><table><svg></svg></table>
+#errors
+34: Start tag “svg” seen in “table”.
+41: Stray end tag “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <table>
+
+#data
+<!DOCTYPE html><body><table><svg><g>foo</g></svg></table>
+#errors
+34: Start tag “svg” seen in “table”.
+46: Stray end tag “g”.
+53: Stray end tag “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><svg><g>foo</g><g>bar</g></svg></table>
+#errors
+34: Start tag “svg” seen in “table”.
+46: Stray end tag “g”.
+58: Stray end tag “g”.
+65: Stray end tag “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><tbody><svg><g>foo</g><g>bar</g></svg></tbody></table>
+#errors
+41: Start tag “svg” seen in “table”.
+53: Stray end tag “g”.
+65: Stray end tag “g”.
+72: Stray end tag “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+| <tbody>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><svg><g>foo</g><g>bar</g></svg></tr></tbody></table>
+#errors
+45: Start tag “svg” seen in “table”.
+57: Stray end tag “g”.
+69: Stray end tag “g”.
+76: Stray end tag “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg></td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg><p>baz</td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g></svg><p>baz</caption></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+70: HTML start tag “p” in a foreign namespace context.
+81: “table” closed but “caption” was still open.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g>baz</table><p>quux
+#errors
+78: “table” closed but “caption” was still open.
+78: Unclosed elements on stack.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><colgroup><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+44: Start tag “svg” seen in “table”.
+56: Stray end tag “g”.
+68: Stray end tag “g”.
+71: HTML start tag “p” in a foreign namespace context.
+71: Start tag “p” seen in “table”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+| <table>
+| <colgroup>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><tr><td><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+50: Stray “svg” start tag.
+54: Stray “g” start tag.
+62: Stray end tag “g”
+66: Stray “g” start tag.
+74: Stray end tag “g”
+77: Stray “p” start tag.
+88: “table” end tag with “select” open.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| "foobarbaz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+36: Start tag “select” seen in “table”.
+42: Stray “svg” start tag.
+46: Stray “g” start tag.
+54: Stray end tag “g”
+58: Stray “g” start tag.
+66: Stray end tag “g”
+69: Stray “p” start tag.
+80: “table” end tag with “select” open.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "foobarbaz"
+| <table>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body></body></html><svg><g>foo</g><g>bar</g><p>baz
+#errors
+41: Stray “svg” start tag.
+68: HTML start tag “p” in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body></body><svg><g>foo</g><g>bar</g><p>baz
+#errors
+34: Stray “svg” start tag.
+61: HTML start tag “p” in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><frameset><svg><g></g><g></g><p><span>
+#errors
+31: Stray “svg” start tag.
+35: Stray “g” start tag.
+40: Stray end tag “g”
+44: Stray “g” start tag.
+49: Stray end tag “g”
+52: Stray “p” start tag.
+58: Stray “span” start tag.
+58: End of file seen and there were open elements.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset></frameset><svg><g></g><g></g><p><span>
+#errors
+42: Stray “svg” start tag.
+46: Stray “g” start tag.
+51: Stray end tag “g”
+55: Stray “g” start tag.
+60: Stray end tag “g”
+63: Stray “p” start tag.
+69: Stray “span” start tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><body xlink:href=foo><svg xlink:href=foo></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| <svg svg>
+| xlink href="foo"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo></g></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo />bar</svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+| "bar"
+
+#data
+<svg></path>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<div><svg></div>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| "a"
+
+#data
+<div><svg><path></div>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| "a"
+
+#data
+<div><svg><path></svg><path>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <path>
+
+#data
+<div><svg><path><foreignObject><math></div>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <math math>
+| "a"
+
+#data
+<div><svg><path><foreignObject><p></div>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <p>
+| "a"
+
+#data
+<!DOCTYPE html><svg><desc><div><svg><ul>a
+#errors
+40: HTML start tag “ul” in a foreign namespace context.
+41: End of file in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg desc>
+| <div>
+| <svg svg>
+| <ul>
+| "a"
+
+#data
+<!DOCTYPE html><svg><desc><svg><ul>a
+#errors
+35: HTML start tag “ul” in a foreign namespace context.
+36: End of file in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg desc>
+| <svg svg>
+| <ul>
+| "a"
+
+#data
+<!DOCTYPE html><p><svg><desc><p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <svg svg>
+| <svg desc>
+| <p>
+
+#data
+<!DOCTYPE html><p><svg><title><p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <svg svg>
+| <svg title>
+| <p>
+
+#data
+<div><svg><path><foreignObject><p></foreignObject><p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <p>
+| <p>
+
+#data
+<math><mi><div><object><div><span></span></div></object></div></mi><mi>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <div>
+| <object>
+| <div>
+| <span>
+| <math mi>
+
+#data
+<math><mi><svg><foreignObject><div><div></div></div></foreignObject></svg></mi><mi>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <div>
+| <math mi>
+
+#data
+<svg><script></script><path>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg script>
+| <svg path>
+
+#data
+<table><svg></svg><tr>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<math><mi><mglyph>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <math mglyph>
+
+#data
+<math><mi><malignmark>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <math malignmark>
+
+#data
+<math><mo><mglyph>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mo>
+| <math mglyph>
+
+#data
+<math><mo><malignmark>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mo>
+| <math malignmark>
+
+#data
+<math><mn><mglyph>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| <math mglyph>
+
+#data
+<math><mn><malignmark>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| <math malignmark>
+
+#data
+<math><ms><mglyph>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math ms>
+| <math mglyph>
+
+#data
+<math><ms><malignmark>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math ms>
+| <math malignmark>
+
+#data
+<math><mtext><mglyph>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <math mglyph>
+
+#data
+<math><mtext><malignmark>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <math malignmark>
+
+#data
+<math><annotation-xml><svg></svg></annotation-xml><mi>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <math mi>
+
+#data
+<math><annotation-xml><svg><foreignObject><div><math><mi></mi></math><span></span></div></foreignObject><path></path></svg></annotation-xml><mi>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <math math>
+| <math mi>
+| <span>
+| <svg path>
+| <math mi>
+
+#data
+<math><annotation-xml><svg><foreignObject><math><mi><svg></svg></mi><mo></mo></math><span></span></foreignObject><path></path></svg></annotation-xml><mi>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <svg foreignObject>
+| <math math>
+| <math mi>
+| <svg svg>
+| <math mo>
+| <span>
+| <svg path>
+| <math mi>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat
new file mode 100644
index 000000000..638cde479
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat
@@ -0,0 +1,482 @@
+#data
+<!DOCTYPE html><body><svg attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentScriptType=""
+| contentStyleType=""
+| diffuseConstant=""
+| edgeMode=""
+| externalResourcesRequired=""
+| filterRes=""
+| filterUnits=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><BODY><SVG ATTRIBUTENAME='' ATTRIBUTETYPE='' BASEFREQUENCY='' BASEPROFILE='' CALCMODE='' CLIPPATHUNITS='' CONTENTSCRIPTTYPE='' CONTENTSTYLETYPE='' DIFFUSECONSTANT='' EDGEMODE='' EXTERNALRESOURCESREQUIRED='' FILTERRES='' FILTERUNITS='' GLYPHREF='' GRADIENTTRANSFORM='' GRADIENTUNITS='' KERNELMATRIX='' KERNELUNITLENGTH='' KEYPOINTS='' KEYSPLINES='' KEYTIMES='' LENGTHADJUST='' LIMITINGCONEANGLE='' MARKERHEIGHT='' MARKERUNITS='' MARKERWIDTH='' MASKCONTENTUNITS='' MASKUNITS='' NUMOCTAVES='' PATHLENGTH='' PATTERNCONTENTUNITS='' PATTERNTRANSFORM='' PATTERNUNITS='' POINTSATX='' POINTSATY='' POINTSATZ='' PRESERVEALPHA='' PRESERVEASPECTRATIO='' PRIMITIVEUNITS='' REFX='' REFY='' REPEATCOUNT='' REPEATDUR='' REQUIREDEXTENSIONS='' REQUIREDFEATURES='' SPECULARCONSTANT='' SPECULAREXPONENT='' SPREADMETHOD='' STARTOFFSET='' STDDEVIATION='' STITCHTILES='' SURFACESCALE='' SYSTEMLANGUAGE='' TABLEVALUES='' TARGETX='' TARGETY='' TEXTLENGTH='' VIEWBOX='' VIEWTARGET='' XCHANNELSELECTOR='' YCHANNELSELECTOR='' ZOOMANDPAN=''></SVG>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentScriptType=""
+| contentStyleType=""
+| diffuseConstant=""
+| edgeMode=""
+| externalResourcesRequired=""
+| filterRes=""
+| filterUnits=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><body><svg attributename='' attributetype='' basefrequency='' baseprofile='' calcmode='' clippathunits='' contentscripttype='' contentstyletype='' diffuseconstant='' edgemode='' externalresourcesrequired='' filterres='' filterunits='' glyphref='' gradienttransform='' gradientunits='' kernelmatrix='' kernelunitlength='' keypoints='' keysplines='' keytimes='' lengthadjust='' limitingconeangle='' markerheight='' markerunits='' markerwidth='' maskcontentunits='' maskunits='' numoctaves='' pathlength='' patterncontentunits='' patterntransform='' patternunits='' pointsatx='' pointsaty='' pointsatz='' preservealpha='' preserveaspectratio='' primitiveunits='' refx='' refy='' repeatcount='' repeatdur='' requiredextensions='' requiredfeatures='' specularconstant='' specularexponent='' spreadmethod='' startoffset='' stddeviation='' stitchtiles='' surfacescale='' systemlanguage='' tablevalues='' targetx='' targety='' textlength='' viewbox='' viewtarget='' xchannelselector='' ychannelselector='' zoomandpan=''></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentScriptType=""
+| contentStyleType=""
+| diffuseConstant=""
+| edgeMode=""
+| externalResourcesRequired=""
+| filterRes=""
+| filterUnits=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><body><math attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| attributename=""
+| attributetype=""
+| basefrequency=""
+| baseprofile=""
+| calcmode=""
+| clippathunits=""
+| contentscripttype=""
+| contentstyletype=""
+| diffuseconstant=""
+| edgemode=""
+| externalresourcesrequired=""
+| filterres=""
+| filterunits=""
+| glyphref=""
+| gradienttransform=""
+| gradientunits=""
+| kernelmatrix=""
+| kernelunitlength=""
+| keypoints=""
+| keysplines=""
+| keytimes=""
+| lengthadjust=""
+| limitingconeangle=""
+| markerheight=""
+| markerunits=""
+| markerwidth=""
+| maskcontentunits=""
+| maskunits=""
+| numoctaves=""
+| pathlength=""
+| patterncontentunits=""
+| patterntransform=""
+| patternunits=""
+| pointsatx=""
+| pointsaty=""
+| pointsatz=""
+| preservealpha=""
+| preserveaspectratio=""
+| primitiveunits=""
+| refx=""
+| refy=""
+| repeatcount=""
+| repeatdur=""
+| requiredextensions=""
+| requiredfeatures=""
+| specularconstant=""
+| specularexponent=""
+| spreadmethod=""
+| startoffset=""
+| stddeviation=""
+| stitchtiles=""
+| surfacescale=""
+| systemlanguage=""
+| tablevalues=""
+| targetx=""
+| targety=""
+| textlength=""
+| viewbox=""
+| viewtarget=""
+| xchannelselector=""
+| ychannelselector=""
+| zoomandpan=""
+
+#data
+<!DOCTYPE html><body><svg><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><body><svg><altglyph /><altglyphdef /><altglyphitem /><animatecolor /><animatemotion /><animatetransform /><clippath /><feblend /><fecolormatrix /><fecomponenttransfer /><fecomposite /><feconvolvematrix /><fediffuselighting /><fedisplacementmap /><fedistantlight /><feflood /><fefunca /><fefuncb /><fefuncg /><fefuncr /><fegaussianblur /><feimage /><femerge /><femergenode /><femorphology /><feoffset /><fepointlight /><fespecularlighting /><fespotlight /><fetile /><feturbulence /><foreignobject /><glyphref /><lineargradient /><radialgradient /><textpath /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><BODY><SVG><ALTGLYPH /><ALTGLYPHDEF /><ALTGLYPHITEM /><ANIMATECOLOR /><ANIMATEMOTION /><ANIMATETRANSFORM /><CLIPPATH /><FEBLEND /><FECOLORMATRIX /><FECOMPONENTTRANSFER /><FECOMPOSITE /><FECONVOLVEMATRIX /><FEDIFFUSELIGHTING /><FEDISPLACEMENTMAP /><FEDISTANTLIGHT /><FEFLOOD /><FEFUNCA /><FEFUNCB /><FEFUNCG /><FEFUNCR /><FEGAUSSIANBLUR /><FEIMAGE /><FEMERGE /><FEMERGENODE /><FEMORPHOLOGY /><FEOFFSET /><FEPOINTLIGHT /><FESPECULARLIGHTING /><FESPOTLIGHT /><FETILE /><FETURBULENCE /><FOREIGNOBJECT /><GLYPHREF /><LINEARGRADIENT /><RADIALGRADIENT /><TEXTPATH /></SVG>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><body><math><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math altglyph>
+| <math altglyphdef>
+| <math altglyphitem>
+| <math animatecolor>
+| <math animatemotion>
+| <math animatetransform>
+| <math clippath>
+| <math feblend>
+| <math fecolormatrix>
+| <math fecomponenttransfer>
+| <math fecomposite>
+| <math feconvolvematrix>
+| <math fediffuselighting>
+| <math fedisplacementmap>
+| <math fedistantlight>
+| <math feflood>
+| <math fefunca>
+| <math fefuncb>
+| <math fefuncg>
+| <math fefuncr>
+| <math fegaussianblur>
+| <math feimage>
+| <math femerge>
+| <math femergenode>
+| <math femorphology>
+| <math feoffset>
+| <math fepointlight>
+| <math fespecularlighting>
+| <math fespotlight>
+| <math fetile>
+| <math feturbulence>
+| <math foreignobject>
+| <math glyphref>
+| <math lineargradient>
+| <math radialgradient>
+| <math textpath>
+
+#data
+<!DOCTYPE html><body><svg><solidColor /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg solidcolor>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat
new file mode 100644
index 000000000..63107d277
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat
@@ -0,0 +1,62 @@
+#data
+<!DOCTYPE html><body><p>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "foo"
+| <math math>
+| <math mtext>
+| <i>
+| "baz"
+| <math annotation-xml>
+| <svg svg>
+| <svg desc>
+| <b>
+| "eggs"
+| <svg g>
+| <svg foreignObject>
+| <p>
+| "spam"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <img>
+| <svg g>
+| "quux"
+| "bar"
+
+#data
+<!DOCTYPE html><body>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "foo"
+| <math math>
+| <math mtext>
+| <i>
+| "baz"
+| <math annotation-xml>
+| <svg svg>
+| <svg desc>
+| <b>
+| "eggs"
+| <svg g>
+| <svg foreignObject>
+| <p>
+| "spam"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <img>
+| <svg g>
+| "quux"
+| "bar"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat
new file mode 100644
index 000000000..b8713f885
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat
@@ -0,0 +1,74 @@
+#data
+<!DOCTYPE html><html><body><xyz:abc></xyz:abc>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xyz:abc>
+
+#data
+<!DOCTYPE html><html><body><xyz:abc></xyz:abc><span></span>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xyz:abc>
+| <span>
+
+#data
+<!DOCTYPE html><html><html abc:def=gh><xyz:abc></xyz:abc>
+#errors
+15: Unexpected start tag html
+#document
+| <!DOCTYPE html>
+| <html>
+| abc:def="gh"
+| <head>
+| <body>
+| <xyz:abc>
+
+#data
+<!DOCTYPE html><html xml:lang=bar><html xml:lang=foo>
+#errors
+15: Unexpected start tag html
+#document
+| <!DOCTYPE html>
+| <html>
+| xml:lang="bar"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html 123=456>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| 123="456"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html 123=456><html 789=012>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| 123="456"
+| 789="012"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html><body 789=012>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| 789="012"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat
new file mode 100644
index 000000000..6ce1c0d16
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat
@@ -0,0 +1,208 @@
+#data
+<!DOCTYPE html><p><b><i><u></p> <p>X
+#errors
+Line: 1 Col: 31 Unexpected end tag (p). Ignored.
+Line: 1 Col: 36 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <i>
+| <u>
+| <b>
+| <i>
+| <u>
+| " "
+| <p>
+| "X"
+
+#data
+<p><b><i><u></p>
+<p>X
+#errors
+Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected end tag (p). Ignored.
+Line: 2 Col: 4 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <i>
+| <u>
+| <b>
+| <i>
+| <u>
+| "
+"
+| <p>
+| "X"
+
+#data
+<!doctype html></html> <head>
+#errors
+Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " "
+
+#data
+<!doctype html></body><meta>
+#errors
+Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+
+#data
+<html></html><!-- foo -->
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element.
+#document
+| <html>
+| <head>
+| <body>
+| <!-- foo -->
+
+#data
+<!doctype html></body><title>X</title>
+#errors
+Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+
+#data
+<!doctype html><table> X<meta></table>
+#errors
+Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " X"
+| <meta>
+| <table>
+
+#data
+<!doctype html><table> x</table>
+#errors
+Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x"
+| <table>
+
+#data
+<!doctype html><table> x </table>
+#errors
+Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x "
+| <table>
+
+#data
+<!doctype html><table><tr> x</table>
+#errors
+Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table>X<style> <tr>x </style> </table>
+#errors
+Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <style>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div>
+#errors
+Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode.
+Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <a>
+| "foo"
+| <table>
+| " "
+| <tbody>
+| <tr>
+| <td>
+| "bar"
+| " "
+
+#data
+<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes>
+#errors
+6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+13: Stray start tag “frame”.
+21: Stray end tag “frame”.
+29: Stray end tag “frame”.
+39: “frameset” start tag after “body” already open.
+105: End of file seen inside an [R]CDATA element.
+105: End of file seen and there were open elements.
+XXX: These errors are wrong, please fix me!
+#document
+| <html>
+| <head>
+| <frameset>
+| <frame>
+| <frameset>
+| <frame>
+| <noframes>
+| "</frameset><noframes>"
+
+#data
+<!DOCTYPE html><object></html>
+#errors
+1: Expected closing tag. Unexpected end of file
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <object>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat
new file mode 100644
index 000000000..c8ef66f0e
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat
@@ -0,0 +1,2299 @@
+#data
+<!doctype html><script>
+#errors
+Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script>a
+#errors
+Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "a"
+| <body>
+
+#data
+<!doctype html><script><
+#errors
+Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<"
+| <body>
+
+#data
+<!doctype html><script></
+#errors
+Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</"
+| <body>
+
+#data
+<!doctype html><script></S
+#errors
+Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</S"
+| <body>
+
+#data
+<!doctype html><script></SC
+#errors
+Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SC"
+| <body>
+
+#data
+<!doctype html><script></SCR
+#errors
+Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCR"
+| <body>
+
+#data
+<!doctype html><script></SCRI
+#errors
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRI"
+| <body>
+
+#data
+<!doctype html><script></SCRIP
+#errors
+Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRIP"
+| <body>
+
+#data
+<!doctype html><script></SCRIPT
+#errors
+Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRIPT"
+| <body>
+
+#data
+<!doctype html><script></SCRIPT
+#errors
+Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script></s
+#errors
+Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</s"
+| <body>
+
+#data
+<!doctype html><script></sc
+#errors
+Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</sc"
+| <body>
+
+#data
+<!doctype html><script></scr
+#errors
+Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scr"
+| <body>
+
+#data
+<!doctype html><script></scri
+#errors
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scri"
+| <body>
+
+#data
+<!doctype html><script></scrip
+#errors
+Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scrip"
+| <body>
+
+#data
+<!doctype html><script></script
+#errors
+Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</script"
+| <body>
+
+#data
+<!doctype html><script></script
+#errors
+Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script><!
+#errors
+Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!"
+| <body>
+
+#data
+<!doctype html><script><!a
+#errors
+Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!a"
+| <body>
+
+#data
+<!doctype html><script><!-
+#errors
+Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!-"
+| <body>
+
+#data
+<!doctype html><script><!-a
+#errors
+Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!-a"
+| <body>
+
+#data
+<!doctype html><script><!--
+#errors
+Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<!doctype html><script><!--a
+#errors
+Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--a"
+| <body>
+
+#data
+<!doctype html><script><!--<
+#errors
+Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<"
+| <body>
+
+#data
+<!doctype html><script><!--<a
+#errors
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<a"
+| <body>
+
+#data
+<!doctype html><script><!--</
+#errors
+Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--</"
+| <body>
+
+#data
+<!doctype html><script><!--</script
+#errors
+Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--</script"
+| <body>
+
+#data
+<!doctype html><script><!--</script
+#errors
+Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<!doctype html><script><!--<s
+#errors
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<s"
+| <body>
+
+#data
+<!doctype html><script><!--<script
+#errors
+Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script"
+| <body>
+
+#data
+<!doctype html><script><!--<script
+#errors
+Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script "
+| <body>
+
+#data
+<!doctype html><script><!--<script <
+#errors
+Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script <"
+| <body>
+
+#data
+<!doctype html><script><!--<script <a
+#errors
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script <a"
+| <body>
+
+#data
+<!doctype html><script><!--<script </
+#errors
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </"
+| <body>
+
+#data
+<!doctype html><script><!--<script </s
+#errors
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </s"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script
+#errors
+Line: 1 Col: 43 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script"
+| <body>
+
+#data
+<!doctype html><script><!--<script </scripta
+#errors
+Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </scripta"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script
+#errors
+Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script>
+#errors
+Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script>"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script/
+#errors
+Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script/"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script <
+#errors
+Line: 1 Col: 45 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script <a
+#errors
+Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <a"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </
+#errors
+Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script
+#errors
+Line: 1 Col: 52 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </script"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script
+#errors
+Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script/
+#errors
+Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script -
+#errors
+Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -"
+| <body>
+
+#data
+<!doctype html><script><!--<script -a
+#errors
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -a"
+| <body>
+
+#data
+<!doctype html><script><!--<script -<
+#errors
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -<"
+| <body>
+
+#data
+<!doctype html><script><!--<script --
+#errors
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --"
+| <body>
+
+#data
+<!doctype html><script><!--<script --a
+#errors
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --a"
+| <body>
+
+#data
+<!doctype html><script><!--<script --<
+#errors
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --<"
+| <body>
+
+#data
+<!doctype html><script><!--<script -->
+#errors
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --><
+#errors
+Line: 1 Col: 39 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --><"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></
+#errors
+Line: 1 Col: 40 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --></"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script
+#errors
+Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --></script"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script
+#errors
+Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script/
+#errors
+Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script><\/script>--></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script><\/script>-->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></scr'+'ipt>--></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt>-->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>--><!--</script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>--><!--"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>-- ></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>-- >"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>- -></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- ->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>- - ></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- - >"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>-></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>->"
+| <body>
+
+#data
+<!doctype html><script><!--<script>--!></script>X
+#errors
+Line: 1 Col: 49 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script>--!></script>X"
+| <body>
+
+#data
+<!doctype html><script><!--<scr'+'ipt></script>--></script>
+#errors
+Line: 1 Col: 59 Unexpected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<scr'+'ipt>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><script><!--<script></scr'+'ipt></script>X
+#errors
+Line: 1 Col: 57 Unexpected end of file. Expected end tag (script).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt></script>X"
+| <body>
+
+#data
+<!doctype html><style><!--<style></style>--></style>
+#errors
+Line: 1 Col: 52 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--<style>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><style><!--</style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "X"
+
+#data
+<!doctype html><style><!--...</style>...--></style>
+#errors
+Line: 1 Col: 51 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <body>
+| "...-->"
+
+#data
+<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<!doctype html><style><!--...<style><!--...--!></style>--></style>
+#errors
+Line: 1 Col: 66 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<!doctype html><style>...<style><!--...</style><!-- --></style>
+#errors
+Line: 1 Col: 63 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<!doctype html><style>...<!--[if IE]><style>...</style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<!doctype html><title><!--<title></title>--></title>
+#errors
+Line: 1 Col: 52 Unexpected end tag (title).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><title>&lt;/title></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<!doctype html><title>foo/title><link></head><body>X
+#errors
+Line: 1 Col: 52 Unexpected end of file. Expected end tag (title).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<!doctype html><noscript><!--<noscript></noscript>--></noscript>
+#errors
+Line: 1 Col: 64 Unexpected end tag (noscript).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<!doctype html><noscript><iframe></noscript>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<!doctype html><noframes><!--<noframes></noframes>--></noframes>
+#errors
+Line: 1 Col: 64 Unexpected end tag (noframes).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<!doctype html><textarea><!--<textarea></textarea>--></textarea>
+#errors
+Line: 1 Col: 64 Unexpected end tag (textarea).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<!doctype html><textarea>&lt;/textarea></textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<!doctype html><textarea>&lt;</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<"
+
+#data
+<!doctype html><textarea>a&lt;b</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "a<b"
+
+#data
+<!doctype html><iframe><!--<iframe></iframe>--></iframe>
+#errors
+Line: 1 Col: 56 Unexpected end tag (iframe).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "<!--<iframe>"
+| "-->"
+
+#data
+<!doctype html><iframe>...<!--X->...<!--/X->...</iframe>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "...<!--X->...<!--/X->..."
+
+#data
+<!doctype html><xmp><!--<xmp></xmp>--></xmp>
+#errors
+Line: 1 Col: 44 Unexpected end tag (xmp).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xmp>
+| "<!--<xmp>"
+| "-->"
+
+#data
+<!doctype html><noembed><!--<noembed></noembed>--></noembed>
+#errors
+Line: 1 Col: 60 Unexpected end tag (noembed).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <noembed>
+| "<!--<noembed>"
+| "-->"
+
+#data
+<script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 8 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script>a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "a"
+| <body>
+
+#data
+<script><
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<"
+| <body>
+
+#data
+<script></
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</"
+| <body>
+
+#data
+<script></S
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</S"
+| <body>
+
+#data
+<script></SC
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</SC"
+| <body>
+
+#data
+<script></SCR
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</SCR"
+| <body>
+
+#data
+<script></SCRI
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRI"
+| <body>
+
+#data
+<script></SCRIP
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRIP"
+| <body>
+
+#data
+<script></SCRIPT
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRIPT"
+| <body>
+
+#data
+<script></SCRIPT
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script></s
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</s"
+| <body>
+
+#data
+<script></sc
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</sc"
+| <body>
+
+#data
+<script></scr
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</scr"
+| <body>
+
+#data
+<script></scri
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</scri"
+| <body>
+
+#data
+<script></scrip
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</scrip"
+| <body>
+
+#data
+<script></script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</script"
+| <body>
+
+#data
+<script></script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script><!
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!"
+| <body>
+
+#data
+<script><!a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!a"
+| <body>
+
+#data
+<script><!-
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!-"
+| <body>
+
+#data
+<script><!-a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!-a"
+| <body>
+
+#data
+<script><!--
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<script><!--a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--a"
+| <body>
+
+#data
+<script><!--<
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<"
+| <body>
+
+#data
+<script><!--<a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<a"
+| <body>
+
+#data
+<script><!--</
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--</"
+| <body>
+
+#data
+<script><!--</script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--</script"
+| <body>
+
+#data
+<script><!--</script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<script><!--<s
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<s"
+| <body>
+
+#data
+<script><!--<script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 19 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script"
+| <body>
+
+#data
+<script><!--<script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script "
+| <body>
+
+#data
+<script><!--<script <
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script <"
+| <body>
+
+#data
+<script><!--<script <a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script <a"
+| <body>
+
+#data
+<script><!--<script </
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </"
+| <body>
+
+#data
+<script><!--<script </s
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </s"
+| <body>
+
+#data
+<script><!--<script </script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script"
+| <body>
+
+#data
+<script><!--<script </scripta
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </scripta"
+| <body>
+
+#data
+<script><!--<script </script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script>"
+| <body>
+
+#data
+<script><!--<script </script/
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script/"
+| <body>
+
+#data
+<script><!--<script </script <
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <"
+| <body>
+
+#data
+<script><!--<script </script <a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <a"
+| <body>
+
+#data
+<script><!--<script </script </
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </"
+| <body>
+
+#data
+<script><!--<script </script </script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </script"
+| <body>
+
+#data
+<script><!--<script </script </script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script </script/
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script </script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script -
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -"
+| <body>
+
+#data
+<script><!--<script -a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -a"
+| <body>
+
+#data
+<script><!--<script --
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --"
+| <body>
+
+#data
+<script><!--<script --a
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --a"
+| <body>
+
+#data
+<script><!--<script -->
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --><
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --><"
+| <body>
+
+#data
+<script><!--<script --></
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --></"
+| <body>
+
+#data
+<script><!--<script --></script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --></script"
+| <body>
+
+#data
+<script><!--<script --></script
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --></script/
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script><\/script>--></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script><\/script>-->"
+| <body>
+
+#data
+<script><!--<script></scr'+'ipt>--></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt>-->"
+| <body>
+
+#data
+<script><!--<script></script><script></script></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>"
+| <body>
+
+#data
+<script><!--<script></script><script></script>--><!--</script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>--><!--"
+| <body>
+
+#data
+<script><!--<script></script><script></script>-- ></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>-- >"
+| <body>
+
+#data
+<script><!--<script></script><script></script>- -></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- ->"
+| <body>
+
+#data
+<script><!--<script></script><script></script>- - ></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- - >"
+| <body>
+
+#data
+<script><!--<script></script><script></script>-></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>->"
+| <body>
+
+#data
+<script><!--<script>--!></script>X
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script>--!></script>X"
+| <body>
+
+#data
+<script><!--<scr'+'ipt></script>--></script>
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 44 Unexpected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<scr'+'ipt>"
+| <body>
+| "-->"
+
+#data
+<script><!--<script></scr'+'ipt></script>X
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 42 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt></script>X"
+| <body>
+
+#data
+<style><!--<style></style>--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<style>"
+| <body>
+| "-->"
+
+#data
+<style><!--</style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "X"
+
+#data
+<style><!--...</style>...--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 36 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <body>
+| "...-->"
+
+#data
+<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<style><!--...<style><!--...--!></style>--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 51 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<style><!--...</style><!-- --><style>@import ...</style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<style>...<style><!--...</style><!-- --></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 48 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<style>...<!--[if IE]><style>...</style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<title><!--<title></title>--></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<title>&lt;/title></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<title>foo/title><link></head><body>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<noscript><!--<noscript></noscript>--></noscript>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (noscript).
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<noscript><iframe></noscript>X
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<noframes><!--<noframes></noframes>--></noframes>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (noframes).
+#document
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<noframes><body><script><!--...</script></body></noframes></html>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<textarea><!--<textarea></textarea>--></textarea>
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (textarea).
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<textarea>&lt;/textarea></textarea>
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<iframe><!--<iframe></iframe>--></iframe>
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+Line: 1 Col: 41 Unexpected end tag (iframe).
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "<!--<iframe>"
+| "-->"
+
+#data
+<iframe>...<!--X->...<!--/X->...</iframe>
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "...<!--X->...<!--/X->..."
+
+#data
+<xmp><!--<xmp></xmp>--></xmp>
+#errors
+Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end tag (xmp).
+#document
+| <html>
+| <head>
+| <body>
+| <xmp>
+| "<!--<xmp>"
+| "-->"
+
+#data
+<noembed><!--<noembed></noembed>--></noembed>
+#errors
+Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE.
+Line: 1 Col: 45 Unexpected end tag (noembed).
+#document
+| <html>
+| <head>
+| <body>
+| <noembed>
+| "<!--<noembed>"
+| "-->"
+
+#data
+<!doctype html><table>
+
+#errors
+Line 2 Col 0 Unexpected end of file. Expected table content.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| "
+"
+
+#data
+<!doctype html><table><td><span><font></span><span>
+#errors
+Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase.
+Line 1 Col 45 Unexpected end tag (span).
+Line 1 Col 51 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <span>
+| <font>
+| <font>
+| <span>
+
+#data
+<!doctype html><form><table></form><form></table></form>
+#errors
+35: Stray end tag "form".
+41: Start tag "form" seen in "table".
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <table>
+| <form>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat
new file mode 100644
index 000000000..7b555f888
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat
@@ -0,0 +1,153 @@
+#data
+<!doctype html><table><tbody><select><tr>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><tr><select><td>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<!doctype html><table><tr><td><select><td>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| <td>
+
+#data
+<!doctype html><table><tr><th><select><td>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <th>
+| <select>
+| <td>
+
+#data
+<!doctype html><table><caption><select><tr>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <select>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><select><tr>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><td>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><th>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><tbody>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><thead>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><tfoot>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><caption>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><table><tr></table>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| "a"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat
new file mode 100644
index 000000000..680e1f068
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat
@@ -0,0 +1,269 @@
+#data
+<!doctype html><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+
+#data
+<!doctype html><table><tbody><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+| <tbody>
+
+#data
+<!doctype html><table><tbody><tr><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><tbody><tr><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><td><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><caption><plaintext></plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><tr><style></script></style>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+| <style>
+| "</script>"
+
+#data
+<!doctype html><table><tr><script></style></script>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+| <script>
+| "</style>"
+
+#data
+<!doctype html><table><caption><style></script></style>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <style>
+| "</script>"
+| "abc"
+
+#data
+<!doctype html><table><td><style></script></style>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <style>
+| "</script>"
+| "abc"
+
+#data
+<!doctype html><select><script></style></script>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+
+#data
+<!doctype html><table><select><script></style></script>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+| <table>
+
+#data
+<!doctype html><table><tr><select><script></style></script>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><frameset></frameset><noframes>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+
+#data
+<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+| <!-- abc -->
+
+#data
+<!doctype html><frameset></frameset></html><noframes>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+
+#data
+<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+| <!-- abc -->
+
+#data
+<!doctype html><table><tr></tbody><tfoot>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <tfoot>
+
+#data
+<!doctype html><table><td><svg></svg>abc<td>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| "abc"
+| <td>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat
new file mode 100644
index 000000000..0d62f5a5b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat
@@ -0,0 +1,1237 @@
+#data
+<!doctype html><math><mn DefinitionUrl="foo">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| definitionURL="foo"
+
+#data
+<!doctype html><html></p><!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <!-- foo -->
+| <head>
+| <body>
+
+#data
+<!doctype html><head></head></p><!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <!-- foo -->
+| <body>
+
+#data
+<!doctype html><body><p><pre>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <pre>
+
+#data
+<!doctype html><body><p><listing>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <listing>
+
+#data
+<!doctype html><p><plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <plaintext>
+
+#data
+<!doctype html><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <h1>
+
+#data
+<!doctype html><form><isindex>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+
+#data
+<!doctype html><isindex action="POST">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| action="POST"
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><isindex prompt="this is isindex">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "this is isindex"
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><isindex type="hidden">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| type="hidden"
+| <hr>
+
+#data
+<!doctype html><isindex name="foo">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><ruby><p><rp>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <p>
+| <rp>
+
+#data
+<!doctype html><ruby><div><span><rp>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <span>
+| <rp>
+
+#data
+<!doctype html><ruby><div><p><rp>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <p>
+| <rp>
+
+#data
+<!doctype html><ruby><p><rt>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <p>
+| <rt>
+
+#data
+<!doctype html><ruby><div><span><rt>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <span>
+| <rt>
+
+#data
+<!doctype html><ruby><div><p><rt>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <p>
+| <rt>
+
+#data
+<!doctype html><math/><foo>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <foo>
+
+#data
+<!doctype html><svg/><foo>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <foo>
+
+#data
+<!doctype html><div></body><!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- foo -->
+
+#data
+<!doctype html><h1><div><h3><span></h1>foo
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <h1>
+| <div>
+| <h3>
+| <span>
+| "foo"
+
+#data
+<!doctype html><p></h3>foo
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "foo"
+
+#data
+<!doctype html><h3><li>abc</h2>foo
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <h3>
+| <li>
+| "abc"
+| "foo"
+
+#data
+<!doctype html><table>abc<!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <!-- foo -->
+
+#data
+<!doctype html><table> <!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <!-- foo -->
+
+#data
+<!doctype html><table> b <!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " b "
+| <table>
+| <!-- foo -->
+
+#data
+<!doctype html><select><option><option>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+
+#data
+<!doctype html><select><option></optgroup>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!doctype html><select><option></optgroup>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!doctype html><p><math><mi><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mi>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mo><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mo>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mn><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mn>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><ms><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math ms>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mtext><p><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mtext>
+| <p>
+| <h1>
+
+#data
+<!doctype html><frameset></noframes>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html c=d><body></html><html a=b>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <body>
+
+#data
+<!doctype html><html c=d><frameset></frameset></html><html a=b>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html><!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <!-- foo -->
+
+#data
+<!doctype html><html><frameset></frameset></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!doctype html><html><frameset></frameset></html>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html><p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html></p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<html><frameset></frameset></html><!doctype html>
+#errors
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><body><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html><p><frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><p>a<frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "a"
+
+#data
+<!doctype html><p> <frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><pre><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+
+#data
+<!doctype html><listing><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <listing>
+
+#data
+<!doctype html><li><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <li>
+
+#data
+<!doctype html><dd><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dd>
+
+#data
+<!doctype html><dt><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dt>
+
+#data
+<!doctype html><button><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <button>
+
+#data
+<!doctype html><applet><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <applet>
+
+#data
+<!doctype html><marquee><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <marquee>
+
+#data
+<!doctype html><object><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <object>
+
+#data
+<!doctype html><table><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+
+#data
+<!doctype html><area><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <area>
+
+#data
+<!doctype html><basefont><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <basefont>
+| <frameset>
+
+#data
+<!doctype html><bgsound><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <bgsound>
+| <frameset>
+
+#data
+<!doctype html><br><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<!doctype html><embed><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <embed>
+
+#data
+<!doctype html><img><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+
+#data
+<!doctype html><input><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+
+#data
+<!doctype html><keygen><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <keygen>
+
+#data
+<!doctype html><wbr><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <wbr>
+
+#data
+<!doctype html><hr><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <hr>
+
+#data
+<!doctype html><textarea></textarea><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+
+#data
+<!doctype html><xmp></xmp><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xmp>
+
+#data
+<!doctype html><iframe></iframe><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+
+#data
+<!doctype html><select></select><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><svg></svg><frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><math></math><frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><svg><foreignObject><div> <frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><svg>a</svg><frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "a"
+
+#data
+<!doctype html><svg> </svg><frameset><frame>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<html>aaa<frameset></frameset>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "aaa"
+
+#data
+<html> a <frameset></frameset>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "a "
+
+#data
+<!doctype html><div><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><div><body><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<!doctype html><p><math></p>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| "a"
+
+#data
+<!doctype html><p><math><mn><span></p>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mn>
+| <span>
+| <p>
+| "a"
+
+#data
+<!doctype html><math></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!doctype html><meta charset="ascii">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| charset="ascii"
+| <body>
+
+#data
+<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| content="text/html;charset=ascii"
+| http-equiv="content-type"
+| <body>
+
+#data
+<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -->
+| <meta>
+| charset="utf8"
+| <body>
+
+#data
+<!doctype html><html a=b><head></head><html c=d>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <body>
+
+#data
+<!doctype html><image/>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+
+#data
+<!doctype html>a<i>b<table>c<b>d</i>e</b>f
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "a"
+| <i>
+| "bc"
+| <b>
+| "de"
+| "f"
+| <table>
+
+#data
+<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+| <table>
+
+#data
+<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+
+#data
+<!doctype html><table><i>a<b>b<div>c</i>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <i>
+| "c"
+| <table>
+
+#data
+<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+| <table>
+
+#data
+<!doctype html><table><i>a<div>b<tr>c<b>d</i>e
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <div>
+| "b"
+| <i>
+| "c"
+| <b>
+| "d"
+| <b>
+| "e"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><td><table><i>a<div>b<b>c</i>d
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+| "a"
+| <div>
+| <i>
+| "b"
+| <b>
+| "c"
+| <b>
+| "d"
+| <table>
+
+#data
+<!doctype html><body><bgsound>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <bgsound>
+
+#data
+<!doctype html><body><basefont>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <basefont>
+
+#data
+<!doctype html><a><b></a><basefont>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <basefont>
+
+#data
+<!doctype html><a><b></a><bgsound>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <bgsound>
+
+#data
+<!doctype html><figcaption><article></figcaption>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <figcaption>
+| <article>
+| "a"
+
+#data
+<!doctype html><summary><article></summary>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <summary>
+| <article>
+| "a"
+
+#data
+<!doctype html><p><a><plaintext>b
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <a>
+| <plaintext>
+| <a>
+| "b"
+
+#data
+<!DOCTYPE html><div>a<a></div>b<p>c</p>d
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| "a"
+| <a>
+| <a>
+| "b"
+| <p>
+| "c"
+| "d"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat
new file mode 100644
index 000000000..60d859221
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat
@@ -0,0 +1,763 @@
+#data
+<!DOCTYPE html>Test
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<textarea>test</div>test
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+Line: 1 Col: 24 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "test</div>test"
+
+#data
+<table><td>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 11 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><td>test</tbody></table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "test"
+
+#data
+<frame>test
+#errors
+Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE.
+Line: 1 Col: 7 Unexpected start tag frame. Ignored.
+#document
+| <html>
+| <head>
+| <body>
+| "test"
+
+#data
+<!DOCTYPE html><frameset>test
+#errors
+Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored.
+Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset><!DOCTYPE html>
+#errors
+Line: 1 Col: 40 Unexpected DOCTYPE. Ignored.
+Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><font><p><b>test</font>
+#errors
+Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <font>
+| <b>
+| "test"
+
+#data
+<!DOCTYPE html><dt><div><dd>
+#errors
+Line: 1 Col: 28 Missing end tag (div, dt).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dt>
+| <div>
+| <dd>
+
+#data
+<script></x
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
+#document
+| <html>
+| <head>
+| <script>
+| "</x"
+| <body>
+
+#data
+<table><plaintext><td>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode.
+Line: 1 Col: 22 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "<td>"
+| <table>
+
+#data
+<plaintext></plaintext>
+#errors
+Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE.
+Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!DOCTYPE html><table><tr>TEST
+#errors
+Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 30 Unexpected end of file. Expected table content.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "TEST"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4>
+#errors
+Line: 1 Col: 37 Unexpected start tag (body).
+Line: 1 Col: 53 Unexpected start tag (body).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| t1="1"
+| t2="2"
+| t3="3"
+| t4="4"
+
+#data
+</b test
+#errors
+Line: 1 Col: 8 Unexpected end of file in attribute name.
+Line: 1 Col: 8 End tag contains unexpected attributes.
+Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE.
+Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html></b test<b &=&amp>X
+#errors
+Line: 1 Col: 32 Named entity didn't end with ';'.
+Line: 1 Col: 33 End tag contains unexpected attributes.
+Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+
+#data
+<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+Line: 1 Col: 54 Unexpected end of file in the tag name.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| type="text/x-foobar;baz"
+| "X</SCRipt"
+| <body>
+
+#data
+&
+#errors
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&"
+
+#data
+&#
+#errors
+Line: 1 Col: 1 Numeric entity expected. Got end of file instead.
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&#"
+
+#data
+&#X
+#errors
+Line: 1 Col: 3 Numeric entity expected but none found.
+Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&#X"
+
+#data
+&#x
+#errors
+Line: 1 Col: 3 Numeric entity expected but none found.
+Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&#x"
+
+#data
+&#45
+#errors
+Line: 1 Col: 4 Numeric entity didn't end with ';'.
+Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "-"
+
+#data
+&x-test
+#errors
+Line: 1 Col: 1 Named entity expected. Got none.
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&x-test"
+
+#data
+<!doctypehtml><p><li>
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <li>
+
+#data
+<!doctypehtml><p><dt>
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <dt>
+
+#data
+<!doctypehtml><p><dd>
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <dd>
+
+#data
+<!doctypehtml><p><form>
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <form>
+
+#data
+<!DOCTYPE html><p></P>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "X"
+
+#data
+&AMP
+#errors
+Line: 1 Col: 4 Named entity didn't end with ';'.
+Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&"
+
+#data
+&AMp;
+#errors
+Line: 1 Col: 1 Named entity expected. Got none.
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "&AMp;"
+
+#data
+<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY>
+#errors
+Line: 1 Col: 110 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly>
+
+#data
+<!DOCTYPE html>X</body>X
+#errors
+Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "XX"
+
+#data
+<!DOCTYPE html><!-- X
+#errors
+Line: 1 Col: 21 Unexpected end of file in comment.
+#document
+| <!DOCTYPE html>
+| <!-- X -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><table><caption>test TEST</caption><td>test
+#errors
+Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 58 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| "test TEST"
+| <tbody>
+| <tr>
+| <td>
+| "test"
+
+#data
+<!DOCTYPE html><select><option><optgroup>
+#errors
+Line: 1 Col: 41 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <optgroup>
+
+#data
+<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option>
+#errors
+Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag.
+Line: 1 Col: 76 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+| <option>
+| <option>
+| <option>
+
+#data
+<!DOCTYPE html><select><optgroup><option><optgroup>
+#errors
+Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+| <option>
+| <optgroup>
+
+#data
+<!DOCTYPE html><datalist><option>foo</datalist>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <datalist>
+| <option>
+| "foo"
+| "bar"
+
+#data
+<!DOCTYPE html><font><input><input></font>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <input>
+| <input>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX -->
+#errors
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX
+#errors
+Line: 1 Col: 29 Unexpected end of file in comment (-)
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX - XXX -->
+#errors
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<isindex test=x name=x>
+#errors
+Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected start tag isindex. Don't use it!
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| test="x"
+| <hr>
+
+#data
+test
+test
+#errors
+Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "test
+test"
+
+#data
+<!DOCTYPE html><body><title>test</body></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "test</body>"
+
+#data
+<!DOCTYPE html><body><title>X</title><meta name=z><link rel=foo><style>
+x { content:"</style" } </style>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+| <meta>
+| name="z"
+| <link>
+| rel="foo"
+| <style>
+| "
+x { content:"</style" } "
+
+#data
+<!DOCTYPE html><select><optgroup></optgroup></select>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+
+#data
+
+
+#errors
+Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html> <html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><script>
+</script> <title>x</title> </head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "
+"
+| " "
+| <title>
+| "x"
+| " "
+| <body>
+
+#data
+<!DOCTYPE html><html><body><html id=x>
+#errors
+Line: 1 Col: 38 html needs to be the first start tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html>X</body><html id="x">
+#errors
+Line: 1 Col: 36 Unexpected start tag token (html) in the after body phase.
+Line: 1 Col: 36 html needs to be the first start tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+| "X"
+
+#data
+<!DOCTYPE html><head><html id=x>
+#errors
+Line: 1 Col: 32 html needs to be the first start tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html>X</html>X
+#errors
+Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "XX"
+
+#data
+<!DOCTYPE html>X</html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X "
+
+#data
+<!DOCTYPE html>X</html><p>X
+#errors
+Line: 1 Col: 26 Unexpected start tag (p).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <p>
+| "X"
+
+#data
+<!DOCTYPE html>X<p/x/y/z>
+#errors
+Line: 1 Col: 19 Expected a > after the /.
+Line: 1 Col: 21 Solidus (/) incorrectly placed in tag.
+Line: 1 Col: 23 Solidus (/) incorrectly placed in tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <p>
+| x=""
+| y=""
+| z=""
+
+#data
+<!DOCTYPE html><!--x--
+#errors
+Line: 1 Col: 22 Unexpected end of file in comment (--).
+#document
+| <!DOCTYPE html>
+| <!-- x -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><table><tr><td></p></table>
+#errors
+Line: 1 Col: 34 Unexpected end tag (p). Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <p>
+
+#data
+<!DOCTYPE <!DOCTYPE HTML>><!--<!--x-->-->
+#errors
+Line: 1 Col: 20 Expected space or '>'. Got ''
+Line: 1 Col: 25 Erroneous DOCTYPE.
+Line: 1 Col: 35 Unexpected character in comment found.
+#document
+| <!DOCTYPE <!doctype>
+| <html>
+| <head>
+| <body>
+| ">"
+| <!-- <!--x -->
+| "-->"
+
+#data
+<!doctype html><div><form></form><div></div></div>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <form>
+| <div>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat
new file mode 100644
index 000000000..6bd825608
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat
@@ -0,0 +1,455 @@
+#data
+<!doctype html><p><button><button>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <button>
+
+#data
+<!doctype html><p><button><address>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <address>
+
+#data
+<!doctype html><p><button><blockquote>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <blockquote>
+
+#data
+<!doctype html><p><button><menu>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <menu>
+
+#data
+<!doctype html><p><button><p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <p>
+
+#data
+<!doctype html><p><button><ul>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <ul>
+
+#data
+<!doctype html><p><button><h1>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <h1>
+
+#data
+<!doctype html><p><button><h6>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <h6>
+
+#data
+<!doctype html><p><button><listing>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <listing>
+
+#data
+<!doctype html><p><button><pre>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <pre>
+
+#data
+<!doctype html><p><button><form>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <form>
+
+#data
+<!doctype html><p><button><li>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <li>
+
+#data
+<!doctype html><p><button><dd>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <dd>
+
+#data
+<!doctype html><p><button><dt>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <dt>
+
+#data
+<!doctype html><p><button><plaintext>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <plaintext>
+
+#data
+<!doctype html><p><button><table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <table>
+
+#data
+<!doctype html><p><button><hr>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <hr>
+
+#data
+<!doctype html><p><button><xmp>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <xmp>
+
+#data
+<!doctype html><p><button></p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <p>
+
+#data
+<!doctype html><address><button></address>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <address>
+| <button>
+| "a"
+
+#data
+<!doctype html><address><button></address>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <address>
+| <button>
+| "a"
+
+#data
+<p><table></p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <p>
+| <table>
+
+#data
+<!doctype html><svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!doctype html><p><figcaption>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <figcaption>
+
+#data
+<!doctype html><p><summary>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <summary>
+
+#data
+<!doctype html><form><table><form>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <table>
+
+#data
+<!doctype html><table><form><form>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <form>
+
+#data
+<!doctype html><table><form></table><form>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <form>
+
+#data
+<!doctype html><svg><foreignObject><p>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+
+#data
+<!doctype html><svg><title>abc
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| "abc"
+
+#data
+<option><span><option>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <option>
+| <span>
+| <option>
+
+#data
+<option><option>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <option>
+| <option>
+
+#data
+<math><annotation-xml><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <div>
+
+#data
+<math><annotation-xml encoding="application/svg+xml"><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="application/svg+xml"
+| <div>
+
+#data
+<math><annotation-xml encoding="application/xhtml+xml"><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="application/xhtml+xml"
+| <div>
+
+#data
+<math><annotation-xml encoding="aPPlication/xhtmL+xMl"><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="aPPlication/xhtmL+xMl"
+| <div>
+
+#data
+<math><annotation-xml encoding="text/html"><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="text/html"
+| <div>
+
+#data
+<math><annotation-xml encoding="Text/htmL"><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="Text/htmL"
+| <div>
+
+#data
+<math><annotation-xml encoding=" text/html "><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding=" text/html "
+| <div>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat
new file mode 100644
index 000000000..1260ec03e
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat
@@ -0,0 +1,221 @@
+#data
+<svg><![CDATA[foo]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<math><![CDATA[foo]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| "foo"
+
+#data
+<div><![CDATA[foo]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- [CDATA[foo]] -->
+
+#data
+<svg><![CDATA[foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<svg><![CDATA[foo
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<svg><![CDATA[
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<svg><![CDATA[]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<svg><![CDATA[]] >]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]] >"
+
+#data
+<svg><![CDATA[]] >]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]] >"
+
+#data
+<svg><![CDATA[]]
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]]"
+
+#data
+<svg><![CDATA[]
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]"
+
+#data
+<svg><![CDATA[]>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]>a"
+
+#data
+<svg><foreignObject><div><![CDATA[foo]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <!-- [CDATA[foo]] -->
+
+#data
+<svg><![CDATA[<svg>]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+
+#data
+<svg><![CDATA[</svg>a]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "</svg>a"
+
+#data
+<svg><![CDATA[<svg>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>a"
+
+#data
+<svg><![CDATA[</svg>a
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "</svg>a"
+
+#data
+<svg><![CDATA[<svg>]]><path>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+| <svg path>
+
+#data
+<svg><![CDATA[<svg>]]></path>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+
+#data
+<svg><![CDATA[<svg>]]><!--path-->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+| <!-- path -->
+
+#data
+<svg><![CDATA[<svg>]]>path
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>path"
+
+#data
+<svg><![CDATA[<!--svg-->]]>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<!--svg-->"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat
new file mode 100644
index 000000000..aab27b2e9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat
@@ -0,0 +1,157 @@
+#data
+<a><b><big><em><strong><div>X</a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <big>
+| <em>
+| <strong>
+| <big>
+| <em>
+| <strong>
+| <div>
+| <a>
+| "X"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8>A</a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| "A"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9>A</a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| <div>
+| id="9"
+| "A"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9><div id=10>A</a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| <div>
+| id="9"
+| <div>
+| id="10"
+| "A"
+
+#data
+<cite><b><cite><i><cite><i><cite><i><div>X</b>TEST
+#errors
+Line: 1 Col: 6 Unexpected start tag (cite). Expected DOCTYPE.
+Line: 1 Col: 46 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 50 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <cite>
+| <b>
+| <cite>
+| <i>
+| <cite>
+| <i>
+| <cite>
+| <i>
+| <i>
+| <i>
+| <div>
+| <b>
+| "X"
+| "TEST"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat
new file mode 100644
index 000000000..34d2a73f1
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat
@@ -0,0 +1,155 @@
+#data
+<p><font size=4><font color=red><font size=4><font size=4><font size=4><font size=4><font size=4><font color=red><p>X
+#errors
+3: Start tag seen without seeing a doctype first. Expected "<!DOCTYPE html>".
+116: Unclosed elements.
+117: End of file seen and there were open elements.
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| color="red"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| color="red"
+| <p>
+| <font>
+| color="red"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| color="red"
+| "X"
+
+#data
+<p><font size=4><font size=4><font size=4><font size=4><p>X
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><font size=4><font size=4><font size=4><font size="5"><font size=4><p>X
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="5"
+| <font>
+| size="4"
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="5"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><font size=4 id=a><font size=4 id=b><font size=4><font size=4><p>X
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| id="a"
+| size="4"
+| <font>
+| id="b"
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <p>
+| <font>
+| id="a"
+| size="4"
+| <font>
+| id="b"
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><b id=a><b id=a><b id=a><b><object><b id=a><b id=a>X</object><p>Y
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| <object>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| "X"
+| <p>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| "Y"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat
new file mode 100644
index 000000000..f6dc7eb48
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat
@@ -0,0 +1,79 @@
+#data
+<!DOCTYPE html>&NotEqualTilde;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "≂̸"
+
+#data
+<!DOCTYPE html>&NotEqualTilde;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "≂̸A"
+
+#data
+<!DOCTYPE html>&ThickSpace;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "  "
+
+#data
+<!DOCTYPE html>&ThickSpace;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "  A"
+
+#data
+<!DOCTYPE html>&NotSubset;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "⊂⃒"
+
+#data
+<!DOCTYPE html>&NotSubset;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "⊂⃒A"
+
+#data
+<!DOCTYPE html>&Gopf;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "𝔾"
+
+#data
+<!DOCTYPE html>&Gopf;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+|     "𝔾A"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat
new file mode 100644
index 000000000..00de7295b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat
@@ -0,0 +1,219 @@
+#data
+<!DOCTYPE html><body><foo>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <foo>
+| "A"
+
+#data
+<!DOCTYPE html><body><area>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <area>
+| "A"
+
+#data
+<!DOCTYPE html><body><base>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <base>
+| "A"
+
+#data
+<!DOCTYPE html><body><basefont>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <basefont>
+| "A"
+
+#data
+<!DOCTYPE html><body><bgsound>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <bgsound>
+| "A"
+
+#data
+<!DOCTYPE html><body><br>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <br>
+| "A"
+
+#data
+<!DOCTYPE html><body><col>A
+#errors
+26: Stray start tag "col".
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+
+#data
+<!DOCTYPE html><body><command>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <command>
+| "A"
+
+#data
+<!DOCTYPE html><body><embed>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <embed>
+| "A"
+
+#data
+<!DOCTYPE html><body><frame>A
+#errors
+26: Stray start tag "frame".
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+
+#data
+<!DOCTYPE html><body><hr>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <hr>
+| "A"
+
+#data
+<!DOCTYPE html><body><img>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+| "A"
+
+#data
+<!DOCTYPE html><body><input>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| "A"
+
+#data
+<!DOCTYPE html><body><keygen>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <keygen>
+| "A"
+
+#data
+<!DOCTYPE html><body><link>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <link>
+| "A"
+
+#data
+<!DOCTYPE html><body><meta>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+| "A"
+
+#data
+<!DOCTYPE html><body><param>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <param>
+| "A"
+
+#data
+<!DOCTYPE html><body><source>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <source>
+| "A"
+
+#data
+<!DOCTYPE html><body><track>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <track>
+| "A"
+
+#data
+<!DOCTYPE html><body><wbr>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <wbr>
+| "A"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat
new file mode 100644
index 000000000..fae11ffdf
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat
@@ -0,0 +1,313 @@
+#data
+<!DOCTYPE html><body><a href='#1'><nobr>1<nobr></a><br><a href='#2'><nobr>2<nobr></a><br><a href='#3'><nobr>3<nobr></a>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| href="#1"
+| <nobr>
+| "1"
+| <nobr>
+| <nobr>
+| <br>
+| <a>
+| href="#2"
+| <a>
+| href="#2"
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| <br>
+| <a>
+| href="#3"
+| <a>
+| href="#3"
+| <nobr>
+| "3"
+| <nobr>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr></b><i><nobr>2<nobr></i>3
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+| <table>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<table><tr><td><nobr></b><i><nobr>2<nobr></i>3
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<div><nobr></b><i><nobr>2<nobr></i>3
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <div>
+| <b>
+| <nobr>
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr></b><div><i><nobr>2<nobr></i>3
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <div>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr><ins></b><i><nobr>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <ins>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<ins><nobr></b><i>2
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <ins>
+| <nobr>
+| <nobr>
+| <i>
+| "2"
+
+#data
+<!DOCTYPE html><body><b>1<nobr></b><i><nobr>2</i>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| "1"
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+
+#data
+<p><code x</code></p>
+
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <code>
+| code=""
+| x<=""
+| <code>
+| code=""
+| x<=""
+| "
+"
+
+#data
+<!DOCTYPE html><svg><foreignObject><p><i></p>a
+#errors
+45: End tag “p” seen, but there were open elements.
+41: Unclosed element “i”.
+46: End of file seen and there were open elements.
+35: Unclosed element “foreignObject”.
+20: Unclosed element “svg”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><table><tr><td><svg><foreignObject><p><i></p>a
+#errors
+56: End tag “p” seen, but there were open elements.
+52: Unclosed element “i”.
+57: End of file seen and there were open elements.
+46: Unclosed element “foreignObject”.
+31: Unclosed element “svg”.
+22: Unclosed element “table”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><math><mtext><p><i></p>a
+#errors
+38: End tag “p” seen, but there were open elements.
+34: Unclosed element “i”.
+39: End of file in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><table><tr><td><math><mtext><p><i></p>a
+#errors
+53: End tag “p” seen, but there were open elements.
+49: Unclosed element “i”.
+54: End of file in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mtext>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><body><div><!/div>a
+#errors
+29: Bogus comment.
+34: End of file seen and there were open elements.
+26: Unclosed element “div”.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- /div -->
+| "a"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat
new file mode 100644
index 000000000..38dc501be
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat
@@ -0,0 +1,305 @@
+#data
+<head></head><style></style>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected start tag (style) that can be in head. Moved.
+#document
+| <html>
+| <head>
+| <style>
+| <body>
+
+#data
+<head></head><script></script>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved.
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<head></head><!-- --><style></style><!-- --><script></script>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved.
+#document
+| <html>
+| <head>
+| <style>
+| <script>
+| <!-- -->
+| <!-- -->
+| <body>
+
+#data
+<head></head><!-- -->x<style></style><!-- --><script></script>
+#errors
+Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <!-- -->
+| <body>
+| "x"
+| <style>
+| <!-- -->
+| <script>
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+foo</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+
+foo</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "
+foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+foo
+</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "foo
+"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x</pre><span>
+</span></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x"
+| <span>
+| "
+"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x
+y</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x
+y"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x<div>
+y</pre></body></html>
+#errors
+Line: 2 Col: 7 End tag (pre) seen too early. Expected other end tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x"
+| <div>
+| "
+y"
+
+#data
+<!DOCTYPE html><pre>&#x0a;&#x0a;A</pre>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "
+A"
+
+#data
+<!DOCTYPE html><HTML><META><HEAD></HEAD></HTML>
+#errors
+Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| <body>
+
+#data
+<!DOCTYPE html><HTML><HEAD><head></HEAD></HTML>
+#errors
+Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<textarea>foo<span>bar</span><i>baz
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "foo<span>bar</span><i>baz"
+
+#data
+<title>foo<span>bar</em><i>baz
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 30 Unexpected end of file. Expected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "foo<span>bar</em><i>baz"
+| <body>
+
+#data
+<!DOCTYPE html><textarea>
+</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+
+#data
+<!DOCTYPE html><textarea>
+foo</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "foo"
+
+#data
+<!DOCTYPE html><textarea>
+
+foo</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "
+foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><ul><li><div><p><li></ul></body></html>
+#errors
+Line: 1 Col: 60 Missing end tag (div, li).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| <p>
+| <li>
+
+#data
+<!doctype html><nobr><nobr><nobr>
+#errors
+Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
+Line: 1 Col: 33 Unexpected start tag (nobr) implies end tag (nobr).
+Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <nobr>
+| <nobr>
+| <nobr>
+
+#data
+<!doctype html><nobr><nobr></nobr><nobr>
+#errors
+Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
+Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <nobr>
+| <nobr>
+| <nobr>
+
+#data
+<!doctype html><html><body><p><table></table></body></html>
+#errors
+Not known
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <table>
+
+#data
+<p><table></table>
+#errors
+Not known
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <table>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat
new file mode 100644
index 000000000..3c506326d
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat
@@ -0,0 +1,59 @@
+#data
+direct div content
+#errors
+#document-fragment
+div
+#document
+| "direct div content"
+
+#data
+direct textarea content
+#errors
+#document-fragment
+textarea
+#document
+| "direct textarea content"
+
+#data
+textarea content with <em>pseudo</em> <foo>markup
+#errors
+#document-fragment
+textarea
+#document
+| "textarea content with <em>pseudo</em> <foo>markup"
+
+#data
+this is &#x0043;DATA inside a <style> element
+#errors
+#document-fragment
+style
+#document
+| "this is &#x0043;DATA inside a <style> element"
+
+#data
+</plaintext>
+#errors
+#document-fragment
+plaintext
+#document
+| "</plaintext>"
+
+#data
+setting html's innerHTML
+#errors
+Line: 1 Col: 24 Unexpected EOF in inner html mode.
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| "setting html's innerHTML"
+
+#data
+<title>setting head's innerHTML</title>
+#errors
+#document-fragment
+head
+#document
+| <title>
+| "setting head's innerHTML"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat
new file mode 100644
index 000000000..d7b5128a4
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat
@@ -0,0 +1,191 @@
+#data
+<style> <!-- </style>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end of file. Expected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| " <!-- "
+| <body>
+| "x"
+
+#data
+<style> <!-- </style> --> </style>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<style> <!--> </style>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| " <!--> "
+| <body>
+| "x"
+
+#data
+<style> <!---> </style>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| " <!---> "
+| <body>
+| "x"
+
+#data
+<iframe> <!---> </iframe>x
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| " <!---> "
+| "x"
+
+#data
+<iframe> <!--- </iframe>->x</iframe> --> </iframe>x
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| " <!--- "
+| "->x --> x"
+
+#data
+<script> <!-- </script> --> </script>x
+#errors
+Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <script>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<title> <!-- </title> --> </title>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<textarea> <!--- </textarea>->x</textarea> --> </textarea>x
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| " <!--- "
+| "->x --> x"
+
+#data
+<style> <!</-- </style>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| " <!</-- "
+| <body>
+| "x"
+
+#data
+<p><xmp></xmp>
+#errors
+XXX: Unknown
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <xmp>
+
+#data
+<xmp> <!-- > --> </xmp>
+#errors
+Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <xmp>
+| " <!-- > --> "
+
+#data
+<title>&amp;</title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| "&"
+| <body>
+
+#data
+<title><!--&amp;--></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| "<!--&-->"
+| <body>
+
+#data
+<title><!--</title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 19 Unexpected end of file. Expected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "<!--"
+| <body>
+
+#data
+<noscript><!--</noscript>--></noscript>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "-->"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat
new file mode 100644
index 000000000..f28ece4fb
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat
@@ -0,0 +1,663 @@
+#data
+<!doctype html></head> <head>
+#errors
+Line: 1 Col: 29 Unexpected start tag head. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| " "
+| <body>
+
+#data
+<!doctype html><form><div></form><div>
+#errors
+33: End tag "form" seen but there were unclosed elements.
+38: End of file seen and there were open elements.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <div>
+| <div>
+
+#data
+<!doctype html><title>&amp;</title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "&"
+| <body>
+
+#data
+<!doctype html><title><!--&amp;--></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "<!--&-->"
+| <body>
+
+#data
+<!doctype>
+#errors
+Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
+Line: 1 Col: 10 Unexpected > character. Expected DOCTYPE name.
+Line: 1 Col: 10 Erroneous DOCTYPE.
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+
+#data
+<!---x
+#errors
+Line: 1 Col: 6 Unexpected end of file in comment.
+Line: 1 Col: 6 Unexpected End of file. Expected DOCTYPE.
+#document
+| <!-- -x -->
+| <html>
+| <head>
+| <body>
+
+#data
+<body>
+<div>
+#errors
+Line: 1 Col: 6 Unexpected start tag (body).
+Line: 2 Col: 5 Expected closing tag. Unexpected end of file.
+#document-fragment
+div
+#document
+| "
+"
+| <div>
+
+#data
+<frameset></frameset>
+foo
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 2 Col: 3 Unexpected non-space characters in the after frameset phase. Ignored.
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+<noframes>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 2 Col: 10 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+| <noframes>
+
+#data
+<frameset></frameset>
+<div>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 2 Col: 5 Unexpected start tag (div) in the after frameset phase. Ignored.
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+</html>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+</div>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 2 Col: 6 Unexpected end tag (div) in the after frameset phase. Ignored.
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<form><form>
+#errors
+Line: 1 Col: 6 Unexpected start tag (form). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected start tag (form).
+Line: 1 Col: 12 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+
+#data
+<button><button>
+#errors
+Line: 1 Col: 8 Unexpected start tag (button). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected start tag (button) implies end tag (button).
+Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <button>
+| <button>
+
+#data
+<table><tr><td></th>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end tag (th). Ignored.
+Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><caption><td>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end tag (td). Ignored.
+Line: 1 Col: 20 Unexpected table cell start tag (td) in the table body phase.
+Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><caption><div>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 21 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+</caption><div>
+#errors
+Line: 1 Col: 10 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
+#document-fragment
+caption
+#document
+| <div>
+
+#data
+<table><caption><div></caption>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 31 Unexpected end tag (caption). Missing end tag (div).
+Line: 1 Col: 31 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+<table><caption></table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 24 Unexpected end table tag in caption. Generates implied end caption.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+
+#data
+</table><div>
+#errors
+Line: 1 Col: 8 Unexpected end table tag in caption. Generates implied end caption.
+Line: 1 Col: 8 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 13 Expected closing tag. Unexpected end of file.
+#document-fragment
+caption
+#document
+| <div>
+
+#data
+<table><caption></body></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 23 Unexpected end tag (body). Ignored.
+Line: 1 Col: 29 Unexpected end tag (col). Ignored.
+Line: 1 Col: 40 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 47 Unexpected end tag (html). Ignored.
+Line: 1 Col: 55 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 60 Unexpected end tag (td). Ignored.
+Line: 1 Col: 68 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 73 Unexpected end tag (th). Ignored.
+Line: 1 Col: 81 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 86 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 86 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+
+#data
+<table><caption><div></div>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 27 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+<table><tr><td></body></caption></col></colgroup></html>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end tag (body). Ignored.
+Line: 1 Col: 32 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 38 Unexpected end tag (col). Ignored.
+Line: 1 Col: 49 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 56 Unexpected end tag (html). Ignored.
+Line: 1 Col: 56 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+</table></tbody></tfoot></thead></tr><div>
+#errors
+Line: 1 Col: 8 Unexpected end tag (table). Ignored.
+Line: 1 Col: 16 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 24 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 32 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 37 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 42 Expected closing tag. Unexpected end of file.
+#document-fragment
+td
+#document
+| <div>
+
+#data
+<table><colgroup>foo
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 20 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| "foo"
+| <table>
+| <colgroup>
+
+#data
+foo<col>
+#errors
+Line: 1 Col: 3 Unexpected end tag (colgroup). Ignored.
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<table><colgroup></col>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 23 This element (col) has no end tag.
+Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+
+#data
+<frameset><div>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 1 Col: 15 Unexpected start tag token (div) in the frameset phase. Ignored.
+Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</frameset><frame>
+#errors
+Line: 1 Col: 11 Unexpected end tag token (frameset) in the frameset phase (innerHTML).
+#document-fragment
+frameset
+#document
+| <frame>
+
+#data
+<frameset></div>
+#errors
+Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected end tag token (div) in the frameset phase. Ignored.
+Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</body><div>
+#errors
+Line: 1 Col: 7 Unexpected end tag (body). Ignored.
+Line: 1 Col: 12 Expected closing tag. Unexpected end of file.
+#document-fragment
+body
+#document
+| <div>
+
+#data
+<table><tr><div>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode.
+Line: 1 Col: 16 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+</tr><td>
+#errors
+Line: 1 Col: 5 Unexpected end tag (tr). Ignored.
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+</tbody></tfoot></thead><td>
+#errors
+Line: 1 Col: 8 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 16 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 24 Unexpected end tag (thead). Ignored.
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<table><tr><div><td>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode.
+Line: 1 Col: 20 Unexpected implied end tag (div) in the table row phase.
+Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<caption><col><colgroup><tbody><tfoot><thead><tr>
+#errors
+Line: 1 Col: 9 Unexpected start tag (caption).
+Line: 1 Col: 14 Unexpected start tag (col).
+Line: 1 Col: 24 Unexpected start tag (colgroup).
+Line: 1 Col: 31 Unexpected start tag (tbody).
+Line: 1 Col: 38 Unexpected start tag (tfoot).
+Line: 1 Col: 45 Unexpected start tag (thead).
+Line: 1 Col: 49 Unexpected end of file. Expected table content.
+#document-fragment
+tbody
+#document
+| <tr>
+
+#data
+<table><tbody></thead>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 22 Unexpected end tag (thead) in the table body phase. Ignored.
+Line: 1 Col: 22 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+</table><tr>
+#errors
+Line: 1 Col: 8 Unexpected end tag (table). Ignored.
+Line: 1 Col: 12 Unexpected end of file. Expected table content.
+#document-fragment
+tbody
+#document
+| <tr>
+
+#data
+<table><tbody></body></caption></col></colgroup></html></td></th></tr>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 21 Unexpected end tag (body) in the table body phase. Ignored.
+Line: 1 Col: 31 Unexpected end tag (caption) in the table body phase. Ignored.
+Line: 1 Col: 37 Unexpected end tag (col) in the table body phase. Ignored.
+Line: 1 Col: 48 Unexpected end tag (colgroup) in the table body phase. Ignored.
+Line: 1 Col: 55 Unexpected end tag (html) in the table body phase. Ignored.
+Line: 1 Col: 60 Unexpected end tag (td) in the table body phase. Ignored.
+Line: 1 Col: 65 Unexpected end tag (th) in the table body phase. Ignored.
+Line: 1 Col: 70 Unexpected end tag (tr) in the table body phase. Ignored.
+Line: 1 Col: 70 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+<table><tbody></div>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 20 Unexpected end tag (div) in table context caused voodoo mode.
+Line: 1 Col: 20 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 20 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+<table><table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected start tag (table) implies end tag (table).
+Line: 1 Col: 14 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <table>
+
+#data
+<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 14 Unexpected end tag (body). Ignored.
+Line: 1 Col: 24 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 30 Unexpected end tag (col). Ignored.
+Line: 1 Col: 41 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 48 Unexpected end tag (html). Ignored.
+Line: 1 Col: 56 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 61 Unexpected end tag (td). Ignored.
+Line: 1 Col: 69 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 74 Unexpected end tag (th). Ignored.
+Line: 1 Col: 82 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 87 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 87 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+
+#data
+</table><tr>
+#errors
+Line: 1 Col: 8 Unexpected end tag (table). Ignored.
+Line: 1 Col: 12 Unexpected end of file. Expected table content.
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+<body></body></html>
+#errors
+Line: 1 Col: 20 Unexpected html end tag in inner html mode.
+Line: 1 Col: 20 Unexpected EOF in inner html mode.
+#document-fragment
+html
+#document
+| <head>
+| <body>
+
+#data
+<html><frameset></frameset></html>
+#errors
+Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"><html></html>
+#errors
+Line: 1 Col: 50 Erroneous DOCTYPE.
+Line: 1 Col: 63 Unexpected end tag (html) after the (implied) root element.
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "">
+| <html>
+| <head>
+| <body>
+
+#data
+<param><frameset></frameset>
+#errors
+Line: 1 Col: 7 Unexpected start tag (param). Expected DOCTYPE.
+Line: 1 Col: 17 Unexpected start tag (frameset).
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<source><frameset></frameset>
+#errors
+Line: 1 Col: 7 Unexpected start tag (source). Expected DOCTYPE.
+Line: 1 Col: 17 Unexpected start tag (frameset).
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<track><frameset></frameset>
+#errors
+Line: 1 Col: 7 Unexpected start tag (track). Expected DOCTYPE.
+Line: 1 Col: 17 Unexpected start tag (frameset).
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</html><frameset></frameset>
+#errors
+7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+17: Stray “frameset” start tag.
+17: “frameset” start tag seen.
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</body><frameset></frameset>
+#errors
+7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
+17: Stray “frameset” start tag.
+17: “frameset” start tag seen.
+#document
+| <html>
+| <head>
+| <frameset>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat
new file mode 100644
index 000000000..f5193c660
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat
@@ -0,0 +1,390 @@
+#data
+<!doctype html><body><title>X</title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+
+#data
+<!doctype html><table><title>X</title></table>
+#errors
+Line: 1 Col: 29 Unexpected start tag (title) in table context caused voodoo mode.
+Line: 1 Col: 38 Unexpected end tag (title) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+| <table>
+
+#data
+<!doctype html><head></head><title>X</title>
+#errors
+Line: 1 Col: 35 Unexpected start tag (title) that can be in head. Moved.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "X"
+| <body>
+
+#data
+<!doctype html></head><title>X</title>
+#errors
+Line: 1 Col: 29 Unexpected start tag (title) that can be in head. Moved.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "X"
+| <body>
+
+#data
+<!doctype html><table><meta></table>
+#errors
+Line: 1 Col: 28 Unexpected start tag (meta) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+| <table>
+
+#data
+<!doctype html><table>X<tr><td><table> <meta></table></table>
+#errors
+Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 45 Unexpected start tag (meta) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <meta>
+| <table>
+| " "
+
+#data
+<!doctype html><html> <head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html> <head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html><table><style> <tr>x </style> </table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <style>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><table><TBODY><script> <tr>x </script> </table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <script>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><p><applet><p>X</p></applet>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <applet>
+| <p>
+| "X"
+
+#data
+<!doctype html><listing>
+X</listing>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <listing>
+| "X"
+
+#data
+<!doctype html><select><input>X
+#errors
+Line: 1 Col: 30 Unexpected input start tag in the select phase.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <input>
+| "X"
+
+#data
+<!doctype html><select><select>X
+#errors
+Line: 1 Col: 31 Unexpected select start tag in the select phase treated as select end tag.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+
+#data
+<!doctype html><table><input type=hidDEN></table>
+#errors
+Line: 1 Col: 41 Unexpected input with type hidden in table context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table>X<input type=hidDEN></table>
+#errors
+Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table> <input type=hidDEN></table>
+#errors
+Line: 1 Col: 43 Unexpected input with type hidden in table context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table> <input type='hidDEN'></table>
+#errors
+Line: 1 Col: 45 Unexpected input with type hidden in table context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table><input type=" hidden"><input type=hidDEN></table>
+#errors
+Line: 1 Col: 44 Unexpected start tag (input) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| type=" hidden"
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table><select>X<tr>
+#errors
+Line: 1 Col: 30 Unexpected start tag (select) in table context caused voodoo mode.
+Line: 1 Col: 35 Unexpected table element start tag (trs) in the select in table phase.
+Line: 1 Col: 35 Unexpected end of file. Expected table content.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><select>X</select>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+
+#data
+<!DOCTYPE hTmL><html></html>
+#errors
+Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML><html></html>
+#errors
+Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<body>X</body></body>
+#errors
+Line: 1 Col: 21 Unexpected end tag token (body) in the after body phase.
+Line: 1 Col: 21 Unexpected EOF in inner html mode.
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| "X"
+
+#data
+<div><p>a</x> b
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 13 Unexpected end tag (x). Ignored.
+Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <p>
+| "a b"
+
+#data
+<table><tr><td><code></code> </table>
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <code>
+| " "
+
+#data
+<table><b><tr><td>aaa</td></tr>bbb</table>ccc
+#errors
+XXX: Fix me
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <b>
+| "bbb"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "aaa"
+| <b>
+| "ccc"
+
+#data
+A<table><tr> B</tr> B</table>
+#errors
+XXX: Fix me
+#document
+| <html>
+| <head>
+| <body>
+| "A B B"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+A<table><tr> B</tr> </em>C</table>
+#errors
+XXX: Fix me
+#document
+| <html>
+| <head>
+| <body>
+| "A BC"
+| <table>
+| <tbody>
+| <tr>
+| " "
+
+#data
+<select><keygen>
+#errors
+Not known
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <keygen>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat
new file mode 100644
index 000000000..90e6c919e
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat
@@ -0,0 +1,148 @@
+#data
+<div>
+<div></div>
+</span>x
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 3 Col: 7 Unexpected end tag (span). Ignored.
+Line: 3 Col: 8 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "
+"
+| <div>
+| "
+x"
+
+#data
+<div>x<div></div>
+</span>x
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 2 Col: 7 Unexpected end tag (span). Ignored.
+Line: 2 Col: 8 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "
+x"
+
+#data
+<div>x<div></div>x</span>x
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 25 Unexpected end tag (span). Ignored.
+Line: 1 Col: 26 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "xx"
+
+#data
+<div>x<div></div>y</span>z
+#errors
+Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
+Line: 1 Col: 25 Unexpected end tag (span). Ignored.
+Line: 1 Col: 26 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "yz"
+
+#data
+<table><div>x<div></div>x</span>x
+#errors
+Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
+Line: 1 Col: 12 Unexpected start tag (div) in table context caused voodoo mode.
+Line: 1 Col: 18 Unexpected start tag (div) in table context caused voodoo mode.
+Line: 1 Col: 24 Unexpected end tag (div) in table context caused voodoo mode.
+Line: 1 Col: 32 Unexpected end tag (span) in table context caused voodoo mode.
+Line: 1 Col: 32 Unexpected end tag (span). Ignored.
+Line: 1 Col: 33 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "xx"
+| <table>
+
+#data
+x<table>x
+#errors
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 9 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| "xx"
+| <table>
+
+#data
+x<table><table>x
+#errors
+Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
+Line: 1 Col: 15 Unexpected start tag (table) implies end tag (table).
+Line: 1 Col: 16 Unexpected non-space characters in table context caused voodoo mode.
+Line: 1 Col: 16 Unexpected end of file. Expected table content.
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <table>
+| "x"
+| <table>
+
+#data
+<b>a<div></div><div></b>y
+#errors
+Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
+Line: 1 Col: 24 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "a"
+| <div>
+| <div>
+| <b>
+| "y"
+
+#data
+<a><div><p></a>
+#errors
+Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
+Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
+Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <div>
+| <a>
+| <p>
+| <a>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat
new file mode 100644
index 000000000..554e27aec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat
@@ -0,0 +1,457 @@
+#data
+<!DOCTYPE html><math></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!DOCTYPE html><body><math></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!DOCTYPE html><math><mi>
+#errors
+25: End of file in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+
+#data
+<!DOCTYPE html><math><annotation-xml><svg><u>
+#errors
+45: HTML start tag “u” in a foreign namespace context.
+45: End of file seen and there were open elements.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <u>
+
+#data
+<!DOCTYPE html><body><select><math></math></select>
+#errors
+Line: 1 Col: 35 Unexpected start tag token (math) in the select phase. Ignored.
+Line: 1 Col: 42 Unexpected end tag (math) in the select phase. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!DOCTYPE html><body><select><option><math></math></option></select>
+#errors
+Line: 1 Col: 43 Unexpected start tag token (math) in the select phase. Ignored.
+Line: 1 Col: 50 Unexpected end tag (math) in the select phase. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!DOCTYPE html><body><table><math></math></table>
+#errors
+Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 41 Unexpected end tag (math) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <table>
+
+#data
+<!DOCTYPE html><body><table><math><mi>foo</mi></math></table>
+#errors
+Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 53 Unexpected end tag (math) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><math><mi>foo</mi><mi>bar</mi></math></table>
+#errors
+Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 58 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 65 Unexpected end tag (math) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><tbody><math><mi>foo</mi><mi>bar</mi></math></tbody></table>
+#errors
+Line: 1 Col: 41 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 53 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 65 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 72 Unexpected end tag (math) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+| <tbody>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><math><mi>foo</mi><mi>bar</mi></math></tr></tbody></table>
+#errors
+Line: 1 Col: 45 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 57 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 69 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 76 Unexpected end tag (math) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math></td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math><p>baz</td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi></math><p>baz</caption></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+Line: 1 Col: 70 HTML start tag "p" in a foreign namespace context.
+Line: 1 Col: 81 Unexpected end table tag in caption. Generates implied end caption.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi>baz</table><p>quux
+#errors
+Line: 1 Col: 78 Unexpected end table tag in caption. Generates implied end caption.
+Line: 1 Col: 78 Unexpected end tag (caption). Missing end tag (math).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><colgroup><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+Line: 1 Col: 44 Unexpected start tag (math) in table context caused voodoo mode.
+Line: 1 Col: 56 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 68 Unexpected end tag (mi) in table context caused voodoo mode.
+Line: 1 Col: 71 HTML start tag "p" in a foreign namespace context.
+Line: 1 Col: 71 Unexpected start tag (p) in table context caused voodoo mode.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+| <table>
+| <colgroup>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><tr><td><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+Line: 1 Col: 50 Unexpected start tag token (math) in the select phase. Ignored.
+Line: 1 Col: 54 Unexpected start tag token (mi) in the select phase. Ignored.
+Line: 1 Col: 62 Unexpected end tag (mi) in the select phase. Ignored.
+Line: 1 Col: 66 Unexpected start tag token (mi) in the select phase. Ignored.
+Line: 1 Col: 74 Unexpected end tag (mi) in the select phase. Ignored.
+Line: 1 Col: 77 Unexpected start tag token (p) in the select phase. Ignored.
+Line: 1 Col: 88 Unexpected table element end tag (tables) in the select in table phase.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| "foobarbaz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+Line: 1 Col: 36 Unexpected start tag (select) in table context caused voodoo mode.
+Line: 1 Col: 42 Unexpected start tag token (math) in the select phase. Ignored.
+Line: 1 Col: 46 Unexpected start tag token (mi) in the select phase. Ignored.
+Line: 1 Col: 54 Unexpected end tag (mi) in the select phase. Ignored.
+Line: 1 Col: 58 Unexpected start tag token (mi) in the select phase. Ignored.
+Line: 1 Col: 66 Unexpected end tag (mi) in the select phase. Ignored.
+Line: 1 Col: 69 Unexpected start tag token (p) in the select phase. Ignored.
+Line: 1 Col: 80 Unexpected table element end tag (tables) in the select in table phase.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "foobarbaz"
+| <table>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body></body></html><math><mi>foo</mi><mi>bar</mi><p>baz
+#errors
+Line: 1 Col: 41 Unexpected start tag (math).
+Line: 1 Col: 68 HTML start tag "p" in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body></body><math><mi>foo</mi><mi>bar</mi><p>baz
+#errors
+Line: 1 Col: 34 Unexpected start tag token (math) in the after body phase.
+Line: 1 Col: 61 HTML start tag "p" in a foreign namespace context.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><frameset><math><mi></mi><mi></mi><p><span>
+#errors
+Line: 1 Col: 31 Unexpected start tag token (math) in the frameset phase. Ignored.
+Line: 1 Col: 35 Unexpected start tag token (mi) in the frameset phase. Ignored.
+Line: 1 Col: 40 Unexpected end tag token (mi) in the frameset phase. Ignored.
+Line: 1 Col: 44 Unexpected start tag token (mi) in the frameset phase. Ignored.
+Line: 1 Col: 49 Unexpected end tag token (mi) in the frameset phase. Ignored.
+Line: 1 Col: 52 Unexpected start tag token (p) in the frameset phase. Ignored.
+Line: 1 Col: 58 Unexpected start tag token (span) in the frameset phase. Ignored.
+Line: 1 Col: 58 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset></frameset><math><mi></mi><mi></mi><p><span>
+#errors
+Line: 1 Col: 42 Unexpected start tag (math) in the after frameset phase. Ignored.
+Line: 1 Col: 46 Unexpected start tag (mi) in the after frameset phase. Ignored.
+Line: 1 Col: 51 Unexpected end tag (mi) in the after frameset phase. Ignored.
+Line: 1 Col: 55 Unexpected start tag (mi) in the after frameset phase. Ignored.
+Line: 1 Col: 60 Unexpected end tag (mi) in the after frameset phase. Ignored.
+Line: 1 Col: 63 Unexpected start tag (p) in the after frameset phase. Ignored.
+Line: 1 Col: 69 Unexpected start tag (span) in the after frameset phase. Ignored.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><body xlink:href=foo><math xlink:href=foo></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| <math math>
+| xlink href="foo"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo></mi></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo /></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo />bar</math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+| "bar"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat
new file mode 100644
index 000000000..6c78661e0
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat
@@ -0,0 +1,741 @@
+#data
+<body><span>
+#errors
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><body>
+#errors
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><body>
+#errors
+#document-fragment
+div
+#document
+| <span>
+
+#data
+<body><span>
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| <span>
+
+#data
+<frameset><span>
+#errors
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><frameset>
+#errors
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><frameset>
+#errors
+#document-fragment
+div
+#document
+| <span>
+
+#data
+<frameset><span>
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <frameset>
+
+#data
+<table><tr>
+#errors
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+</table><tr>
+#errors
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+<a>
+#errors
+#document-fragment
+table
+#document
+| <a>
+
+#data
+<a>
+#errors
+#document-fragment
+table
+#document
+| <a>
+
+#data
+<a><caption>a
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <caption>
+| "a"
+
+#data
+<a><colgroup><col>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <colgroup>
+| <col>
+
+#data
+<a><tbody><tr>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+
+#data
+<a><tfoot><tr>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <tfoot>
+| <tr>
+
+#data
+<a><thead><tr>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <thead>
+| <tr>
+
+#data
+<a><tr>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+
+#data
+<a><th>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+| <th>
+
+#data
+<a><td>
+#errors
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table></table><tbody>
+#errors
+#document-fragment
+caption
+#document
+| <table>
+
+#data
+</table><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+<span></table>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+</caption><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+<span></caption><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><caption><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><col><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><colgroup><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><html><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tbody><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><td><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tfoot><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><thead><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><th><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tr><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span></table><span>
+#errors
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+</colgroup><col>
+#errors
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<a><col>
+#errors
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<caption><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<col><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<colgroup><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<tbody><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<tfoot><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<thead><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+</table><a>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<a><tr>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+
+#data
+<a><td>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<a><td>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<a><td>
+#errors
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<td><table><tbody><a><tr>
+#errors
+#document-fragment
+tbody
+#document
+| <tr>
+| <td>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+</tr><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<td><table><a><tr></tr><tr>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <tr>
+
+#data
+<caption><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<col><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<colgroup><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tbody><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tfoot><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<thead><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tr><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+</table><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<td><table></table><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+| <table>
+| <td>
+
+#data
+<td><table></table><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+| <table>
+| <td>
+
+#data
+<caption><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<col><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<colgroup><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tbody><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tfoot><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<th><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<thead><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tr><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</table><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tbody><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</td><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tfoot><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</thead><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</th><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tr><a>
+#errors
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<table><td><td>
+#errors
+#document-fragment
+td
+#document
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <td>
+
+#data
+</select><option>
+#errors
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<input><option>
+#errors
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<keygen><option>
+#errors
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<textarea><option>
+#errors
+#document-fragment
+select
+#document
+| <option>
+
+#data
+</html><!--abc-->
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| <!-- abc -->
+
+#data
+</frameset><frame>
+#errors
+#document-fragment
+frameset
+#document
+| <frame>
+
+#data
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <body>
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat b/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat
new file mode 100644
index 000000000..084199244
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat
@@ -0,0 +1,261 @@
+#data
+<b><p>Bold </b> Not bold</p>
+Also not bold.
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <p>
+| <b>
+| "Bold "
+| " Not bold"
+| "
+Also not bold."
+
+#data
+<html>
+<font color=red><i>Italic and Red<p>Italic and Red </font> Just italic.</p> Italic only.</i> Plain
+<p>I should not be red. <font color=red>Red. <i>Italic and red.</p>
+<p>Italic and red. </i> Red.</font> I should not be red.</p>
+<b>Bold <i>Bold and italic</b> Only Italic </i> Plain
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| color="red"
+| <i>
+| "Italic and Red"
+| <i>
+| <p>
+| <font>
+| color="red"
+| "Italic and Red "
+| " Just italic."
+| " Italic only."
+| " Plain
+"
+| <p>
+| "I should not be red. "
+| <font>
+| color="red"
+| "Red. "
+| <i>
+| "Italic and red."
+| <font>
+| color="red"
+| <i>
+| "
+"
+| <p>
+| <font>
+| color="red"
+| <i>
+| "Italic and red. "
+| " Red."
+| " I should not be red."
+| "
+"
+| <b>
+| "Bold "
+| <i>
+| "Bold and italic"
+| <i>
+| " Only Italic "
+| " Plain"
+
+#data
+<html><body>
+<p><font size="7">First paragraph.</p>
+<p>Second paragraph.</p></font>
+<b><p><i>Bold and Italic</b> Italic</p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <p>
+| <font>
+| size="7"
+| "First paragraph."
+| <font>
+| size="7"
+| "
+"
+| <p>
+| "Second paragraph."
+| "
+"
+| <b>
+| <p>
+| <b>
+| <i>
+| "Bold and Italic"
+| <i>
+| " Italic"
+
+#data
+<html>
+<dl>
+<dt><b>Boo
+<dd>Goo?
+</dl>
+</html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <dl>
+| "
+"
+| <dt>
+| <b>
+| "Boo
+"
+| <dd>
+| <b>
+| "Goo?
+"
+| <b>
+| "
+"
+
+#data
+<html><body>
+<label><a><div>Hello<div>World</div></a></label>
+</body></html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <label>
+| <a>
+| <div>
+| <a>
+| "Hello"
+| <div>
+| "World"
+| "
+"
+
+#data
+<table><center> <font>a</center> <img> <tr><td> </td> </tr> </table>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <center>
+| " "
+| <font>
+| "a"
+| <font>
+| <img>
+| " "
+| <table>
+| " "
+| <tbody>
+| <tr>
+| <td>
+| " "
+| " "
+| " "
+
+#data
+<table><tr><p><a><p>You should see this text.
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <a>
+| <p>
+| <a>
+| "You should see this text."
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<TABLE>
+<TR>
+<CENTER><CENTER><TD></TD></TR><TR>
+<FONT>
+<TABLE><tr></tr></TABLE>
+</P>
+<a></font><font></a>
+This page contains an insanely badly-nested tag sequence.
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <center>
+| <center>
+| <font>
+| "
+"
+| <table>
+| "
+"
+| <tbody>
+| <tr>
+| "
+"
+| <td>
+| <tr>
+| "
+"
+| <table>
+| <tbody>
+| <tr>
+| <font>
+| "
+"
+| <p>
+| "
+"
+| <a>
+| <a>
+| <font>
+| <font>
+| "
+This page contains an insanely badly-nested tag sequence."
+
+#data
+<html>
+<body>
+<b><nobr><div>This text is in a div inside a nobr</nobr>More text that should not be in the nobr, i.e., the
+nobr should have closed the div inside it implicitly. </b><pre>A pre tag outside everything else.</pre>
+</body>
+</html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <b>
+| <nobr>
+| <div>
+| <b>
+| <nobr>
+| "This text is in a div inside a nobr"
+| "More text that should not be in the nobr, i.e., the
+nobr should have closed the div inside it implicitly. "
+| <pre>
+| "A pre tag outside everything else."
+| "
+
+"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat b/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat
new file mode 100644
index 000000000..9d425e99d
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat
@@ -0,0 +1,610 @@
+#data
+Test
+#errors
+Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<div></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<div>Test</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Test"
+
+#data
+<di
+#errors
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<div>Hello</div>
+<script>
+console.log("PASS");
+</script>
+<div>Bye</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Hello"
+| "
+"
+| <script>
+| "
+console.log("PASS");
+"
+| "
+"
+| <div>
+| "Bye"
+
+#data
+<div foo="bar">Hello</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo="bar"
+| "Hello"
+
+#data
+<div>Hello</div>
+<script>
+console.log("FOO<span>BAR</span>BAZ");
+</script>
+<div>Bye</div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Hello"
+| "
+"
+| <script>
+| "
+console.log("FOO<span>BAR</span>BAZ");
+"
+| "
+"
+| <div>
+| "Bye"
+
+#data
+<foo bar="baz"></foo><potato quack="duck"></potato>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="baz"
+| <potato>
+| quack="duck"
+
+#data
+<foo bar="baz"><potato quack="duck"></potato></foo>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="baz"
+| <potato>
+| quack="duck"
+
+#data
+<foo></foo bar="baz"><potato></potato quack="duck">
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| <potato>
+
+#data
+</ tttt>
+#errors
+#document
+| <!-- tttt -->
+| <html>
+| <head>
+| <body>
+
+#data
+<div FOO ><img><img></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo=""
+| <img>
+| <img>
+
+#data
+<p>Test</p<p>Test2</p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "TestTest2"
+
+#data
+<rdar://problem/6869687>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <rdar:>
+| 6869687=""
+| problem=""
+
+#data
+<A>test< /A>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "test< /A>"
+
+#data
+&lt;
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "<"
+
+#data
+<body foo='bar'><body foo='baz' yo='mama'>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| foo="bar"
+| yo="mama"
+
+#data
+<body></br foo="bar"></body>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<bdy><br foo="bar"></body>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <bdy>
+| <br>
+| foo="bar"
+
+#data
+<body></body></br foo="bar">
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<bdy></body><br foo="bar">
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <bdy>
+| <br>
+| foo="bar"
+
+#data
+<html><body></body></html><!-- Hi there -->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <!-- Hi there -->
+
+#data
+<html><body></body></html>x<!-- Hi there -->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+
+#data
+<html><body></body></html>x<!-- Hi there --></html><!-- Again -->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+| <!-- Again -->
+
+#data
+<html><body></body></html>x<!-- Hi there --></body></html><!-- Again -->
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+| <!-- Again -->
+
+#data
+<html><body><ruby><div><rp>xx</rp></div></ruby></body></html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <rp>
+| "xx"
+
+#data
+<html><body><ruby><div><rt>xx</rt></div></ruby></body></html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <rt>
+| "xx"
+
+#data
+<html><frameset><!--1--><noframes>A</noframes><!--2--></frameset><!--3--><noframes>B</noframes><!--4--></html><!--5--><noframes>C</noframes><!--6-->
+#errors
+#document
+| <html>
+| <head>
+| <frameset>
+| <!-- 1 -->
+| <noframes>
+| "A"
+| <!-- 2 -->
+| <!-- 3 -->
+| <noframes>
+| "B"
+| <!-- 4 -->
+| <noframes>
+| "C"
+| <!-- 5 -->
+| <!-- 6 -->
+
+#data
+<select><option>A<select><option>B<select><option>C<select><option>D<select><option>E<select><option>F<select><option>G<select>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "A"
+| <option>
+| "B"
+| <select>
+| <option>
+| "C"
+| <option>
+| "D"
+| <select>
+| <option>
+| "E"
+| <option>
+| "F"
+| <select>
+| <option>
+| "G"
+
+#data
+<dd><dd><dt><dt><dd><li><li>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <dd>
+| <dd>
+| <dt>
+| <dt>
+| <dd>
+| <li>
+| <li>
+
+#data
+<div><b></div><div><nobr>a<nobr>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <b>
+| <div>
+| <b>
+| <nobr>
+| "a"
+| <nobr>
+
+#data
+<head></head>
+<body></body>
+#errors
+#document
+| <html>
+| <head>
+| "
+"
+| <body>
+
+#data
+<head></head> <style></style>ddd
+#errors
+#document
+| <html>
+| <head>
+| <style>
+| " "
+| <body>
+| "ddd"
+
+#data
+<kbd><table></kbd><col><select><tr>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <kbd>
+| <select>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+
+#data
+<kbd><table></kbd><col><select><tr></table><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <kbd>
+| <select>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <div>
+
+#data
+<a><li><style></style><title></title></a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <li>
+| <a>
+| <style>
+| <title>
+
+#data
+<font></p><p><meta><title></title></font>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <p>
+| <font>
+| <meta>
+| <title>
+
+#data
+<a><center><title></title><a>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <center>
+| <a>
+| <title>
+| <a>
+
+#data
+<svg><title><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <div>
+
+#data
+<svg><title><rect><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <rect>
+| <div>
+
+#data
+<svg><title><svg><div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <svg svg>
+| <div>
+
+#data
+<img <="" FAIL>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <img>
+| <=""
+| fail=""
+
+#data
+<ul><li><div id='foo'/>A</li><li>B<div>C</div></li></ul>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| id="foo"
+| "A"
+| <li>
+| "B"
+| <div>
+| "C"
+
+#data
+<svg><em><desc></em>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <em>
+| <desc>
+
+#data
+<table><tr><td><svg><desc><td></desc><circle>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg desc>
+| <td>
+| <circle>
+
+#data
+<svg><tfoot></mi><td>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg tfoot>
+| <svg td>
+
+#data
+<math><mrow><mrow><mn>1</mn></mrow><mi>a</mi></mrow></math>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mrow>
+| <math mrow>
+| <math mn>
+| "1"
+| <math mi>
+| "a"
+
+#data
+<!doctype html><input type="hidden"><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><input type="button"><frameset>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| type="button"
diff --git a/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat b/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat
new file mode 100644
index 000000000..905783d3c
--- /dev/null
+++ b/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat
@@ -0,0 +1,159 @@
+#data
+<foo bar=qux/>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="qux/"
+
+#data
+<p id="status"><noscript><strong>A</strong></noscript><span>B</span></p>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| id="status"
+| <noscript>
+| "<strong>A</strong>"
+| <span>
+| "B"
+
+#data
+<div><sarcasm><div></div></sarcasm></div>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <sarcasm>
+| <div>
+
+#data
+<html><body><img src="" border="0" alt="><div>A</div></body></html>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<table><td></tbody>A
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "A"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><td></thead>A
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+
+#data
+<table><td></tfoot>A
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+
+#data
+<table><thead><td></tbody>A
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <thead>
+| <tr>
+| <td>
+| "A"
+
+#data
+<legend>test</legend>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <legend>
+| "test"
+
+#data
+<table><input>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <input>
+| <table>
+
+#data
+<b><em><dcell><postfield><postfield><postfield><postfield><missing_glyph><missing_glyph><missing_glyph><missing_glyph><hkern><aside></b></em>
+#errors
+#document-fragment
+div
+#document
+| <b>
+| <em>
+| <dcell>
+| <postfield>
+| <postfield>
+| <postfield>
+| <postfield>
+| <missing_glyph>
+| <missing_glyph>
+| <missing_glyph>
+| <missing_glyph>
+| <hkern>
+| <aside>
+| <em>
+| <b>
+
+#data
+<isindex action="x">
+#errors
+#document-fragment
+table
+#document
+| <form>
+| action="x"
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<option><XH<optgroup></optgroup>
+#errors
+#document-fragment
+select
+#document
+| <option>
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
new file mode 100644
index 000000000..893e272a9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token.go
@@ -0,0 +1,1219 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html/atom"
+)
+
+// A TokenType is the type of a Token.
+type TokenType uint32
+
+const (
+ // ErrorToken means that an error occurred during tokenization.
+ ErrorToken TokenType = iota
+ // TextToken means a text node.
+ TextToken
+ // A StartTagToken looks like <a>.
+ StartTagToken
+ // An EndTagToken looks like </a>.
+ EndTagToken
+ // A SelfClosingTagToken looks like <br/>.
+ SelfClosingTagToken
+ // A CommentToken looks like <!--x-->.
+ CommentToken
+ // A DoctypeToken looks like <!DOCTYPE x>.
+ DoctypeToken
+)
+
+// ErrBufferExceeded means that the buffering limit was exceeded.
+var ErrBufferExceeded = errors.New("max buffer exceeded")
+
+// String returns a string representation of the TokenType.
+func (t TokenType) String() string {
+ switch t {
+ case ErrorToken:
+ return "Error"
+ case TextToken:
+ return "Text"
+ case StartTagToken:
+ return "StartTag"
+ case EndTagToken:
+ return "EndTag"
+ case SelfClosingTagToken:
+ return "SelfClosingTag"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ }
+ return "Invalid(" + strconv.Itoa(int(t)) + ")"
+}
+
+// An Attribute is an attribute namespace-key-value triple. Namespace is
+// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
+// does not contain escapable characters like '&', '<' or '>'), and Val is
+// unescaped (it looks like "a<b" rather than "a&lt;b").
+//
+// Namespace is only used by the parser, not the tokenizer.
+type Attribute struct {
+ Namespace, Key, Val string
+}
+
+// A Token consists of a TokenType and some Data (tag name for start and end
+// tags, content for text, comments and doctypes). A tag Token may also contain
+// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
+// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
+// zero if Data is not a known tag name.
+type Token struct {
+ Type TokenType
+ DataAtom atom.Atom
+ Data string
+ Attr []Attribute
+}
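+
+// exampleTokenString is an illustrative sketch, not part of the upstream
+// package: it shows a hand-built start-tag Token rendering back to markup via
+// String(). The tag name and attribute values are arbitrary assumptions.
+func exampleTokenString() string {
+ t := Token{
+ Type: StartTagToken,
+ Data: "a",
+ Attr: []Attribute{{Key: "href", Val: "/index.html"}},
+ }
+ // String writes the tag name followed by each key="value" pair with the
+ // value escaped, so this returns `<a href="/index.html">`.
+ return t.String()
+}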
+
+// tagString returns a string representation of a tag Token's Data and Attr.
+func (t Token) tagString() string {
+ if len(t.Attr) == 0 {
+ return t.Data
+ }
+ buf := bytes.NewBufferString(t.Data)
+ for _, a := range t.Attr {
+ buf.WriteByte(' ')
+ buf.WriteString(a.Key)
+ buf.WriteString(`="`)
+ escape(buf, a.Val)
+ buf.WriteByte('"')
+ }
+ return buf.String()
+}
+
+// String returns a string representation of the Token.
+func (t Token) String() string {
+ switch t.Type {
+ case ErrorToken:
+ return ""
+ case TextToken:
+ return EscapeString(t.Data)
+ case StartTagToken:
+ return "<" + t.tagString() + ">"
+ case EndTagToken:
+ return "</" + t.tagString() + ">"
+ case SelfClosingTagToken:
+ return "<" + t.tagString() + "/>"
+ case CommentToken:
+ return "<!--" + t.Data + "-->"
+ case DoctypeToken:
+ return "<!DOCTYPE " + t.Data + ">"
+ }
+ return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
+}
+
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
+// A Tokenizer returns a stream of HTML Tokens.
+type Tokenizer struct {
+ // r is the source of the HTML text.
+ r io.Reader
+ // tt is the TokenType of the current token.
+ tt TokenType
+ // err is the first error encountered during tokenization. It is possible
+ // for tt != Error && err != nil to hold: this means that Next returned a
+ // valid token but the subsequent Next call will return an error token.
+ // For example, if the HTML text input was just "plain", then the first
+ // Next call would set z.err to io.EOF but return a TextToken, and all
+ // subsequent Next calls would return an ErrorToken.
+ // err is never reset. Once it becomes non-nil, it stays non-nil.
+ err error
+ // readErr is the error returned by the io.Reader r. It is separate from
+ // err because it is valid for an io.Reader to return (n int, err1 error)
+ // such that n > 0 && err1 != nil, and callers should always process the
+ // n > 0 bytes before considering the error err1.
+ readErr error
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // maxBuf limits the data buffered in buf. A value of 0 means unlimited.
+ maxBuf int
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "</script>" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "<p>" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
+
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an <svg> element
+// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
+// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
+// In practice, if using the tokenizer without caring whether MathML or SVG
+// CDATA is text or comments, such as tokenizing HTML to find all the anchor
+// text, it is acceptable to ignore this responsibility.
+func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
+ z.allowCDATA = allowCDATA
+}
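+
+// exampleAllowCDATA is an illustrative sketch, not part of the upstream
+// package: a caller that knows it is tokenizing foreign content (for example
+// standalone SVG) can opt in to CDATA handling. The reader r is assumed to be
+// supplied by the caller.
+func exampleAllowCDATA(r io.Reader) {
+ z := NewTokenizer(r)
+ // With AllowCDATA(true), <![CDATA[foo]]> is returned as the text "foo"
+ // rather than as a bogus comment token.
+ z.AllowCDATA(true)
+ for z.Next() != ErrorToken {
+ // Process tokens as usual.
+ }
+}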
+
+// NextIsNotRawText instructs the tokenizer that the next token should not be
+// considered as 'raw text'. Some elements, such as script and title elements,
+// normally require the next token after the opening tag to be 'raw text' that
+// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
+// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
+// an end tag token for "</title>". There are no distinct start tag or end tag
+// tokens for the "<b>" and "</b>".
+//
+// This tokenizer implementation will generally look for raw text at the right
+// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
+// raw text if in foreign content: <title> generally needs raw text, but a
+// <title> inside an <svg> does not. Another example is that a <textarea>
+// generally needs raw text, but a <textarea> is not allowed as an immediate
+// child of a <select>; in normal parsing, a <textarea> implies </select>, but
+// one cannot close the implicit element when parsing a <select>'s InnerHTML.
+// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
+// ness is difficult to do purely in the tokenizer, as opposed to the parser.
+// For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call NextIsNotRawText as
+// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
+// responsibility for basic usage.
+//
+// Note that this 'raw text' concept is different from the one offered by the
+// Tokenizer.Raw method.
+func (z *Tokenizer) NextIsNotRawText() {
+ z.rawTag = ""
+}
+
+// Err returns the error associated with the most recent ErrorToken token.
+// This is typically io.EOF, meaning the end of tokenization.
+func (z *Tokenizer) Err() error {
+ if z.tt != ErrorToken {
+ return nil
+ }
+ return z.err
+}
+
+// readByte returns the next byte from the input stream, doing a buffered read
+// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
+// slice that holds all the bytes read so far for the current token.
+// It sets z.err if the underlying reader returns an error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readByte() byte {
+ if z.raw.end >= len(z.buf) {
+ // Our buffer is exhausted and we have to read from z.r. Check if the
+ // previous read resulted in an error.
+ if z.readErr != nil {
+ z.err = z.readErr
+ return 0
+ }
+ // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
+ // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
+ // allocate a new buffer before the copy.
+ c := cap(z.buf)
+ d := z.raw.end - z.raw.start
+ var buf1 []byte
+ if 2*d > c {
+ buf1 = make([]byte, d, 2*c)
+ } else {
+ buf1 = z.buf[:d]
+ }
+ copy(buf1, z.buf[z.raw.start:z.raw.end])
+ if x := z.raw.start; x != 0 {
+ // Adjust the data/attr spans to refer to the same contents after the copy.
+ z.data.start -= x
+ z.data.end -= x
+ z.pendingAttr[0].start -= x
+ z.pendingAttr[0].end -= x
+ z.pendingAttr[1].start -= x
+ z.pendingAttr[1].end -= x
+ for i := range z.attr {
+ z.attr[i][0].start -= x
+ z.attr[i][0].end -= x
+ z.attr[i][1].start -= x
+ z.attr[i][1].end -= x
+ }
+ }
+ z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
+ // Now that we have copied the live bytes to the start of the buffer,
+ // we read from z.r into the remainder.
+ var n int
+ n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
+ if n == 0 {
+ z.err = z.readErr
+ return 0
+ }
+ z.buf = buf1[:d+n]
+ }
+ x := z.buf[z.raw.end]
+ z.raw.end++
+ if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
+ z.err = ErrBufferExceeded
+ return 0
+ }
+ return x
+}
+
+// Buffered returns a slice containing data buffered but not yet tokenized.
+func (z *Tokenizer) Buffered() []byte {
+ return z.buf[z.raw.end:]
+}
+
+// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
+// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
+// too many times in succession.
+func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
+ for i := 0; i < 100; i++ {
+ n, err := r.Read(b)
+ if n != 0 || err != nil {
+ return n, err
+ }
+ }
+ return 0, io.ErrNoProgress
+}
+
+// skipWhiteSpace skips past any white space.
+func (z *Tokenizer) skipWhiteSpace() {
+ if z.err != nil {
+ return
+ }
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ // No-op.
+ default:
+ z.raw.end--
+ return
+ }
+ }
+}
+
+// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
+// is typically something like "script" or "textarea".
+func (z *Tokenizer) readRawOrRCDATA() {
+ if z.rawTag == "script" {
+ z.readScript()
+ z.textIsRaw = true
+ z.rawTag = ""
+ return
+ }
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '/' {
+ continue loop
+ }
+ if z.readRawEndTag() || z.err != nil {
+ break loop
+ }
+ }
+ z.data.end = z.raw.end
+ // A textarea's or title's RCDATA can contain escaped entities.
+ z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
+ z.rawTag = ""
+}
+
+// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
+// If it succeeds, it backs up the input position to reconsume the tag and
+// returns true. Otherwise it returns false. The opening "</" has already been
+// consumed.
+func (z *Tokenizer) readRawEndTag() bool {
+ for i := 0; i < len(z.rawTag); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
+ z.raw.end--
+ return false
+ }
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ // The 3 is 2 for the leading "</" plus 1 for the trailing character c.
+ z.raw.end -= 3 + len(z.rawTag)
+ return true
+ }
+ z.raw.end--
+ return false
+}
+
+// readScript reads until the next </script> tag, following the byzantine
+// rules for escaping/hiding the closing tag.
+func (z *Tokenizer) readScript() {
+ defer func() {
+ z.data.end = z.raw.end
+ }()
+ var c byte
+
+scriptData:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '<' {
+ goto scriptDataLessThanSign
+ }
+ goto scriptData
+
+scriptDataLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '/':
+ goto scriptDataEndTagOpen
+ case '!':
+ goto scriptDataEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptData
+
+scriptDataEscapeStart:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapeStartDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapeStartDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapedDashDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataEscapedEndTagOpen
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ goto scriptDataDoubleEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapedEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptDataEscaped
+
+scriptDataDoubleEscapeStart:
+ z.raw.end--
+ for i := 0; i < len("script"); i++ {
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != "script"[i] && c != "SCRIPT"[i] {
+ z.raw.end--
+ goto scriptDataEscaped
+ }
+ }
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ goto scriptDataDoubleEscaped
+ }
+ z.raw.end--
+ goto scriptDataEscaped
+
+scriptDataDoubleEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataDoubleEscapeEnd
+ }
+ z.raw.end--
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapeEnd:
+ if z.readRawEndTag() {
+ z.raw.end += len("</script>")
+ goto scriptDataEscaped
+ }
+ if z.err != nil {
+ return
+ }
+ goto scriptDataDoubleEscaped
+}
+
+// readComment reads the next comment token starting with "<!--". The opening
+// "<!--" has already been consumed.
+func (z *Tokenizer) readComment() {
+ z.data.start = z.raw.end
+ defer func() {
+ if z.data.end < z.data.start {
+ // It's a comment with no data, like <!-->.
+ z.data.end = z.data.start
+ }
+ }()
+ for dashCount := 2; ; {
+ c := z.readByte()
+ if z.err != nil {
+ // Ignore up to two dashes at EOF.
+ if dashCount > 2 {
+ dashCount = 2
+ }
+ z.data.end = z.raw.end - dashCount
+ return
+ }
+ switch c {
+ case '-':
+ dashCount++
+ continue
+ case '>':
+ if dashCount >= 2 {
+ z.data.end = z.raw.end - len("-->")
+ return
+ }
+ case '!':
+ if dashCount >= 2 {
+ c = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len("--!>")
+ return
+ }
+ }
+ }
+ dashCount = 0
+ }
+}
+
+// readUntilCloseAngle reads until the next ">".
+func (z *Tokenizer) readUntilCloseAngle() {
+ z.data.start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len(">")
+ return
+ }
+ }
+}
+
+// readMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
+// "<!a bogus comment". The opening "<!" has already been consumed.
+func (z *Tokenizer) readMarkupDeclaration() TokenType {
+ z.data.start = z.raw.end
+ var c [2]byte
+ for i := 0; i < 2; i++ {
+ c[i] = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return CommentToken
+ }
+ }
+ if c[0] == '-' && c[1] == '-' {
+ z.readComment()
+ return CommentToken
+ }
+ z.raw.end -= 2
+ if z.readDoctype() {
+ return DoctypeToken
+ }
+ if z.allowCDATA && z.readCDATA() {
+ z.convertNUL = true
+ return TextToken
+ }
+ // It's a bogus comment.
+ z.readUntilCloseAngle()
+ return CommentToken
+}
+
+// readDoctype attempts to read a doctype declaration and returns true if
+// successful. The opening "<!" has already been consumed.
+func (z *Tokenizer) readDoctype() bool {
+ const s = "DOCTYPE"
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] && c != s[i]+('a'-'A') {
+ // Back up to read the fragment of "DOCTYPE" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return true
+ }
+ z.readUntilCloseAngle()
+ return true
+}
+
+// readCDATA attempts to read a CDATA section and returns true if
+// successful. The opening "<!" has already been consumed.
+func (z *Tokenizer) readCDATA() bool {
+ const s = "[CDATA["
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] {
+ // Back up to read the fragment of "[CDATA[" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ z.data.start = z.raw.end
+ brackets := 0
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return true
+ }
+ switch c {
+ case ']':
+ brackets++
+ case '>':
+ if brackets >= 2 {
+ z.data.end = z.raw.end - len("]]>")
+ return true
+ }
+ brackets = 0
+ default:
+ brackets = 0
+ }
+ }
+}
+
+// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
+// case-insensitively matches any element of ss.
+func (z *Tokenizer) startTagIn(ss ...string) bool {
+loop:
+ for _, s := range ss {
+ if z.data.end-z.data.start != len(s) {
+ continue loop
+ }
+ for i := 0; i < len(s); i++ {
+ c := z.buf[z.data.start+i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ if c != s[i] {
+ continue loop
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// readStartTag reads the next start tag token. The opening "<a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readStartTag() TokenType {
+ z.readTag(true)
+ if z.err != nil {
+ return ErrorToken
+ }
+ // Several tags flag the tokenizer's next token as raw.
+ c, raw := z.buf[z.data.start], false
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ switch c {
+ case 'i':
+ raw = z.startTagIn("iframe")
+ case 'n':
+ raw = z.startTagIn("noembed", "noframes", "noscript")
+ case 'p':
+ raw = z.startTagIn("plaintext")
+ case 's':
+ raw = z.startTagIn("script", "style")
+ case 't':
+ raw = z.startTagIn("textarea", "title")
+ case 'x':
+ raw = z.startTagIn("xmp")
+ }
+ if raw {
+ z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
+ }
+ // Look for a self-closing token like "<br/>".
+ if z.err == nil && z.buf[z.raw.end-2] == '/' {
+ return SelfClosingTagToken
+ }
+ return StartTagToken
+}
+
+// readTag reads the next tag token and its attributes. If saveAttr, those
+// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
+// The opening "<a" or "</a" has already been consumed, where 'a' means anything
+// in [A-Za-z].
+func (z *Tokenizer) readTag(saveAttr bool) {
+ z.attr = z.attr[:0]
+ z.nAttrReturned = 0
+ // Read the tag name and attribute key/value pairs.
+ z.readTagName()
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ for {
+ c := z.readByte()
+ if z.err != nil || c == '>' {
+ break
+ }
+ z.raw.end--
+ z.readTagAttrKey()
+ z.readTagAttrVal()
+ // Save pendingAttr if saveAttr and that attribute has a non-empty key.
+ if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
+ z.attr = append(z.attr, z.pendingAttr)
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ break
+ }
+ }
+}
+
+// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
+// is positioned such that the first byte of the tag name (the "d" in "<div")
+// has already been consumed.
+func (z *Tokenizer) readTagName() {
+ z.data.start = z.raw.end - 1
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.data.end = z.raw.end - 1
+ return
+ case '/', '>':
+ z.raw.end--
+ z.data.end = z.raw.end
+ return
+ }
+ }
+}
+
+// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
+// Precondition: z.err == nil.
+func (z *Tokenizer) readTagAttrKey() {
+ z.pendingAttr[0].start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[0].end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/':
+ z.pendingAttr[0].end = z.raw.end - 1
+ return
+ case '=', '>':
+ z.raw.end--
+ z.pendingAttr[0].end = z.raw.end
+ return
+ }
+ }
+}
+
+// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
+func (z *Tokenizer) readTagAttrVal() {
+ z.pendingAttr[1].start = z.raw.end
+ z.pendingAttr[1].end = z.raw.end
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != '=' {
+ z.raw.end--
+ return
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ quote := z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch quote {
+ case '>':
+ z.raw.end--
+ return
+
+ case '\'', '"':
+ z.pendingAttr[1].start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ if c == quote {
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
+ }
+ }
+
+ default:
+ z.pendingAttr[1].start = z.raw.end - 1
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
+ case '>':
+ z.raw.end--
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ }
+ }
+}
+
+// Next scans the next token and returns its type.
+func (z *Tokenizer) Next() TokenType {
+ z.raw.start = z.raw.end
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ if z.err != nil {
+ z.tt = ErrorToken
+ return z.tt
+ }
+ if z.rawTag != "" {
+ if z.rawTag == "plaintext" {
+ // Read everything up to EOF.
+ for z.err == nil {
+ z.readByte()
+ }
+ z.data.end = z.raw.end
+ z.textIsRaw = true
+ } else {
+ z.readRawOrRCDATA()
+ }
+ if z.data.end > z.data.start {
+ z.tt = TextToken
+ z.convertNUL = true
+ return z.tt
+ }
+ }
+ z.textIsRaw = false
+ z.convertNUL = false
+
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+
+ // Check if the '<' we have just read is part of a tag, comment
+ // or doctype. If not, it's part of the accumulated text token.
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ var tokenType TokenType
+ switch {
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ tokenType = StartTagToken
+ case c == '/':
+ tokenType = EndTagToken
+ case c == '!' || c == '?':
+ // We use CommentToken to mean any of "<!--actual comments-->",
+ // "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
+ tokenType = CommentToken
+ default:
+ // Reconsume the current character.
+ z.raw.end--
+ continue
+ }
+
+ // We have a non-text token, but we might have accumulated some text
+ // before that. If so, we return the text first, and return the non-
+ // text token on the subsequent call to Next.
+ if x := z.raw.end - len("<a"); z.raw.start < x {
+ z.raw.end = x
+ z.data.end = x
+ z.tt = TextToken
+ return z.tt
+ }
+ switch tokenType {
+ case StartTagToken:
+ z.tt = z.readStartTag()
+ return z.tt
+ case EndTagToken:
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c == '>' {
+ // "</>" does not generate a token at all. Generate an empty comment
+ // to allow passthrough clients to pick up the data using Raw.
+ // Reset the tokenizer state and start again.
+ z.tt = CommentToken
+ return z.tt
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ z.readTag(false)
+ if z.err != nil {
+ z.tt = ErrorToken
+ } else {
+ z.tt = EndTagToken
+ }
+ return z.tt
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ z.tt = CommentToken
+ return z.tt
+ case CommentToken:
+ if c == '!' {
+ z.tt = z.readMarkupDeclaration()
+ return z.tt
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ z.tt = CommentToken
+ return z.tt
+ }
+ }
+ if z.raw.start < z.raw.end {
+ z.data.end = z.raw.end
+ z.tt = TextToken
+ return z.tt
+ }
+ z.tt = ErrorToken
+ return z.tt
+}
+
+// Raw returns the unmodified text of the current token. Calling Next, Token,
+// Text, TagName or TagAttr may change the contents of the returned slice.
+func (z *Tokenizer) Raw() []byte {
+ return z.buf[z.raw.start:z.raw.end]
+}
+
+// convertNewlines converts "\r" and "\r\n" in s to "\n".
+// The conversion happens in place, but the resulting slice may be shorter.
+func convertNewlines(s []byte) []byte {
+ for i, c := range s {
+ if c != '\r' {
+ continue
+ }
+
+ src := i + 1
+ if src >= len(s) || s[src] != '\n' {
+ s[i] = '\n'
+ continue
+ }
+
+ dst := i
+ for src < len(s) {
+ if s[src] == '\r' {
+ if src+1 < len(s) && s[src+1] == '\n' {
+ src++
+ }
+ s[dst] = '\n'
+ } else {
+ s[dst] = s[src]
+ }
+ src++
+ dst++
+ }
+ return s[:dst]
+ }
+ return s
+}
+
+var (
+ nul = []byte("\x00")
+ replacement = []byte("\ufffd")
+)
+
+// Text returns the unescaped text of a text, comment or doctype token. The
+// contents of the returned slice may change on the next call to Next.
+func (z *Tokenizer) Text() []byte {
+ switch z.tt {
+ case TextToken, CommentToken, DoctypeToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ s = convertNewlines(s)
+ if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
+ s = bytes.Replace(s, nul, replacement, -1)
+ }
+ if !z.textIsRaw {
+ s = unescape(s, false)
+ }
+ return s
+ }
+ return nil
+}
+
+// TagName returns the lower-cased name of a tag token (the `img` out of
+// `<IMG SRC="foo">`) and whether the tag has attributes.
+// The contents of the returned slice may change on the next call to Next.
+func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
+ if z.data.start < z.data.end {
+ switch z.tt {
+ case StartTagToken, EndTagToken, SelfClosingTagToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return lower(s), z.nAttrReturned < len(z.attr)
+ }
+ }
+ return nil, false
+}
+
+// TagAttr returns the lower-cased key and unescaped value of the next unparsed
+// attribute for the current tag token and whether there are more attributes.
+// The contents of the returned slices may change on the next call to Next.
+func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
+ if z.nAttrReturned < len(z.attr) {
+ switch z.tt {
+ case StartTagToken, SelfClosingTagToken:
+ x := z.attr[z.nAttrReturned]
+ z.nAttrReturned++
+ key = z.buf[x[0].start:x[0].end]
+ val = z.buf[x[1].start:x[1].end]
+ return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
+ }
+ }
+ return nil, nil, false
+}
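+
+// exampleListAnchors is an illustrative sketch, not part of the upstream
+// package: it shows the low-level TagName/TagAttr loop extracting href values
+// from <a> tags. The reader r is assumed to be supplied by the caller; the
+// returned byte slices are copied into strings because their contents may
+// change on the next call to Next.
+func exampleListAnchors(r io.Reader) []string {
+ var hrefs []string
+ z := NewTokenizer(r)
+ for {
+ tt := z.Next()
+ if tt == ErrorToken {
+ return hrefs
+ }
+ if tt != StartTagToken && tt != SelfClosingTagToken {
+ continue
+ }
+ name, hasAttr := z.TagName()
+ if string(name) != "a" {
+ continue
+ }
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ if string(key) == "href" {
+ hrefs = append(hrefs, string(val))
+ }
+ }
+ }
+}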
+
+// Token returns the next Token. The result's Data and Attr values remain valid
+// after subsequent Next calls.
+func (z *Tokenizer) Token() Token {
+ t := Token{Type: z.tt}
+ switch z.tt {
+ case TextToken, CommentToken, DoctypeToken:
+ t.Data = string(z.Text())
+ case StartTagToken, SelfClosingTagToken, EndTagToken:
+ name, moreAttr := z.TagName()
+ for moreAttr {
+ var key, val []byte
+ key, val, moreAttr = z.TagAttr()
+ t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
+ }
+ if a := atom.Lookup(name); a != 0 {
+ t.DataAtom, t.Data = a, a.String()
+ } else {
+ t.DataAtom, t.Data = 0, string(name)
+ }
+ }
+ return t
+}
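+
+// exampleCollectText is an illustrative sketch, not part of the upstream
+// package: it shows the typical high-level Next/Token loop. Tokenization ends
+// with an ErrorToken whose Err is io.EOF; any other error is reported to the
+// caller. The reader r is assumed to be supplied by the caller.
+func exampleCollectText(r io.Reader) (string, error) {
+ var b bytes.Buffer
+ z := NewTokenizer(r)
+ for {
+ if z.Next() == ErrorToken {
+ if err := z.Err(); err != io.EOF {
+ return "", err
+ }
+ return b.String(), nil
+ }
+ t := z.Token()
+ if t.Type == TextToken {
+ b.WriteString(t.Data)
+ }
+ }
+}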
+
+// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
+// A value of 0 means unlimited.
+func (z *Tokenizer) SetMaxBuf(n int) {
+ z.maxBuf = n
+}
+
+// NewTokenizer returns a new HTML Tokenizer for the given Reader.
+// The input is assumed to be UTF-8 encoded.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ return NewTokenizerFragment(r, "")
+}
+
+// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
+// tokenizing an existing element's InnerHTML fragment. contextTag is that
+// element's tag, such as "div" or "iframe".
+//
+// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
+// for a <p> tag or a <script> tag.
+//
+// The input is assumed to be UTF-8 encoded.
+func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
+ z := &Tokenizer{
+ r: r,
+ buf: make([]byte, 0, 4096),
+ }
+ if contextTag != "" {
+ switch s := strings.ToLower(contextTag); s {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
+ z.rawTag = s
+ }
+ }
+ return z
+}
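+
+// exampleFragment is an illustrative sketch, not part of the upstream
+// package: it mirrors the comment above. For a "script" context the fragment
+// "a<b" is a single raw text token, whereas for a "p" context the "<b" is
+// treated as the beginning of a (here unfinished) tag. The inputs are
+// arbitrary assumptions.
+func exampleFragment() {
+ z := NewTokenizerFragment(strings.NewReader("a<b"), "script")
+ z.Next() // TextToken; z.Text() yields the raw data "a<b"
+}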
diff --git a/vendor/golang.org/x/net/html/token_test.go b/vendor/golang.org/x/net/html/token_test.go
new file mode 100644
index 000000000..20221c328
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token_test.go
@@ -0,0 +1,748 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type tokenTest struct {
+ // A short description of the test case.
+ desc string
+ // The HTML to parse.
+ html string
+ // The string representations of the expected tokens, joined by '$'.
+ golden string
+}
+
+var tokenTests = []tokenTest{
+ {
+ "empty",
+ "",
+ "",
+ },
+ // A single text node. The tokenizer should not break text nodes on whitespace,
+ // nor should it normalize whitespace within a text node.
+ {
+ "text",
+ "foo bar",
+ "foo bar",
+ },
+ // An entity.
+ {
+ "entity",
+ "one &lt; two",
+ "one &lt; two",
+ },
+ // A start, self-closing and end tag. The tokenizer does not care if the start
+ // and end tokens don't match; that is the job of the parser.
+ {
+ "tags",
+ "<a>b<c/>d</e>",
+ "<a>$b$<c/>$d$</e>",
+ },
+ // Angle brackets that aren't a tag.
+ {
+ "not a tag #0",
+ "<",
+ "&lt;",
+ },
+ {
+ "not a tag #1",
+ "</",
+ "&lt;/",
+ },
+ {
+ "not a tag #2",
+ "</>",
+ "<!---->",
+ },
+ {
+ "not a tag #3",
+ "a</>b",
+ "a$<!---->$b",
+ },
+ {
+ "not a tag #4",
+ "</ >",
+ "<!-- -->",
+ },
+ {
+ "not a tag #5",
+ "</.",
+ "<!--.-->",
+ },
+ {
+ "not a tag #6",
+ "</.>",
+ "<!--.-->",
+ },
+ {
+ "not a tag #7",
+ "a < b",
+ "a &lt; b",
+ },
+ {
+ "not a tag #8",
+ "<.>",
+ "&lt;.&gt;",
+ },
+ {
+ "not a tag #9",
+ "a<<<b>>>c",
+ "a&lt;&lt;$<b>$&gt;&gt;c",
+ },
+ {
+ "not a tag #10",
+ "if x<0 and y < 0 then x*y>0",
+ "if x&lt;0 and y &lt; 0 then x*y&gt;0",
+ },
+ {
+ "not a tag #11",
+ "<<p>",
+ "&lt;$<p>",
+ },
+ // EOF in a tag name.
+ {
+ "tag name eof #0",
+ "<a",
+ "",
+ },
+ {
+ "tag name eof #1",
+ "<a ",
+ "",
+ },
+ {
+ "tag name eof #2",
+ "a<b",
+ "a",
+ },
+ {
+ "tag name eof #3",
+ "<a><b",
+ "<a>",
+ },
+ {
+ "tag name eof #4",
+ `<a x`,
+ ``,
+ },
+ // Some malformed tags that are missing a '>'.
+ {
+ "malformed tag #0",
+ `<p</p>`,
+ `<p< p="">`,
+ },
+ {
+ "malformed tag #1",
+ `<p </p>`,
+ `<p <="" p="">`,
+ },
+ {
+ "malformed tag #2",
+ `<p id`,
+ ``,
+ },
+ {
+ "malformed tag #3",
+ `<p id=`,
+ ``,
+ },
+ {
+ "malformed tag #4",
+ `<p id=>`,
+ `<p id="">`,
+ },
+ {
+ "malformed tag #5",
+ `<p id=0`,
+ ``,
+ },
+ {
+ "malformed tag #6",
+ `<p id=0</p>`,
+ `<p id="0&lt;/p">`,
+ },
+ {
+ "malformed tag #7",
+ `<p id="0</p>`,
+ ``,
+ },
+ {
+ "malformed tag #8",
+ `<p id="0"</p>`,
+ `<p id="0" <="" p="">`,
+ },
+ {
+ "malformed tag #9",
+ `<p></p id`,
+ `<p>`,
+ },
+ // Raw text and RCDATA.
+ {
+ "basic raw text",
+ "<script><a></b></script>",
+ "<script>$&lt;a&gt;&lt;/b&gt;$</script>",
+ },
+ {
+ "unfinished script end tag",
+ "<SCRIPT>a</SCR",
+ "<script>$a&lt;/SCR",
+ },
+ {
+ "broken script end tag",
+ "<SCRIPT>a</SCR ipt>",
+ "<script>$a&lt;/SCR ipt&gt;",
+ },
+ {
+ "EOF in script end tag",
+ "<SCRIPT>a</SCRipt",
+ "<script>$a&lt;/SCRipt",
+ },
+ {
+ "scriptx end tag",
+ "<SCRIPT>a</SCRiptx",
+ "<script>$a&lt;/SCRiptx",
+ },
+ {
+ "' ' completes script end tag",
+ "<SCRIPT>a</SCRipt ",
+ "<script>$a",
+ },
+ {
+ "'>' completes script end tag",
+ "<SCRIPT>a</SCRipt>",
+ "<script>$a$</script>",
+ },
+ {
+ "self-closing script end tag",
+ "<SCRIPT>a</SCRipt/>",
+ "<script>$a$</script>",
+ },
+ {
+ "nested script tag",
+ "<SCRIPT>a</SCRipt<script>",
+ "<script>$a&lt;/SCRipt&lt;script&gt;",
+ },
+ {
+ "script end tag after unfinished",
+ "<SCRIPT>a</SCRipt</script>",
+ "<script>$a&lt;/SCRipt$</script>",
+ },
+ {
+ "script/style mismatched tags",
+ "<script>a</style>",
+ "<script>$a&lt;/style&gt;",
+ },
+ {
+ "style element with entity",
+ "<style>&apos;",
+ "<style>$&amp;apos;",
+ },
+ {
+ "textarea with tag",
+ "<textarea><div></textarea>",
+ "<textarea>$&lt;div&gt;$</textarea>",
+ },
+ {
+ "title with tag and entity",
+ "<title><b>K&amp;R C</b></title>",
+ "<title>$&lt;b&gt;K&amp;R C&lt;/b&gt;$</title>",
+ },
+ // DOCTYPE tests.
+ {
+ "Proper DOCTYPE",
+ "<!DOCTYPE html>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "DOCTYPE with no space",
+ "<!doctypehtml>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "DOCTYPE with two spaces",
+ "<!doctype html>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "looks like DOCTYPE but isn't",
+ "<!DOCUMENT html>",
+ "<!--DOCUMENT html-->",
+ },
+ {
+ "DOCTYPE at EOF",
+ "<!DOCtype",
+ "<!DOCTYPE >",
+ },
+ // XML processing instructions.
+ {
+ "XML processing instruction",
+ "<?xml?>",
+ "<!--?xml?-->",
+ },
+ // Comments.
+ {
+ "comment0",
+ "abc<b><!-- skipme --></b>def",
+ "abc$<b>$<!-- skipme -->$</b>$def",
+ },
+ {
+ "comment1",
+ "a<!-->z",
+ "a$<!---->$z",
+ },
+ {
+ "comment2",
+ "a<!--->z",
+ "a$<!---->$z",
+ },
+ {
+ "comment3",
+ "a<!--x>-->z",
+ "a$<!--x>-->$z",
+ },
+ {
+ "comment4",
+ "a<!--x->-->z",
+ "a$<!--x->-->$z",
+ },
+ {
+ "comment5",
+ "a<!>z",
+ "a$<!---->$z",
+ },
+ {
+ "comment6",
+ "a<!->z",
+ "a$<!----->$z",
+ },
+ {
+ "comment7",
+ "a<!---<>z",
+ "a$<!---<>z-->",
+ },
+ {
+ "comment8",
+ "a<!--z",
+ "a$<!--z-->",
+ },
+ {
+ "comment9",
+ "a<!--z-",
+ "a$<!--z-->",
+ },
+ {
+ "comment10",
+ "a<!--z--",
+ "a$<!--z-->",
+ },
+ {
+ "comment11",
+ "a<!--z---",
+ "a$<!--z--->",
+ },
+ {
+ "comment12",
+ "a<!--z----",
+ "a$<!--z---->",
+ },
+ {
+ "comment13",
+ "a<!--x--!>z",
+ "a$<!--x-->$z",
+ },
+ // An attribute with a backslash.
+ {
+ "backslash",
+ `<p id="a\"b">`,
+ `<p id="a\" b"="">`,
+ },
+ // Entities, tag name and attribute key lower-casing, and whitespace
+ // normalization within a tag.
+ {
+ "tricky",
+ "<p \t\n iD=\"a&quot;B\" foo=\"bar\"><EM>te&lt;&amp;;xt</em></p>",
+ `<p id="a&#34;B" foo="bar">$<em>$te&lt;&amp;;xt$</em>$</p>`,
+ },
+ // A nonexistent entity. Tokenizing and converting back to a string should
+ // escape the "&" to become "&amp;".
+ {
+ "noSuchEntity",
+ `<a b="c&noSuchEntity;d">&lt;&alsoDoesntExist;&`,
+ `<a b="c&amp;noSuchEntity;d">$&lt;&amp;alsoDoesntExist;&amp;`,
+ },
+ {
+ "entity without semicolon",
+ `&notit;&notin;<a b="q=z&amp=5&notice=hello&not;=world">`,
+ `¬it;∉$<a b="q=z&amp;amp=5&amp;notice=hello¬=world">`,
+ },
+ {
+ "entity with digits",
+ "&frac12;",
+ "½",
+ },
+ // Attribute tests:
+ // http://dev.w3.org/html5/pf-summary/Overview.html#attributes
+ {
+ "Empty attribute",
+ `<input disabled FOO>`,
+ `<input disabled="" foo="">`,
+ },
+ {
+ "Empty attribute, whitespace",
+ `<input disabled FOO >`,
+ `<input disabled="" foo="">`,
+ },
+ {
+ "Unquoted attribute value",
+ `<input value=yes FOO=BAR>`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Unquoted attribute value, spaces",
+ `<input value = yes FOO = BAR>`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Unquoted attribute value, trailing space",
+ `<input value=yes FOO=BAR >`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Single-quoted attribute value",
+ `<input value='yes' FOO='BAR'>`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Single-quoted attribute value, trailing space",
+ `<input value='yes' FOO='BAR' >`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Double-quoted attribute value",
+ `<input value="I'm an attribute" FOO="BAR">`,
+ `<input value="I&#39;m an attribute" foo="BAR">`,
+ },
+ {
+ "Attribute name characters",
+ `<meta http-equiv="content-type">`,
+ `<meta http-equiv="content-type">`,
+ },
+ {
+ "Mixed attributes",
+ `a<P V="0 1" w='2' X=3 y>z`,
+ `a$<p v="0 1" w="2" x="3" y="">$z`,
+ },
+ {
+ "Attributes with a solitary single quote",
+ `<p id=can't><p id=won't>`,
+ `<p id="can&#39;t">$<p id="won&#39;t">`,
+ },
+}
+
+func TestTokenizer(t *testing.T) {
+loop:
+ for _, tt := range tokenTests {
+ z := NewTokenizer(strings.NewReader(tt.html))
+ if tt.golden != "" {
+ for i, s := range strings.Split(tt.golden, "$") {
+ if z.Next() == ErrorToken {
+ t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Err())
+ continue loop
+ }
+ actual := z.Token().String()
+ if s != actual {
+ t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual)
+ continue loop
+ }
+ }
+ }
+ z.Next()
+ if z.Err() != io.EOF {
+ t.Errorf("%s: want EOF got %q", tt.desc, z.Err())
+ }
+ }
+}
+
+func TestMaxBuffer(t *testing.T) {
+ // Exceeding the maximum buffer size generates ErrBufferExceeded.
+ z := NewTokenizer(strings.NewReader("<" + strings.Repeat("t", 10)))
+ z.SetMaxBuf(5)
+ tt := z.Next()
+ if got, want := tt, ErrorToken; got != want {
+ t.Fatalf("token type: got: %v want: %v", got, want)
+ }
+ if got, want := z.Err(), ErrBufferExceeded; got != want {
+ t.Errorf("error type: got: %v want: %v", got, want)
+ }
+ if got, want := string(z.Raw()), "<tttt"; got != want {
+ t.Fatalf("buffered before overflow: got: %q want: %q", got, want)
+ }
+}
+
+func TestMaxBufferReconstruction(t *testing.T) {
+ // Exceeding the maximum buffer size at any point while tokenizing permits
+ // reconstructing the original input.
+tests:
+ for _, test := range tokenTests {
+ for maxBuf := 1; ; maxBuf++ {
+ r := strings.NewReader(test.html)
+ z := NewTokenizer(r)
+ z.SetMaxBuf(maxBuf)
+ var tokenized bytes.Buffer
+ for {
+ tt := z.Next()
+ tokenized.Write(z.Raw())
+ if tt == ErrorToken {
+ if err := z.Err(); err != io.EOF && err != ErrBufferExceeded {
+ t.Errorf("%s: unexpected error: %v", test.desc, err)
+ }
+ break
+ }
+ }
+ // Anything tokenized along with untokenized input or data left in the reader.
+ assembled, err := ioutil.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r))
+ if err != nil {
+ t.Errorf("%s: ReadAll: %v", test.desc, err)
+ continue tests
+ }
+ if got, want := string(assembled), test.html; got != want {
+ t.Errorf("%s: reassembled html:\n got: %q\nwant: %q", test.desc, got, want)
+ continue tests
+ }
+ // EOF indicates that we completed tokenization and hence found the max
+ // maxBuf that generates ErrBufferExceeded, so continue to the next test.
+ if z.Err() == io.EOF {
+ break
+ }
+ } // buffer sizes
+ } // tests
+}
+
+func TestPassthrough(t *testing.T) {
+ // Accumulating the raw output for each parse event should reconstruct the
+ // original input.
+ for _, test := range tokenTests {
+ z := NewTokenizer(strings.NewReader(test.html))
+ var parsed bytes.Buffer
+ for {
+ tt := z.Next()
+ parsed.Write(z.Raw())
+ if tt == ErrorToken {
+ break
+ }
+ }
+ if got, want := parsed.String(), test.html; got != want {
+ t.Errorf("%s: parsed output:\n got: %q\nwant: %q", test.desc, got, want)
+ }
+ }
+}
+
+func TestBufAPI(t *testing.T) {
+ s := "0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9"
+ z := NewTokenizer(bytes.NewBufferString(s))
+ var result bytes.Buffer
+ depth := 0
+loop:
+ for {
+ tt := z.Next()
+ switch tt {
+ case ErrorToken:
+ if z.Err() != io.EOF {
+ t.Error(z.Err())
+ }
+ break loop
+ case TextToken:
+ if depth > 0 {
+ result.Write(z.Text())
+ }
+ case StartTagToken, EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+ u := "14567"
+ v := string(result.Bytes())
+ if u != v {
+ t.Errorf("TestBufAPI: want %q got %q", u, v)
+ }
+}
+
+func TestConvertNewlines(t *testing.T) {
+ testCases := map[string]string{
+ "Mac\rDOS\r\nUnix\n": "Mac\nDOS\nUnix\n",
+ "Unix\nMac\rDOS\r\n": "Unix\nMac\nDOS\n",
+ "DOS\r\nDOS\r\nDOS\r\n": "DOS\nDOS\nDOS\n",
+ "": "",
+ "\n": "\n",
+ "\n\r": "\n\n",
+ "\r": "\n",
+ "\r\n": "\n",
+ "\r\n\n": "\n\n",
+ "\r\n\r": "\n\n",
+ "\r\n\r\n": "\n\n",
+ "\r\r": "\n\n",
+ "\r\r\n": "\n\n",
+ "\r\r\n\n": "\n\n\n",
+ "\r\r\r\n": "\n\n\n",
+ "\r \n": "\n \n",
+ "xyz": "xyz",
+ }
+ for in, want := range testCases {
+ if got := string(convertNewlines([]byte(in))); got != want {
+ t.Errorf("input %q: got %q, want %q", in, got, want)
+ }
+ }
+}
+
+func TestReaderEdgeCases(t *testing.T) {
+ const s = "<p>An io.Reader can return (0, nil) or (n, io.EOF).</p>"
+ testCases := []io.Reader{
+ &zeroOneByteReader{s: s},
+ &eofStringsReader{s: s},
+ &stuckReader{},
+ }
+ for i, tc := range testCases {
+ got := []TokenType{}
+ z := NewTokenizer(tc)
+ for {
+ tt := z.Next()
+ if tt == ErrorToken {
+ break
+ }
+ got = append(got, tt)
+ }
+ if err := z.Err(); err != nil && err != io.EOF {
+ if err != io.ErrNoProgress {
+ t.Errorf("i=%d: %v", i, err)
+ }
+ continue
+ }
+ want := []TokenType{
+ StartTagToken,
+ TextToken,
+ EndTagToken,
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("i=%d: got %v, want %v", i, got, want)
+ continue
+ }
+ }
+}
+
+// zeroOneByteReader is like a strings.Reader that alternates between
+// returning 0 bytes and 1 byte at a time.
+type zeroOneByteReader struct {
+ s string
+ n int
+}
+
+func (r *zeroOneByteReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if len(r.s) == 0 {
+ return 0, io.EOF
+ }
+ r.n++
+ if r.n%2 != 0 {
+ return 0, nil
+ }
+ p[0], r.s = r.s[0], r.s[1:]
+ return 1, nil
+}
+
+// eofStringsReader is like a strings.Reader but can return an (n, err) where
+// n > 0 && err != nil.
+type eofStringsReader struct {
+ s string
+}
+
+func (r *eofStringsReader) Read(p []byte) (int, error) {
+ n := copy(p, r.s)
+ r.s = r.s[n:]
+ if r.s != "" {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// stuckReader is an io.Reader that always returns no data and no error.
+type stuckReader struct{}
+
+func (*stuckReader) Read(p []byte) (int, error) {
+ return 0, nil
+}
+
+const (
+ rawLevel = iota
+ lowLevel
+ highLevel
+)
+
+func benchmarkTokenizer(b *testing.B, level int) {
+ buf, err := ioutil.ReadFile("testdata/go1.html")
+ if err != nil {
+ b.Fatalf("could not read testdata/go1.html: %v", err)
+ }
+ b.SetBytes(int64(len(buf)))
+ runtime.GC()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ z := NewTokenizer(bytes.NewBuffer(buf))
+ for {
+ tt := z.Next()
+ if tt == ErrorToken {
+ if err := z.Err(); err != nil && err != io.EOF {
+ b.Fatalf("tokenizer error: %v", err)
+ }
+ break
+ }
+ switch level {
+ case rawLevel:
+ // Calling z.Raw just returns the raw bytes of the token. It does
+ // not unescape &lt; to <, or lower-case tag names and attribute keys.
+ z.Raw()
+ case lowLevel:
+				// Calling z.Text, z.TagName and z.TagAttr returns []byte values
+ // whose contents may change on the next call to z.Next.
+ switch tt {
+ case TextToken, CommentToken, DoctypeToken:
+ z.Text()
+ case StartTagToken, SelfClosingTagToken:
+ _, more := z.TagName()
+ for more {
+ _, _, more = z.TagAttr()
+ }
+ case EndTagToken:
+ z.TagName()
+ }
+ case highLevel:
+ // Calling z.Token converts []byte values to strings whose validity
+				// extends beyond the next call to z.Next.
+ z.Token()
+ }
+ }
+ }
+}
+
+func BenchmarkRawLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, rawLevel) }
+func BenchmarkLowLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, lowLevel) }
+func BenchmarkHighLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, highLevel) }
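The benchmark comments above distinguish raw, low-level, and high-level ways of consuming the tokenizer. As a usage sketch (not part of the vendored files, and assuming the package is imported from its upstream path golang.org/x/net/html), the low-level pattern looks roughly like this; the page string and the href filter are illustrative only:

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<p>Links: <a href="/a">one</a> <a href="/b">two</a></p>`
	z := html.NewTokenizer(strings.NewReader(page))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if z.Err() != io.EOF {
				fmt.Println("tokenize error:", z.Err())
			}
			return // io.EOF is the normal end-of-input signal
		}
		if tt != html.StartTagToken && tt != html.SelfClosingTagToken {
			continue
		}
		// TagName and TagAttr return []byte slices that are only valid
		// until the next call to Next, so use or copy them immediately.
		name, hasAttr := z.TagName()
		if string(name) != "a" {
			continue
		}
		for hasAttr {
			var key, val []byte
			key, val, hasAttr = z.TagAttr()
			if string(key) == "href" {
				fmt.Printf("href=%s\n", val)
			}
		}
	}
}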
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore
new file mode 100644
index 000000000..190f12234
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 000000000..53fc52579
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image are found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 000000000..55fd826f7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 000000000..360d5aa37
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started but shares a lot of code and
+  is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 000000000..b13941258
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,256 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ if isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ const singleUse = true
+ cc, err := p.t.dialClientConn(addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item to prevent
+ // the GC from seeing it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
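The ClientConnPool interface defined above is the extension point the Transport uses for connection reuse. As a minimal sketch (not part of the vendored files, and assuming the package is imported as golang.org/x/net/http2), a custom pool only has to satisfy GetClientConn and MarkDead; singleConnPool is a hypothetical, deliberately naive example that holds at most one connection:

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

// singleConnPool reuses a single pre-established connection and forgets it
// once the Transport marks it dead.
type singleConnPool struct {
	cc *http2.ClientConn
}

func (p *singleConnPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
	if p.cc != nil && p.cc.CanTakeNewRequest() {
		return p.cc, nil
	}
	// Mirrors the no-dial pool above when no usable connection exists.
	return nil, http2.ErrNoCachedConn
}

func (p *singleConnPool) MarkDead(cc *http2.ClientConn) {
	if p.cc == cc {
		p.cc = nil
	}
}

// Compile-time check that the sketch satisfies the interface shown above.
var _ http2.ClientConnPool = (*singleConnPool)(nil)

func main() {}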
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 000000000..4f720f530
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converts panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
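The unexported configureTransport above backs the package's exported ConfigureTransport helper. As a minimal usage sketch (not part of the vendored files, and assuming the import path golang.org/x/net/http2), an existing HTTP/1.1 transport can be taught to negotiate "h2" over TLS while keeping HTTP/1.1 as the fallback; the target URL is the demo server mentioned in the README above:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Start from a plain HTTP/1.1 transport and wire in HTTP/2 support.
	t1 := &http.Transport{}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}

	resp, err := client.Get("https://http2.golang.org/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Printf("negotiated protocol: %s", resp.Proto) // "HTTP/2.0" when h2 is used
}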
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 000000000..20fd7626a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,130 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+ Cause error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+ return StreamError{StreamID: id, Code: code}
+}
+
+func (e StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError pairs an ErrCode with a reason string explaining why it occurred.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+ Code ErrCode
+ Reason string
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
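Callers of Framer.ReadFrame typically need to distinguish the two severities defined above: a StreamError only resets the offending stream, while a ConnectionError tears down the whole connection. A minimal sketch (not part of the vendored files, assuming the import path golang.org/x/net/http2; classify is a hypothetical helper):

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// classify shows how the two error kinds call for different handling.
func classify(err error) string {
	switch e := err.(type) {
	case http2.StreamError:
		// Only the offending stream needs to be reset (RST_STREAM).
		return fmt.Sprintf("reset stream %d with %v", e.StreamID, e.Code)
	case http2.ConnectionError:
		// The whole connection is unusable (send GOAWAY, then close).
		return fmt.Sprintf("close connection with %v", http2.ErrCode(e))
	default:
		return fmt.Sprintf("transport error: %v", err)
	}
}

func main() {
	fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}))
	fmt.Println(classify(http2.ConnectionError(http2.ErrCodeFlowControl)))
}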
diff --git a/vendor/golang.org/x/net/http2/errors_test.go b/vendor/golang.org/x/net/http2/errors_test.go
new file mode 100644
index 000000000..da5c58c31
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors_test.go
@@ -0,0 +1,24 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestErrCodeString(t *testing.T) {
+ tests := []struct {
+ err ErrCode
+ want string
+ }{
+ {ErrCodeProtocol, "PROTOCOL_ERROR"},
+ {0xd, "HTTP_1_1_REQUIRED"},
+ {0xf, "unknown error code 0xf"},
+ }
+ for i, tt := range tests {
+ got := tt.err.String()
+ if got != tt.want {
+ t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go
new file mode 100644
index 000000000..47da0f0bf
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/fixed_buffer.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+)
+
+// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type fixedBuffer struct {
+ buf []byte
+ r, w int
+}
+
+var (
+ errReadEmpty = errors.New("read from empty fixedBuffer")
+ errWriteFull = errors.New("write on full fixedBuffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *fixedBuffer) Read(p []byte) (n int, err error) {
+ if b.r == b.w {
+ return 0, errReadEmpty
+ }
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ if b.r == b.w {
+ b.r = 0
+ b.w = 0
+ }
+ return n, nil
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *fixedBuffer) Len() int {
+ return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *fixedBuffer) Write(p []byte) (n int, err error) {
+ // Slide existing data to beginning.
+ if b.r > 0 && len(p) > len(b.buf)-b.w {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ // Write new data.
+ n = copy(b.buf[b.w:], p)
+ b.w += n
+ if n < len(p) {
+ err = errWriteFull
+ }
+ return n, err
+}
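Since fixedBuffer is unexported, the following is a minimal in-package sketch (it would have to live alongside the type in package http2, for example in a test file, and is not part of the vendored files) showing the write/read cycle and the slide-to-front behavior the doc comment above describes; the byte values are illustrative only:

package http2

import "fmt"

func exampleFixedBuffer() {
	b := fixedBuffer{buf: make([]byte, 4)}

	// Fill the buffer completely; a fifth byte would return errWriteFull.
	b.Write([]byte("abcd"))

	// Drain two bytes; "cd" remains at offsets 2..3 for now.
	tmp := make([]byte, 2)
	b.Read(tmp) // tmp == "ab"

	// This write needs room, so Write first slides "cd" to the front of
	// buf instead of allocating, then appends "ef".
	b.Write([]byte("ef"))

	rest := make([]byte, 4)
	n, _ := b.Read(rest)
	fmt.Println(string(tmp), string(rest[:n])) // prints "ab cdef"
}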
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer_test.go b/vendor/golang.org/x/net/http2/fixed_buffer_test.go
new file mode 100644
index 000000000..f5432f8d8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/fixed_buffer_test.go
@@ -0,0 +1,128 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "reflect"
+ "testing"
+)
+
+var bufferReadTests = []struct {
+ buf fixedBuffer
+ read, wn int
+ werr error
+ wp []byte
+ wbuf fixedBuffer
+}{
+ {
+ fixedBuffer{[]byte{'a', 0}, 0, 1},
+ 5, 1, nil, []byte{'a'},
+ fixedBuffer{[]byte{'a', 0}, 0, 0},
+ },
+ {
+ fixedBuffer{[]byte{0, 'a'}, 1, 2},
+ 5, 1, nil, []byte{'a'},
+ fixedBuffer{[]byte{0, 'a'}, 0, 0},
+ },
+ {
+ fixedBuffer{[]byte{'a', 'b'}, 0, 2},
+ 1, 1, nil, []byte{'a'},
+ fixedBuffer{[]byte{'a', 'b'}, 1, 2},
+ },
+ {
+ fixedBuffer{[]byte{}, 0, 0},
+ 5, 0, errReadEmpty, []byte{},
+ fixedBuffer{[]byte{}, 0, 0},
+ },
+}
+
+func TestBufferRead(t *testing.T) {
+ for i, tt := range bufferReadTests {
+ read := make([]byte, tt.read)
+ n, err := tt.buf.Read(read)
+ if n != tt.wn {
+ t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
+ continue
+ }
+ if err != tt.werr {
+ t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
+ continue
+ }
+ read = read[:n]
+ if !reflect.DeepEqual(read, tt.wp) {
+ t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
+ }
+ if !reflect.DeepEqual(tt.buf, tt.wbuf) {
+ t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
+ }
+ }
+}
+
+var bufferWriteTests = []struct {
+ buf fixedBuffer
+ write, wn int
+ werr error
+ wbuf fixedBuffer
+}{
+ {
+ buf: fixedBuffer{
+ buf: []byte{},
+ },
+ wbuf: fixedBuffer{
+ buf: []byte{},
+ },
+ },
+ {
+ buf: fixedBuffer{
+ buf: []byte{1, 'a'},
+ },
+ write: 1,
+ wn: 1,
+ wbuf: fixedBuffer{
+ buf: []byte{0, 'a'},
+ w: 1,
+ },
+ },
+ {
+ buf: fixedBuffer{
+ buf: []byte{'a', 1},
+ r: 1,
+ w: 1,
+ },
+ write: 2,
+ wn: 2,
+ wbuf: fixedBuffer{
+ buf: []byte{0, 0},
+ w: 2,
+ },
+ },
+ {
+ buf: fixedBuffer{
+ buf: []byte{},
+ },
+ write: 5,
+ werr: errWriteFull,
+ wbuf: fixedBuffer{
+ buf: []byte{},
+ },
+ },
+}
+
+func TestBufferWrite(t *testing.T) {
+ for i, tt := range bufferWriteTests {
+ n, err := tt.buf.Write(make([]byte, tt.write))
+ if n != tt.wn {
+ t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn)
+ continue
+ }
+ if err != tt.werr {
+ t.Errorf("#%d: error = %v; want %v", i, err, tt.werr)
+ continue
+ }
+ if !reflect.DeepEqual(tt.buf, tt.wbuf) {
+ t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 000000000..957de2542
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+	// A flow is kept both on a conn and on a per-stream basis.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
diff --git a/vendor/golang.org/x/net/http2/flow_test.go b/vendor/golang.org/x/net/http2/flow_test.go
new file mode 100644
index 000000000..859adf5d1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow_test.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "testing"
+
+func TestFlow(t *testing.T) {
+ var st flow
+ var conn flow
+ st.add(3)
+ conn.add(2)
+
+ if got, want := st.available(), int32(3); got != want {
+ t.Errorf("available = %d; want %d", got, want)
+ }
+ st.setConnFlow(&conn)
+ if got, want := st.available(), int32(2); got != want {
+ t.Errorf("after parent setup, available = %d; want %d", got, want)
+ }
+
+ st.take(2)
+ if got, want := conn.available(), int32(0); got != want {
+ t.Errorf("after taking 2, conn = %d; want %d", got, want)
+ }
+ if got, want := st.available(), int32(0); got != want {
+ t.Errorf("after taking 2, stream = %d; want %d", got, want)
+ }
+}
+
+func TestFlowAdd(t *testing.T) {
+ var f flow
+ if !f.add(1) {
+ t.Fatal("failed to add 1")
+ }
+ if !f.add(-1) {
+ t.Fatal("failed to add -1")
+ }
+ if got, want := f.available(), int32(0); got != want {
+ t.Fatalf("size = %d; want %d", got, want)
+ }
+ if !f.add(1<<31 - 1) {
+ t.Fatal("failed to add 2^31-1")
+ }
+ if got, want := f.available(), int32(1<<31-1); got != want {
+ t.Fatalf("size = %d; want %d", got, want)
+ }
+ if f.add(1) {
+ t.Fatal("adding 1 to max shouldn't be allowed")
+ }
+
+}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 000000000..b0c79b01a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1539 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB)
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads bool
+
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if logFrameWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+ errPadLength = errors.New("pad length too large")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if len(pad) > 255 {
+ return errPadLength
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ if pad != nil {
+ flags |= FlagDataPadded
+ }
+ f.startWrite(FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
+ f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
+ return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+		// Expecting a whole number of 6 byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) <= 0 {
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADERS frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
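+// Example sketch: encoding a header block with the hpack package (imported by
+// the tests below) and writing it as a single HEADERS frame. The stream ID,
+// header values, and conn (some io.ReadWriter) are illustrative assumptions.
+//
+//   var hbuf bytes.Buffer
+//   henc := hpack.NewEncoder(&hbuf)
+//   henc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+//   henc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
+//   henc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
+//   henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
+//
+//   fr := NewFramer(conn, conn)
+//   err := fr.WriteHeaders(HeadersFrameParam{
+//           StreamID:      1,
+//           BlockFragment: hbuf.Bytes(),
+//           EndStream:     true, // no request body follows
+//           EndHeaders:    true, // the whole block fits in this frame
+//   })
+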
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID that this
+ // Push Promise reserves.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PUSH_PROMISE frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
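+// Example sketch: a server reserving stream 2 in response to a request on
+// stream 1. promisedHeaders is an assumed hpack-encoded block of the promised
+// request's headers (see the WriteHeaders sketch above); fr is a *Framer as
+// before.
+//
+//   err := fr.WritePushPromise(PushPromiseParam{
+//           StreamID:      1, // client-initiated stream the promise rides on
+//           PromiseID:     2, // even, server-initiated stream being reserved
+//           BlockFragment: promisedHeaders,
+//           EndHeaders:    true,
+//   })
+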
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.PseudoValue accessor
+ // method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr, merges
+// them into the provided hf, and returns a MetaHeadersFrame with the
+// decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if VerboseLogs && logFrameReads {
+ log.Printf("http2: decoded hpack field %+v", hf)
+ }
+ if !httplex.ValidHeaderFieldValue(hf.Value) {
+ invalid = headerFieldValueError(hf.Value)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
+
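+// Example sketch: enabling ReadMetaHeaders so that ReadFrame returns a
+// *MetaHeadersFrame with the header block already decoded. conn is an assumed
+// io.ReadWriter (e.g. a net.Conn); 4096 matches the protocol's default header
+// table size, as in the tests below.
+//
+//   fr := NewFramer(conn, conn)
+//   fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
+//   f, err := fr.ReadFrame()
+//   if err != nil {
+//           // handle err
+//   }
+//   if mh, ok := f.(*MetaHeadersFrame); ok {
+//           fmt.Println("path =", mh.PseudoValue("path"))
+//           for _, hf := range mh.RegularFields() {
+//                   fmt.Printf("%s: %s\n", hf.Name, hf.Value)
+//           }
+//   }
+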
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
diff --git a/vendor/golang.org/x/net/http2/frame_test.go b/vendor/golang.org/x/net/http2/frame_test.go
new file mode 100644
index 000000000..7b1933d96
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame_test.go
@@ -0,0 +1,1102 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "unsafe"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+func testFramer() (*Framer, *bytes.Buffer) {
+ buf := new(bytes.Buffer)
+ return NewFramer(buf, buf), buf
+}
+
+func TestFrameSizes(t *testing.T) {
+ // Catch people rearranging the FrameHeader fields.
+ if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {
+ t.Errorf("FrameHeader size = %d; want %d", got, want)
+ }
+}
+
+func TestFrameTypeString(t *testing.T) {
+ tests := []struct {
+ ft FrameType
+ want string
+ }{
+ {FrameData, "DATA"},
+ {FramePing, "PING"},
+ {FrameGoAway, "GOAWAY"},
+ {0xf, "UNKNOWN_FRAME_TYPE_15"},
+ }
+
+ for i, tt := range tests {
+ got := tt.ft.String()
+ if got != tt.want {
+ t.Errorf("%d. String(FrameType %d) = %q; want %q", i, int(tt.ft), got, tt.want)
+ }
+ }
+}
+
+func TestWriteRST(t *testing.T) {
+ fr, buf := testFramer()
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4
+ fr.WriteRSTStream(streamID, ErrCode(errCode))
+ const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &RSTStreamFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x3,
+ Flags: 0x0,
+ Length: 0x4,
+ StreamID: 0x1020304,
+ },
+ ErrCode: 0x7060504,
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestWriteData(t *testing.T) {
+ fr, buf := testFramer()
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ data := []byte("ABC")
+ fr.WriteData(streamID, true, data)
+ const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ df, ok := f.(*DataFrame)
+ if !ok {
+ t.Fatalf("got %T; want *DataFrame", f)
+ }
+ if !bytes.Equal(df.Data(), data) {
+ t.Errorf("got %q; want %q", df.Data(), data)
+ }
+ if f.Header().Flags&1 == 0 {
+ t.Errorf("didn't see END_STREAM flag")
+ }
+}
+
+func TestWriteDataPadded(t *testing.T) {
+ tests := [...]struct {
+ streamID uint32
+ endStream bool
+ data []byte
+ pad []byte
+ wantHeader FrameHeader
+ }{
+ // Unpadded:
+ 0: {
+ streamID: 1,
+ endStream: true,
+ data: []byte("foo"),
+ pad: nil,
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataEndStream,
+ Length: 3,
+ StreamID: 1,
+ },
+ },
+
+ // Padded bit set, but no padding:
+ 1: {
+ streamID: 1,
+ endStream: true,
+ data: []byte("foo"),
+ pad: []byte{},
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataEndStream | FlagDataPadded,
+ Length: 4,
+ StreamID: 1,
+ },
+ },
+
+ // Padded bit set, with padding:
+ 2: {
+ streamID: 1,
+ endStream: false,
+ data: []byte("foo"),
+ pad: []byte("bar"),
+ wantHeader: FrameHeader{
+ Type: FrameData,
+ Flags: FlagDataPadded,
+ Length: 7,
+ StreamID: 1,
+ },
+ },
+ }
+ for i, tt := range tests {
+ fr, _ := testFramer()
+ fr.WriteDataPadded(tt.streamID, tt.endStream, tt.data, tt.pad)
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("%d. ReadFrame: %v", i, err)
+ continue
+ }
+ got := f.Header()
+ tt.wantHeader.valid = true
+ if got != tt.wantHeader {
+ t.Errorf("%d. read %+v; want %+v", i, got, tt.wantHeader)
+ continue
+ }
+ df := f.(*DataFrame)
+ if !bytes.Equal(df.Data(), tt.data) {
+ t.Errorf("%d. got %q; want %q", i, df.Data(), tt.data)
+ }
+ }
+}
+
+func TestWriteHeaders(t *testing.T) {
+ tests := []struct {
+ name string
+ p HeadersFrameParam
+ wantEnc string
+ wantFrame *HeadersFrame
+ }{
+ {
+ "basic",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\x03\x01\x00\x00\x00\x00*abc",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Length: uint32(len("abc")),
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "basic + end flags",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\x03\x01\x05\x00\x00\x00*abc",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders,
+ Length: uint32(len("abc")),
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with padding",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 5,
+ Priority: PriorityParam{},
+ },
+ "\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded,
+ Length: uint32(1 + len("abc") + 5), // pad length + contents + padding
+ },
+ Priority: PriorityParam{},
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with priority",
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 2,
+ Priority: PriorityParam{
+ StreamDep: 15,
+ Exclusive: true,
+ Weight: 127,
+ },
+ },
+ "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
+ Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
+ },
+ Priority: PriorityParam{
+ StreamDep: 15,
+ Exclusive: true,
+ Weight: 127,
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "with priority stream dep zero", // golang.org/issue/15444
+ HeadersFrameParam{
+ StreamID: 42,
+ BlockFragment: []byte("abc"),
+ EndStream: true,
+ EndHeaders: true,
+ PadLength: 2,
+ Priority: PriorityParam{
+ StreamDep: 0,
+ Exclusive: true,
+ Weight: 127,
+ },
+ },
+ "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x00\u007fabc\x00\x00",
+ &HeadersFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: 42,
+ Type: FrameHeaders,
+ Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
+ Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
+ },
+ Priority: PriorityParam{
+ StreamDep: 0,
+ Exclusive: true,
+ Weight: 127,
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, buf := testFramer()
+ if err := fr.WriteHeaders(tt.p); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ if buf.String() != tt.wantEnc {
+ t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWriteInvalidStreamDep(t *testing.T) {
+ fr, _ := testFramer()
+ err := fr.WriteHeaders(HeadersFrameParam{
+ StreamID: 42,
+ Priority: PriorityParam{
+ StreamDep: 1 << 31,
+ },
+ })
+ if err != errDepStreamID {
+ t.Errorf("header error = %v; want %q", err, errDepStreamID)
+ }
+
+ err = fr.WritePriority(2, PriorityParam{StreamDep: 1 << 31})
+ if err != errDepStreamID {
+ t.Errorf("priority error = %v; want %q", err, errDepStreamID)
+ }
+}
+
+func TestWriteContinuation(t *testing.T) {
+ const streamID = 42
+ tests := []struct {
+ name string
+ end bool
+ frag []byte
+
+ wantFrame *ContinuationFrame
+ }{
+ {
+ "not end",
+ false,
+ []byte("abc"),
+ &ContinuationFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FrameContinuation,
+ Length: uint32(len("abc")),
+ },
+ headerFragBuf: []byte("abc"),
+ },
+ },
+ {
+ "end",
+ true,
+ []byte("def"),
+ &ContinuationFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FrameContinuation,
+ Flags: FlagContinuationEndHeaders,
+ Length: uint32(len("def")),
+ },
+ headerFragBuf: []byte("def"),
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, _ := testFramer()
+ if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ fr.AllowIllegalReads = true
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWritePriority(t *testing.T) {
+ const streamID = 42
+ tests := []struct {
+ name string
+ priority PriorityParam
+ wantFrame *PriorityFrame
+ }{
+ {
+ "not exclusive",
+ PriorityParam{
+ StreamDep: 2,
+ Exclusive: false,
+ Weight: 127,
+ },
+ &PriorityFrame{
+ FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FramePriority,
+ Length: 5,
+ },
+ PriorityParam{
+ StreamDep: 2,
+ Exclusive: false,
+ Weight: 127,
+ },
+ },
+ },
+
+ {
+ "exclusive",
+ PriorityParam{
+ StreamDep: 3,
+ Exclusive: true,
+ Weight: 77,
+ },
+ &PriorityFrame{
+ FrameHeader{
+ valid: true,
+ StreamID: streamID,
+ Type: FramePriority,
+ Length: 5,
+ },
+ PriorityParam{
+ StreamDep: 3,
+ Exclusive: true,
+ Weight: 77,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ fr, _ := testFramer()
+ if err := fr.WritePriority(streamID, tt.priority); err != nil {
+ t.Errorf("test %q: %v", tt.name, err)
+ continue
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(f, tt.wantFrame) {
+ t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+ }
+ }
+}
+
+func TestWriteSettings(t *testing.T) {
+ fr, buf := testFramer()
+ settings := []Setting{{1, 2}, {3, 4}}
+ fr.WriteSettings(settings...)
+ const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ t.Fatalf("Got a %T; want a SettingsFrame", f)
+ }
+ var got []Setting
+ sf.ForeachSetting(func(s Setting) error {
+ got = append(got, s)
+ valBack, ok := sf.Value(s.ID)
+ if !ok || valBack != s.Val {
+ t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val)
+ }
+ return nil
+ })
+ if !reflect.DeepEqual(settings, got) {
+ t.Errorf("Read settings %+v != written settings %+v", got, settings)
+ }
+}
+
+func TestWriteSettingsAck(t *testing.T) {
+ fr, buf := testFramer()
+ fr.WriteSettingsAck()
+ const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+}
+
+func TestWriteWindowUpdate(t *testing.T) {
+ fr, buf := testFramer()
+ const streamID = 1<<24 + 2<<16 + 3<<8 + 4
+ const incr = 7<<24 + 6<<16 + 5<<8 + 4
+ if err := fr.WriteWindowUpdate(streamID, incr); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &WindowUpdateFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x8,
+ Flags: 0x0,
+ Length: 0x4,
+ StreamID: 0x1020304,
+ },
+ Increment: 0x7060504,
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestWritePing(t *testing.T) { testWritePing(t, false) }
+func TestWritePingAck(t *testing.T) { testWritePing(t, true) }
+
+func testWritePing(t *testing.T, ack bool) {
+ fr, buf := testFramer()
+ if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
+ t.Fatal(err)
+ }
+ var wantFlags Flags
+ if ack {
+ wantFlags = FlagPingAck
+ }
+ var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &PingFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x6,
+ Flags: wantFlags,
+ Length: 0x8,
+ StreamID: 0,
+ },
+ Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Errorf("parsed back %#v; want %#v", f, want)
+ }
+}
+
+func TestReadFrameHeader(t *testing.T) {
+ tests := []struct {
+ in string
+ want FrameHeader
+ }{
+ {in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}},
+ {in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{
+ Length: 66051, Type: 4, Flags: 5, StreamID: 101124105,
+ }},
+ // Ignore high bit:
+ {in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{
+ Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+ {in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{
+ Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+ }
+ for i, tt := range tests {
+ got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))
+ if err != nil {
+ t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err)
+ continue
+ }
+ tt.want.valid = true
+ if got != tt.want {
+ t.Errorf("%d. readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want)
+ }
+ }
+}
+
+func TestReadWriteFrameHeader(t *testing.T) {
+ tests := []struct {
+ len uint32
+ typ FrameType
+ flags Flags
+ streamID uint32
+ }{
+ {len: 0, typ: 255, flags: 1, streamID: 0},
+ {len: 0, typ: 255, flags: 1, streamID: 1},
+ {len: 0, typ: 255, flags: 1, streamID: 255},
+ {len: 0, typ: 255, flags: 1, streamID: 256},
+ {len: 0, typ: 255, flags: 1, streamID: 65535},
+ {len: 0, typ: 255, flags: 1, streamID: 65536},
+
+ {len: 0, typ: 1, flags: 255, streamID: 1},
+ {len: 255, typ: 1, flags: 255, streamID: 1},
+ {len: 256, typ: 1, flags: 255, streamID: 1},
+ {len: 65535, typ: 1, flags: 255, streamID: 1},
+ {len: 65536, typ: 1, flags: 255, streamID: 1},
+ {len: 16777215, typ: 1, flags: 255, streamID: 1},
+ }
+ for _, tt := range tests {
+ fr, buf := testFramer()
+ fr.startWrite(tt.typ, tt.flags, tt.streamID)
+ fr.writeBytes(make([]byte, tt.len))
+ fr.endWrite()
+ fh, err := ReadFrameHeader(buf)
+ if err != nil {
+ t.Errorf("ReadFrameHeader(%+v) = %v", tt, err)
+ continue
+ }
+ if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {
+ t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh)
+ }
+ }
+
+}
+
+func TestWriteTooLargeFrame(t *testing.T) {
+ fr, _ := testFramer()
+ fr.startWrite(0, 1, 1)
+ fr.writeBytes(make([]byte, 1<<24))
+ err := fr.endWrite()
+ if err != ErrFrameTooLarge {
+ t.Errorf("endWrite = %v; want errFrameTooLarge", err)
+ }
+}
+
+func TestWriteGoAway(t *testing.T) {
+ const debug = "foo"
+ fr, buf := testFramer()
+ if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &GoAwayFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x7,
+ Flags: 0,
+ Length: uint32(4 + 4 + len(debug)),
+ StreamID: 0,
+ },
+ LastStreamID: 0x01020304,
+ ErrCode: 0x05060708,
+ debugData: []byte(debug),
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+ }
+ if got := string(f.(*GoAwayFrame).DebugData()); got != debug {
+ t.Errorf("debug data = %q; want %q", got, debug)
+ }
+}
+
+func TestWritePushPromise(t *testing.T) {
+ pp := PushPromiseParam{
+ StreamID: 42,
+ PromiseID: 42,
+ BlockFragment: []byte("abc"),
+ }
+ fr, buf := testFramer()
+ if err := fr.WritePushPromise(pp); err != nil {
+ t.Fatal(err)
+ }
+ const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc"
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, ok := f.(*PushPromiseFrame)
+ if !ok {
+ t.Fatalf("got %T; want *PushPromiseFrame", f)
+ }
+ want := &PushPromiseFrame{
+ FrameHeader: FrameHeader{
+ valid: true,
+ Type: 0x5,
+ Flags: 0x0,
+ Length: 0x7,
+ StreamID: 42,
+ },
+ PromiseID: 42,
+ headerFragBuf: []byte("abc"),
+ }
+ if !reflect.DeepEqual(f, want) {
+ t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+ }
+}
+
+// test checkFrameOrder and that HEADERS and CONTINUATION frames can't be intermingled.
+func TestReadFrameOrder(t *testing.T) {
+ head := func(f *Framer, id uint32, end bool) {
+ f.WriteHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: []byte("foo"), // unused, but non-empty
+ EndHeaders: end,
+ })
+ }
+ cont := func(f *Framer, id uint32, end bool) {
+ f.WriteContinuation(id, end, []byte("foo"))
+ }
+
+ tests := [...]struct {
+ name string
+ w func(*Framer)
+ atLeast int
+ wantErr string
+ }{
+ 0: {
+ w: func(f *Framer) {
+ head(f, 1, true)
+ },
+ },
+ 1: {
+ w: func(f *Framer) {
+ head(f, 1, true)
+ head(f, 2, true)
+ },
+ },
+ 2: {
+ wantErr: "got HEADERS for stream 2; expected CONTINUATION following HEADERS for stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ head(f, 2, true)
+ },
+ },
+ 3: {
+ wantErr: "got DATA for stream 1; expected CONTINUATION following HEADERS for stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ },
+ },
+ 4: {
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 1, true)
+ head(f, 2, true)
+ },
+ },
+ 5: {
+ wantErr: "got CONTINUATION for stream 2; expected stream 1",
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 2, true)
+ head(f, 2, true)
+ },
+ },
+ 6: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ w: func(f *Framer) {
+ cont(f, 1, true)
+ },
+ },
+ 7: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ w: func(f *Framer) {
+ cont(f, 1, false)
+ },
+ },
+ 8: {
+ wantErr: "HEADERS frame with stream ID 0",
+ w: func(f *Framer) {
+ head(f, 0, true)
+ },
+ },
+ 9: {
+ wantErr: "CONTINUATION frame with stream ID 0",
+ w: func(f *Framer) {
+ cont(f, 0, true)
+ },
+ },
+ 10: {
+ wantErr: "unexpected CONTINUATION for stream 1",
+ atLeast: 5,
+ w: func(f *Framer) {
+ head(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, false)
+ cont(f, 1, true)
+ cont(f, 1, false)
+ },
+ },
+ }
+ for i, tt := range tests {
+ buf := new(bytes.Buffer)
+ f := NewFramer(buf, buf)
+ f.AllowIllegalWrites = true
+ tt.w(f)
+ f.WriteData(1, true, nil) // to test transition away from last step
+
+ var err error
+ n := 0
+ var log bytes.Buffer
+ for {
+ var got Frame
+ got, err = f.ReadFrame()
+ fmt.Fprintf(&log, " read %v, %v\n", got, err)
+ if err != nil {
+ break
+ }
+ n++
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ ok := tt.wantErr == ""
+ if ok && err != nil {
+ t.Errorf("%d. after %d good frames, ReadFrame = %v; want success\n%s", i, n, err, log.Bytes())
+ continue
+ }
+ if !ok && err != ConnectionError(ErrCodeProtocol) {
+ t.Errorf("%d. after %d good frames, ReadFrame = %v; want ConnectionError(ErrCodeProtocol)\n%s", i, n, err, log.Bytes())
+ continue
+ }
+ if !((f.errDetail == nil && tt.wantErr == "") || (fmt.Sprint(f.errDetail) == tt.wantErr)) {
+ t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errDetail, tt.wantErr, log.Bytes())
+ }
+ if n < tt.atLeast {
+ t.Errorf("%d. framer only read %d frames; want at least %d\n%s", i, n, tt.atLeast, log.Bytes())
+ }
+ }
+}
+
+func TestMetaFrameHeader(t *testing.T) {
+ write := func(f *Framer, frags ...[]byte) {
+ for i, frag := range frags {
+ end := (i == len(frags)-1)
+ if i == 0 {
+ f.WriteHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: frag,
+ EndHeaders: end,
+ })
+ } else {
+ f.WriteContinuation(1, end, frag)
+ }
+ }
+ }
+
+ want := func(flags Flags, length uint32, pairs ...string) *MetaHeadersFrame {
+ mh := &MetaHeadersFrame{
+ HeadersFrame: &HeadersFrame{
+ FrameHeader: FrameHeader{
+ Type: FrameHeaders,
+ Flags: flags,
+ Length: length,
+ StreamID: 1,
+ },
+ },
+ Fields: []hpack.HeaderField(nil),
+ }
+ for len(pairs) > 0 {
+ mh.Fields = append(mh.Fields, hpack.HeaderField{
+ Name: pairs[0],
+ Value: pairs[1],
+ })
+ pairs = pairs[2:]
+ }
+ return mh
+ }
+ truncated := func(mh *MetaHeadersFrame) *MetaHeadersFrame {
+ mh.Truncated = true
+ return mh
+ }
+
+ const noFlags Flags = 0
+
+ oneKBString := strings.Repeat("a", 1<<10)
+
+ tests := [...]struct {
+ name string
+ w func(*Framer)
+ want interface{} // *MetaHeaderFrame or error
+ wantErrReason string
+ maxHeaderListSize uint32
+ }{
+ 0: {
+ name: "single_headers",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/")
+ write(f, all)
+ },
+ want: want(FlagHeadersEndHeaders, 2, ":method", "GET", ":path", "/"),
+ },
+ 1: {
+ name: "with_continuation",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar")
+ write(f, all[:1], all[1:])
+ },
+ want: want(noFlags, 1, ":method", "GET", ":path", "/", "foo", "bar"),
+ },
+ 2: {
+ name: "with_two_continuation",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar")
+ write(f, all[:2], all[2:4], all[4:])
+ },
+ want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", "bar"),
+ },
+ 3: {
+ name: "big_string_okay",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString)
+ write(f, all[:2], all[2:])
+ },
+ want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", oneKBString),
+ },
+ 4: {
+ name: "big_string_error",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString)
+ write(f, all[:2], all[2:])
+ },
+ maxHeaderListSize: (1 << 10) / 2,
+ want: ConnectionError(ErrCodeCompression),
+ },
+ 5: {
+ name: "max_header_list_truncated",
+ w: func(f *Framer) {
+ var he hpackEncoder
+ var pairs = []string{":method", "GET", ":path", "/"}
+ for i := 0; i < 100; i++ {
+ pairs = append(pairs, "foo", "bar")
+ }
+ all := he.encodeHeaderRaw(t, pairs...)
+ write(f, all[:2], all[2:])
+ },
+ maxHeaderListSize: (1 << 10) / 2,
+ want: truncated(want(noFlags, 2,
+ ":method", "GET",
+ ":path", "/",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar",
+ "foo", "bar", // 11
+ )),
+ },
+ 6: {
+ name: "pseudo_order",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ "foo", "bar",
+ ":path", "/", // bogus
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "pseudo header field after regular",
+ },
+ 7: {
+ name: "pseudo_unknown",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":unknown", "foo", // bogus
+ "foo", "bar",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid pseudo-header \":unknown\"",
+ },
+ 8: {
+ name: "pseudo_mix_request_response",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ ":status", "100",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "mix of request and response pseudo headers",
+ },
+ 9: {
+ name: "pseudo_dup",
+ w: func(f *Framer) {
+ write(f, encodeHeaderRaw(t,
+ ":method", "GET",
+ ":method", "POST",
+ ))
+ },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "duplicate pseudo-header \":method\"",
+ },
+ 10: {
+ name: "trailer_okay_no_pseudo",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "foo", "bar")) },
+ want: want(FlagHeadersEndHeaders, 8, "foo", "bar"),
+ },
+ 11: {
+ name: "invalid_field_name",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "CapitalBad", "x")) },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid header field name \"CapitalBad\"",
+ },
+ 12: {
+ name: "invalid_field_value",
+ w: func(f *Framer) { write(f, encodeHeaderRaw(t, "key", "bad_null\x00")) },
+ want: streamError(1, ErrCodeProtocol),
+ wantErrReason: "invalid header field value \"bad_null\\x00\"",
+ },
+ }
+ for i, tt := range tests {
+ buf := new(bytes.Buffer)
+ f := NewFramer(buf, buf)
+ f.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ f.MaxHeaderListSize = tt.maxHeaderListSize
+ tt.w(f)
+
+ name := tt.name
+ if name == "" {
+ name = fmt.Sprintf("test index %d", i)
+ }
+
+ var got interface{}
+ var err error
+ got, err = f.ReadFrame()
+ if err != nil {
+ got = err
+
+ // Ignore the StreamError.Cause field, if it matches the wantErrReason.
+ // The test table above predates the Cause field.
+ if se, ok := err.(StreamError); ok && se.Cause != nil && se.Cause.Error() == tt.wantErrReason {
+ se.Cause = nil
+ got = se
+ }
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ if mhg, ok := got.(*MetaHeadersFrame); ok {
+ if mhw, ok := tt.want.(*MetaHeadersFrame); ok {
+ hg := mhg.HeadersFrame
+ hw := mhw.HeadersFrame
+ if hg != nil && hw != nil && !reflect.DeepEqual(*hg, *hw) {
+ t.Errorf("%s: headers differ:\n got: %+v\nwant: %+v\n", name, *hg, *hw)
+ }
+ }
+ }
+ str := func(v interface{}) string {
+ if _, ok := v.(error); ok {
+ return fmt.Sprintf("error %v", v)
+ } else {
+ return fmt.Sprintf("value %#v", v)
+ }
+ }
+ t.Errorf("%s:\n got: %v\nwant: %s", name, str(got), str(tt.want))
+ }
+ if tt.wantErrReason != "" && tt.wantErrReason != fmt.Sprint(f.errDetail) {
+ t.Errorf("%s: got error reason %q; want %q", name, f.errDetail, tt.wantErrReason)
+ }
+ }
+}
+
+func encodeHeaderRaw(t *testing.T, pairs ...string) []byte {
+ var he hpackEncoder
+ return he.encodeHeaderRaw(t, pairs...)
+}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 000000000..2b72855f5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 000000000..730319dd5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+type contextContext interface {
+ context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ cc.mu.Lock()
+ ci.Reused = cc.nextStreamID > 1
+ ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Now().Sub(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *clientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *clientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+ trace := httptrace.ContextClientTrace(req.Context())
+ return (*clientTrace)(trace)
+}
diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go
new file mode 100644
index 000000000..b4c52ecec
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17_not18.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,!go1.8
+
+package http2
+
+import "crypto/tls"
+
+// temporary copy of Go 1.7's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
new file mode 100644
index 000000000..c2ae16731
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 000000000..9933c9f8c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
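+// Example sketch of the debug helper, mirroring gotrack_test.go: checks pass
+// on the goroutine that created the lock and panic elsewhere once
+// DebugGoroutines is enabled.
+//
+//   DebugGoroutines = true // normally set via the DEBUG_HTTP2_GOROUTINES env var
+//   g := newGoroutineLock()
+//   g.check() // ok: same goroutine that created g
+//   go func() {
+//           defer func() { recover() }() // check panics on another goroutine
+//           g.check()
+//   }()
+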
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/gotrack_test.go b/vendor/golang.org/x/net/http2/gotrack_test.go
new file mode 100644
index 000000000..06db61231
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestGoroutineLock(t *testing.T) {
+ oldDebug := DebugGoroutines
+ DebugGoroutines = true
+ defer func() { DebugGoroutines = oldDebug }()
+
+ g := newGoroutineLock()
+ g.check()
+
+ sawPanic := make(chan interface{})
+ go func() {
+ defer func() { sawPanic <- recover() }()
+ g.check() // should panic
+ }()
+ e := <-sawPanic
+ if e == nil {
+ t.Fatal("did not see panic from check in other goroutine")
+ }
+ if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
+ t.Errorf("expected on see panic about running on the wrong goroutine; got %v", e)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/h2demo/.gitignore b/vendor/golang.org/x/net/http2/h2demo/.gitignore
new file mode 100644
index 000000000..0de86ddbc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/.gitignore
@@ -0,0 +1,5 @@
+h2demo
+h2demo.linux
+client-id.dat
+client-secret.dat
+token.dat
diff --git a/vendor/golang.org/x/net/http2/h2demo/Makefile b/vendor/golang.org/x/net/http2/h2demo/Makefile
new file mode 100644
index 000000000..f5c31ef3e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/Makefile
@@ -0,0 +1,8 @@
+h2demo.linux: h2demo.go
+ GOOS=linux go build --tags=h2demo -o h2demo.linux .
+
+FORCE:
+
+upload: FORCE
+ go install golang.org/x/build/cmd/upload
+ upload --verbose --osarch=linux-amd64 --tags=h2demo --file=go:golang.org/x/net/http2/h2demo --public http2-demo-server-tls/h2demo
diff --git a/vendor/golang.org/x/net/http2/h2demo/README b/vendor/golang.org/x/net/http2/h2demo/README
new file mode 100644
index 000000000..212a96f38
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/README
@@ -0,0 +1,16 @@
+
+Client:
+ -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
+ -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
+
+Make CA:
+$ openssl genrsa -out rootCA.key 2048
+$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
+... install that to Firefox
+
+Make cert:
+$ openssl genrsa -out server.key 2048
+$ openssl req -new -key server.key -out server.csr
+$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
+
+
diff --git a/vendor/golang.org/x/net/http2/h2demo/h2demo.go b/vendor/golang.org/x/net/http2/h2demo/h2demo.go
new file mode 100644
index 000000000..a248d479e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/h2demo.go
@@ -0,0 +1,504 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build h2demo
+
+package main
+
+import (
+ "bytes"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "hash/crc32"
+ "image"
+ "image/jpeg"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "path"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "camlistore.org/pkg/googlestorage"
+ "go4.org/syncutil/singleflight"
+ "golang.org/x/net/http2"
+)
+
+var (
+ prod = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")
+
+ httpsAddr = flag.String("https_addr", "localhost:4430", "TLS address to listen on ('host:port' or ':port'). Required.")
+ httpAddr = flag.String("http_addr", "", "Plain HTTP address to listen on ('host:port', or ':port'). Empty means no HTTP.")
+
+ hostHTTP = flag.String("http_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -http_addr.")
+ hostHTTPS = flag.String("https_host", "", "Optional host or host:port to use for https:// links to this service. By default, this is implied from -https_addr.")
+)
+
+func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
+<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
+<ul>
+ <li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
+ <li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
+</ul>
+<p>See code & instructions for connecting at <a href="https://github.com/golang/net/tree/master/http2">https://github.com/golang/net/tree/master/http2</a>.</p>
+
+</body></html>`)
+}
+
+func home(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+ io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
+href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
+
+<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>
+
+<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
+
+<p>
+The code is at <a href="https://golang.org/x/net/http2">golang.org/x/net/http2</a> and
+is used transparently by the Go standard library from Go 1.6 and later.
+</p>
+
+<p>Contact info: <i>bradfitz@golang.org</i>, or <a
+href="https://golang.org/s/http2bug">file a bug</a>.</p>
+
+<h2>Handlers for testing</h2>
+<ul>
+ <li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
+ <li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
+ <li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
+ <li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
+ <li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
+ <li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
+ <li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
+ <li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
+ <li>PUT something to <a href="/ECHO">/ECHO</a> and it will be streamed back to you capitalized</li>
+</ul>
+
+</body></html>`)
+}
+
+func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ fmt.Fprintf(w, "Method: %s\n", r.Method)
+ fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
+ fmt.Fprintf(w, "Host: %s\n", r.Host)
+ fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
+ fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
+ fmt.Fprintf(w, "URL: %#v\n", r.URL)
+ fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
+ fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
+ fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
+ fmt.Fprintf(w, "\nHeaders:\n")
+ r.Header.Write(w)
+}
+
+func crcHandler(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "PUT" {
+ http.Error(w, "PUT required.", 400)
+ return
+ }
+ crc := crc32.NewIEEE()
+ n, err := io.Copy(crc, r.Body)
+ if err == nil {
+ w.Header().Set("Content-Type", "text/plain")
+ fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
+ }
+}
+
+type capitalizeReader struct {
+ r io.Reader
+}
+
+func (cr capitalizeReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ for i, b := range p[:n] {
+ if b >= 'a' && b <= 'z' {
+ p[i] = b - ('a' - 'A')
+ }
+ }
+ return
+}
+
+type flushWriter struct {
+ w io.Writer
+}
+
+func (fw flushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if f, ok := fw.w.(http.Flusher); ok {
+ f.Flush()
+ }
+ return
+}
+
+func echoCapitalHandler(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "PUT" {
+ http.Error(w, "PUT required.", 400)
+ return
+ }
+ io.Copy(flushWriter{w}, capitalizeReader{r.Body})
+}
+
+var (
+ fsGrp singleflight.Group
+ fsMu sync.Mutex // guards fsCache
+ fsCache = map[string]http.Handler{}
+)
+
+// fileServer returns a file-serving handler that proxies URL.
+// It lazily fetches URL on the first access and caches its contents forever.
+func fileServer(url string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ hi, err := fsGrp.Do(url, func() (interface{}, error) {
+ fsMu.Lock()
+ if h, ok := fsCache[url]; ok {
+ fsMu.Unlock()
+ return h, nil
+ }
+ fsMu.Unlock()
+
+ res, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ modTime := time.Now()
+ var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
+ })
+ fsMu.Lock()
+ fsCache[url] = h
+ fsMu.Unlock()
+ return h, nil
+ })
+ if err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+ hi.(http.Handler).ServeHTTP(w, r)
+ })
+}
+
+func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
+ clientGone := w.(http.CloseNotifier).CloseNotify()
+ w.Header().Set("Content-Type", "text/plain")
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+ fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
+ io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))
+
+ for {
+ fmt.Fprintf(w, "%v\n", time.Now())
+ w.(http.Flusher).Flush()
+ select {
+ case <-ticker.C:
+ case <-clientGone:
+ log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
+ return
+ }
+ }
+}
+
+func registerHandlers() {
+ tiles := newGopherTilesHandler()
+
+ mux2 := http.NewServeMux()
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ if r.TLS == nil {
+ if r.URL.Path == "/gophertiles" {
+ tiles.ServeHTTP(w, r)
+ return
+ }
+ http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound)
+ return
+ }
+ if r.ProtoMajor == 1 {
+ if r.URL.Path == "/reqinfo" {
+ reqInfoHandler(w, r)
+ return
+ }
+ homeOldHTTP(w, r)
+ return
+ }
+ mux2.ServeHTTP(w, r)
+ })
+ mux2.HandleFunc("/", home)
+ mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png"))
+ mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz"))
+ mux2.HandleFunc("/reqinfo", reqInfoHandler)
+ mux2.HandleFunc("/crc32", crcHandler)
+ mux2.HandleFunc("/ECHO", echoCapitalHandler)
+ mux2.HandleFunc("/clockstream", clockStreamHandler)
+ mux2.Handle("/gophertiles", tiles)
+ mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, "/", http.StatusFound)
+ })
+ stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
+ mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ buf := make([]byte, 2<<20)
+ w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
+ })
+}
+
+func newGopherTilesHandler() http.Handler {
+ const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
+ res, err := http.Get(gopherURL)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ im, err := jpeg.Decode(bytes.NewReader(slurp))
+ if err != nil {
+ if len(slurp) > 1024 {
+ slurp = slurp[:1024]
+ }
+ log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
+ }
+
+ type subImager interface {
+ SubImage(image.Rectangle) image.Image
+ }
+ const tileSize = 32
+ xt := im.Bounds().Max.X / tileSize
+ yt := im.Bounds().Max.Y / tileSize
+ var tile [][][]byte // y -> x -> jpeg bytes
+ for yi := 0; yi < yt; yi++ {
+ var row [][]byte
+ for xi := 0; xi < xt; xi++ {
+ si := im.(subImager).SubImage(image.Rectangle{
+ Min: image.Point{xi * tileSize, yi * tileSize},
+ Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
+ })
+ buf := new(bytes.Buffer)
+ if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
+ log.Fatal(err)
+ }
+ row = append(row, buf.Bytes())
+ }
+ tile = append(tile, row)
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ms, _ := strconv.Atoi(r.FormValue("latency"))
+ const nanosPerMilli = 1e6
+ if r.FormValue("x") != "" {
+ x, _ := strconv.Atoi(r.FormValue("x"))
+ y, _ := strconv.Atoi(r.FormValue("y"))
+ if ms <= 1000 {
+ time.Sleep(time.Duration(ms) * nanosPerMilli)
+ }
+ if x >= 0 && x < xt && y >= 0 && y < yt {
+ http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
+ return
+ }
+ }
+ io.WriteString(w, "<html><body onload='showtimes()'>")
+ fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
+ for _, ms := range []int{0, 30, 200, 1000} {
+ d := time.Duration(ms) * nanosPerMilli
+ fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
+ httpsHost(), ms, d,
+ httpHost(), ms, d,
+ )
+ }
+ io.WriteString(w, "<p>\n")
+ cacheBust := time.Now().UnixNano()
+ for y := 0; y < yt; y++ {
+ for x := 0; x < xt; x++ {
+ fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
+ tileSize, tileSize, x, y, cacheBust, ms)
+ }
+ io.WriteString(w, "<br/>\n")
+ }
+ io.WriteString(w, `<p><div id='loadtimes'></div></p>
+<script>
+function showtimes() {
+ var times = 'Times from connection start:<br>'
+ times += 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.connectStart) + 'ms<br>'
+ times += 'DOM complete (images loaded): ' + (window.performance.timing.domComplete - window.performance.timing.connectStart) + 'ms<br>'
+ document.getElementById('loadtimes').innerHTML = times
+}
+</script>
+<hr><a href='/'>&lt;&lt; Back to Go HTTP/2 demo server</a></body></html>`)
+ })
+}
+
+func httpsHost() string {
+ if *hostHTTPS != "" {
+ return *hostHTTPS
+ }
+ if v := *httpsAddr; strings.HasPrefix(v, ":") {
+ return "localhost" + v
+ } else {
+ return v
+ }
+}
+
+func httpHost() string {
+ if *hostHTTP != "" {
+ return *hostHTTP
+ }
+ if v := *httpAddr; strings.HasPrefix(v, ":") {
+ return "localhost" + v
+ } else {
+ return v
+ }
+}
+
+func serveProdTLS() error {
+ c, err := googlestorage.NewServiceClient()
+ if err != nil {
+ return err
+ }
+ slurp := func(key string) ([]byte, error) {
+ const bucket = "http2-demo-server-tls"
+ rc, _, err := c.GetObject(&googlestorage.Object{
+ Bucket: bucket,
+ Key: key,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
+ }
+ defer rc.Close()
+ return ioutil.ReadAll(rc)
+ }
+ certPem, err := slurp("http2.golang.org.chained.pem")
+ if err != nil {
+ return err
+ }
+ keyPem, err := slurp("http2.golang.org.key")
+ if err != nil {
+ return err
+ }
+ cert, err := tls.X509KeyPair(certPem, keyPem)
+ if err != nil {
+ return err
+ }
+ srv := &http.Server{
+ TLSConfig: &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ },
+ }
+ http2.ConfigureServer(srv, &http2.Server{})
+ ln, err := net.Listen("tcp", ":443")
+ if err != nil {
+ return err
+ }
+ return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
+}
+
+type tcpKeepAliveListener struct {
+ *net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+ tc, err := ln.AcceptTCP()
+ if err != nil {
+ return
+ }
+ tc.SetKeepAlive(true)
+ tc.SetKeepAlivePeriod(3 * time.Minute)
+ return tc, nil
+}
+
+func serveProd() error {
+ errc := make(chan error, 2)
+ go func() { errc <- http.ListenAndServe(":80", nil) }()
+ go func() { errc <- serveProdTLS() }()
+ return <-errc
+}
+
+const idleTimeout = 5 * time.Minute
+const activeTimeout = 10 * time.Minute
+
+// TODO: put this into the standard library and actually send
+// PING frames and GOAWAY, etc: golang.org/issue/14204
+func idleTimeoutHook() func(net.Conn, http.ConnState) {
+ var mu sync.Mutex
+ m := map[net.Conn]*time.Timer{}
+ return func(c net.Conn, cs http.ConnState) {
+ mu.Lock()
+ defer mu.Unlock()
+ if t, ok := m[c]; ok {
+ delete(m, c)
+ t.Stop()
+ }
+ var d time.Duration
+ switch cs {
+ case http.StateNew, http.StateIdle:
+ d = idleTimeout
+ case http.StateActive:
+ d = activeTimeout
+ default:
+ return
+ }
+ m[c] = time.AfterFunc(d, func() {
+ log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d)
+ go c.Close()
+ })
+ }
+}
+
+func main() {
+ var srv http.Server
+ flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
+ flag.Parse()
+ srv.Addr = *httpsAddr
+ srv.ConnState = idleTimeoutHook()
+
+ registerHandlers()
+
+ if *prod {
+ *hostHTTP = "http2.golang.org"
+ *hostHTTPS = "http2.golang.org"
+ log.Fatal(serveProd())
+ }
+
+ url := "https://" + httpsHost() + "/"
+ log.Printf("Listening on " + url)
+ http2.ConfigureServer(&srv, &http2.Server{})
+
+ if *httpAddr != "" {
+ go func() {
+ log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)")
+ log.Fatal(http.ListenAndServe(*httpAddr, nil))
+ }()
+ }
+
+ go func() {
+ log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
+ }()
+ select {}
+}
diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go
new file mode 100644
index 000000000..df0866a30
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/launch.go
@@ -0,0 +1,302 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ compute "google.golang.org/api/compute/v1"
+)
+
+var (
+ proj = flag.String("project", "symbolic-datum-552", "name of Project")
+ zone = flag.String("zone", "us-central1-a", "GCE zone")
+ mach = flag.String("machinetype", "n1-standard-1", "Machine type")
+ instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
+ sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
+ staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
+
+	writeObject  = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is the Google Cloud Storage bucket/object to write. The contents are read from stdin.")
+ publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
+)
+
+func readFile(v string) string {
+ slurp, err := ioutil.ReadFile(v)
+ if err != nil {
+ log.Fatalf("Error reading %s: %v", v, err)
+ }
+ return strings.TrimSpace(string(slurp))
+}
+
+var config = &oauth2.Config{
+ // The client-id and secret should be for an "Installed Application" when using
+ // the CLI. Later we'll use a web application with a callback.
+ ClientID: readFile("client-id.dat"),
+ ClientSecret: readFile("client-secret.dat"),
+ Endpoint: google.Endpoint,
+ Scopes: []string{
+ compute.DevstorageFullControlScope,
+ compute.ComputeScope,
+ "https://www.googleapis.com/auth/sqlservice",
+ "https://www.googleapis.com/auth/sqlservice.admin",
+ },
+ RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+}
+
+const baseConfig = `#cloud-config
+coreos:
+ units:
+ - name: h2demo.service
+ command: start
+ content: |
+ [Unit]
+ Description=HTTP2 Demo
+
+ [Service]
+ ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
+ ExecStart=/opt/bin/h2demo --prod
+ RestartSec=5s
+ Restart=always
+ Type=simple
+
+ [Install]
+ WantedBy=multi-user.target
+`
+
+func main() {
+ flag.Parse()
+ if *proj == "" {
+ log.Fatalf("Missing --project flag")
+ }
+ prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
+ machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
+
+ const tokenFileName = "token.dat"
+ tokenFile := tokenCacheFile(tokenFileName)
+ tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
+ token, err := tokenSource.Token()
+ if err != nil {
+ if *writeObject != "" {
+ log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
+ }
+ log.Printf("Error getting token from %s: %v", tokenFileName, err)
+ log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
+ fmt.Print("\nEnter auth code: ")
+ sc := bufio.NewScanner(os.Stdin)
+ sc.Scan()
+ authCode := strings.TrimSpace(sc.Text())
+ token, err = config.Exchange(oauth2.NoContext, authCode)
+ if err != nil {
+ log.Fatalf("Error exchanging auth code for a token: %v", err)
+ }
+ if err := tokenFile.WriteToken(token); err != nil {
+ log.Fatalf("Error writing to %s: %v", tokenFileName, err)
+ }
+ tokenSource = oauth2.ReuseTokenSource(token, nil)
+ }
+
+ oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
+
+ if *writeObject != "" {
+ writeCloudStorageObject(oauthClient)
+ return
+ }
+
+ computeService, _ := compute.New(oauthClient)
+
+ natIP := *staticIP
+ if natIP == "" {
+ // Try to find it by name.
+ aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
+ if err != nil {
+ log.Fatal(err)
+ }
+ // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
+ IPLoop:
+ for _, asl := range aggAddrList.Items {
+ for _, addr := range asl.Addresses {
+ if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
+ natIP = addr.Address
+ break IPLoop
+ }
+ }
+ }
+ }
+
+ cloudConfig := baseConfig
+ if *sshPub != "" {
+ key := strings.TrimSpace(readFile(*sshPub))
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
+ }
+ if os.Getenv("USER") == "bradfitz" {
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
+ }
+ const maxCloudConfig = 32 << 10 // per compute API docs
+ if len(cloudConfig) > maxCloudConfig {
+ log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
+ }
+
+ instance := &compute.Instance{
+ Name: *instName,
+ Description: "Go Builder",
+ MachineType: machType,
+ Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
+ Tags: &compute.Tags{
+ Items: []string{"http-server", "https-server"},
+ },
+ Metadata: &compute.Metadata{
+ Items: []*compute.MetadataItems{
+ {
+ Key: "user-data",
+ Value: &cloudConfig,
+ },
+ },
+ },
+ NetworkInterfaces: []*compute.NetworkInterface{
+ {
+ AccessConfigs: []*compute.AccessConfig{
+ {
+ Type: "ONE_TO_ONE_NAT",
+ Name: "External NAT",
+ NatIP: natIP,
+ },
+ },
+ Network: prefix + "/global/networks/default",
+ },
+ },
+ ServiceAccounts: []*compute.ServiceAccount{
+ {
+ Email: "default",
+ Scopes: []string{
+ compute.DevstorageFullControlScope,
+ compute.ComputeScope,
+ },
+ },
+ },
+ }
+
+ log.Printf("Creating instance...")
+ op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
+ if err != nil {
+ log.Fatalf("Failed to create instance: %v", err)
+ }
+ opName := op.Name
+ log.Printf("Created. Waiting on operation %v", opName)
+OpLoop:
+ for {
+ time.Sleep(2 * time.Second)
+ op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
+ if err != nil {
+ log.Fatalf("Failed to get op %s: %v", opName, err)
+ }
+ switch op.Status {
+ case "PENDING", "RUNNING":
+ log.Printf("Waiting on operation %v", opName)
+ continue
+ case "DONE":
+ if op.Error != nil {
+ for _, operr := range op.Error.Errors {
+ log.Printf("Error: %+v", operr)
+ }
+ log.Fatalf("Failed to start.")
+ }
+ log.Printf("Success. %+v", op)
+ break OpLoop
+ default:
+ log.Fatalf("Unknown status %q: %+v", op.Status, op)
+ }
+ }
+
+ inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
+ if err != nil {
+ log.Fatalf("Error getting instance after creation: %v", err)
+ }
+ ij, _ := json.MarshalIndent(inst, "", " ")
+ log.Printf("Instance: %s", ij)
+}
+
+func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
+ const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
+ diskName := *instName + "-disk"
+
+ return &compute.AttachedDisk{
+ AutoDelete: true,
+ Boot: true,
+ Type: "PERSISTENT",
+ InitializeParams: &compute.AttachedDiskInitializeParams{
+ DiskName: diskName,
+ SourceImage: imageURL,
+ DiskSizeGb: 50,
+ },
+ }
+}
+
+func writeCloudStorageObject(httpClient *http.Client) {
+ content := os.Stdin
+ const maxSlurp = 1 << 20
+ var buf bytes.Buffer
+ n, err := io.CopyN(&buf, content, maxSlurp)
+ if err != nil && err != io.EOF {
+ log.Fatalf("Error reading from stdin: %v, %v", n, err)
+ }
+ contentType := http.DetectContentType(buf.Bytes())
+
+ req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
+ if err != nil {
+ log.Fatal(err)
+ }
+ req.Header.Set("x-goog-api-version", "2")
+ if *publicObject {
+ req.Header.Set("x-goog-acl", "public-read")
+ }
+ req.Header.Set("Content-Type", contentType)
+ res, err := httpClient.Do(req)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ res.Write(os.Stderr)
+ log.Fatalf("Failed.")
+ }
+ log.Printf("Success.")
+ os.Exit(0)
+}
+
+type tokenCacheFile string
+
+func (f tokenCacheFile) Token() (*oauth2.Token, error) {
+ slurp, err := ioutil.ReadFile(string(f))
+ if err != nil {
+ return nil, err
+ }
+ t := new(oauth2.Token)
+ if err := json.Unmarshal(slurp, t); err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
+ jt, err := json.Marshal(t)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(string(f), jt, 0600)
+}
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
new file mode 100644
index 000000000..a15a6abaf
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
+62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
+XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
+mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
+JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
+SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
+nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
+/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
+qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
+hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
+NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
+LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
+8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
+0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
+K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
+bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
+QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
+nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
+b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
+gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
+WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
+C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
+x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
+hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
new file mode 100644
index 000000000..3a323e774
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
+A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
+DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
+NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
+cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
+c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
+R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
+ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
+JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
+mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
+caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
+A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
+hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
+MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
+MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
+bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
+U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
+eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
+UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
+58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
+sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
+kPe6XoSbiLm/kxk32T0=
+-----END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
new file mode 100644
index 000000000..6db389188
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
@@ -0,0 +1 @@
+E2CE26BF3285059C
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt
new file mode 100644
index 000000000..c59059bd6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
+C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
+DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
+RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
+ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
+gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
+dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
+/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
+F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
+AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
+rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
+EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
+KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
+dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
+90p6/CbU71bGbfpM2PHot2fm
+-----END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key
new file mode 100644
index 000000000..f329c1421
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2demo/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
+fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
+J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
+b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
+mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
+fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
+3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
+qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
+NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
+LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
+a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
+Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
+W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
+gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
+S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
+Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
+V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
+KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
+yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
+drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
+ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
+48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
+c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
+nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
+IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md
new file mode 100644
index 000000000..fb5c5efb0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2i/README.md
@@ -0,0 +1,97 @@
+# h2i
+
+**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
+days of telnetting to your HTTP/1.n servers? We're bringing you
+back.
+
+Features:
+- send raw HTTP/2 frames
+ - PING
+ - SETTINGS
+ - HEADERS
+ - etc
+- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
+- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
+- tab completion of commands, options
+
+Not yet features, but soon:
+- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
+- request bodies (DATA frames)
+- send invalid frames for testing server implementations (supported by underlying Framer)
+
+Later:
+- act like a server
+
+## Installation
+
+```
+$ go get golang.org/x/net/http2/h2i
+$ h2i <host>
+```
+
+## Demo
+
+```
+$ h2i
+Usage: h2i <hostname>
+
+ -insecure
+ Whether to skip TLS cert validation
+ -nextproto string
+ Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
+
+$ h2i google.com
+Connecting to google.com:443 ...
+Connected to 74.125.224.41:443
+Negotiated protocol "h2-14"
+[FrameHeader SETTINGS len=18]
+ [MAX_CONCURRENT_STREAMS = 100]
+ [INITIAL_WINDOW_SIZE = 1048576]
+ [MAX_FRAME_SIZE = 16384]
+[FrameHeader WINDOW_UPDATE len=4]
+ Window-Increment = 983041
+
+h2i> PING h2iSayHI
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2iSayHI"
+h2i> headers
+(as HTTP/1.1)> GET / HTTP/1.1
+(as HTTP/1.1)> Host: ip.appspot.com
+(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
+(as HTTP/1.1)>
+Opening Stream-ID 1:
+ :authority = ip.appspot.com
+ :method = GET
+ :path = /
+ :scheme = https
+ user-agent = h2i/brad-n-blake
+[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
+ :status = "200"
+ alternate-protocol = "443:quic,p=1"
+ content-length = "15"
+ content-type = "text/html"
+ date = "Fri, 01 May 2015 23:06:56 GMT"
+ server = "Google Frontend"
+[FrameHeader DATA flags=END_STREAM stream=1 len=15]
+ "173.164.155.78\n"
+[FrameHeader PING len=8]
+ Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2i_ping"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+ Data = "h2i_ping"
+h2i> ping
+[FrameHeader GOAWAY len=22]
+ Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
+
+ReadFrame: EOF
+```
+
+## Status
+
+Quick few hour hack. So much yet to do. Feel free to file issues for
+bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
+and I aren't yet accepting pull requests until things settle down.
+
diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go
new file mode 100644
index 000000000..b70976f77
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/h2i/h2i.go
@@ -0,0 +1,501 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+/*
+The h2i command is an interactive HTTP/2 console.
+
+Usage:
+ $ h2i [flags] <hostname>
+
+Interactive commands in the console: (all parts case-insensitive)
+
+ ping [data]
+ settings ack
+ settings FOO=n BAR=z
+ headers (open a new stream by typing HTTP/1.1)
+*/
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+// Flags
+var (
+ flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
+ flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
+ flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.")
+)
+
+type command struct {
+ run func(*h2i, []string) error // required
+
+ // complete optionally specifies tokens (case-insensitive) which are
+ // valid for this subcommand.
+ complete func() []string
+}
+
+var commands = map[string]command{
+ "ping": {run: (*h2i).cmdPing},
+ "settings": {
+ run: (*h2i).cmdSettings,
+ complete: func() []string {
+ return []string{
+ "ACK",
+ http2.SettingHeaderTableSize.String(),
+ http2.SettingEnablePush.String(),
+ http2.SettingMaxConcurrentStreams.String(),
+ http2.SettingInitialWindowSize.String(),
+ http2.SettingMaxFrameSize.String(),
+ http2.SettingMaxHeaderListSize.String(),
+ }
+ },
+ },
+ "quit": {run: (*h2i).cmdQuit},
+ "headers": {run: (*h2i).cmdHeaders},
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
+ flag.PrintDefaults()
+}
+
+// withPort adds ":443" if another port isn't already present.
+func withPort(host string) string {
+ if _, _, err := net.SplitHostPort(host); err != nil {
+ return net.JoinHostPort(host, "443")
+ }
+ return host
+}
+
+// h2i is the app's state.
+type h2i struct {
+ host string
+ tc *tls.Conn
+ framer *http2.Framer
+ term *terminal.Terminal
+
+ // owned by the command loop:
+ streamID uint32
+ hbuf bytes.Buffer
+ henc *hpack.Encoder
+
+ // owned by the readFrames loop:
+ peerSetting map[http2.SettingID]uint32
+ hdec *hpack.Decoder
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() != 1 {
+ usage()
+ os.Exit(2)
+ }
+ log.SetFlags(0)
+
+ host := flag.Arg(0)
+ app := &h2i{
+ host: host,
+ peerSetting: make(map[http2.SettingID]uint32),
+ }
+ app.henc = hpack.NewEncoder(&app.hbuf)
+
+ if err := app.Main(); err != nil {
+ if app.term != nil {
+ app.logf("%v\n", err)
+ } else {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ }
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stdout, "\n")
+}
+
+func (app *h2i) Main() error {
+ cfg := &tls.Config{
+ ServerName: app.host,
+ NextProtos: strings.Split(*flagNextProto, ","),
+ InsecureSkipVerify: *flagInsecure,
+ }
+
+ hostAndPort := withPort(app.host)
+ log.Printf("Connecting to %s ...", hostAndPort)
+ tc, err := tls.Dial("tcp", hostAndPort, cfg)
+ if err != nil {
+ return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err)
+ }
+ log.Printf("Connected to %v", tc.RemoteAddr())
+ defer tc.Close()
+
+ if err := tc.Handshake(); err != nil {
+ return fmt.Errorf("TLS handshake: %v", err)
+ }
+ if !*flagInsecure {
+ if err := tc.VerifyHostname(app.host); err != nil {
+ return fmt.Errorf("VerifyHostname: %v", err)
+ }
+ }
+ state := tc.ConnectionState()
+ log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
+ if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
+ return fmt.Errorf("Could not negotiate protocol mutually")
+ }
+
+ if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
+ return err
+ }
+
+ app.framer = http2.NewFramer(tc, tc)
+
+ oldState, err := terminal.MakeRaw(0)
+ if err != nil {
+ return err
+ }
+ defer terminal.Restore(0, oldState)
+
+ var screen = struct {
+ io.Reader
+ io.Writer
+ }{os.Stdin, os.Stdout}
+
+ app.term = terminal.NewTerminal(screen, "h2i> ")
+ lastWord := regexp.MustCompile(`.+\W(\w+)$`)
+ app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
+ if key != '\t' {
+ return
+ }
+ if pos != len(line) {
+ // TODO: we're being lazy for now, only supporting tab completion at the end.
+ return
+ }
+ // Auto-complete for the command itself.
+ if !strings.Contains(line, " ") {
+ var name string
+ name, _, ok = lookupCommand(line)
+ if !ok {
+ return
+ }
+ return name, len(name), true
+ }
+ _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
+ if !ok || c.complete == nil {
+ return
+ }
+ if strings.HasSuffix(line, " ") {
+ app.logf("%s", strings.Join(c.complete(), " "))
+ return line, pos, true
+ }
+ m := lastWord.FindStringSubmatch(line)
+ if m == nil {
+ return line, len(line), true
+ }
+ soFar := m[1]
+ var match []string
+ for _, cand := range c.complete() {
+ if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
+ continue
+ }
+ match = append(match, cand)
+ }
+ if len(match) == 0 {
+ return
+ }
+ if len(match) > 1 {
+ // TODO: auto-complete any common prefix
+ app.logf("%s", strings.Join(match, " "))
+ return line, pos, true
+ }
+ newLine = line[:len(line)-len(soFar)] + match[0]
+ return newLine, len(newLine), true
+
+ }
+
+ errc := make(chan error, 2)
+ go func() { errc <- app.readFrames() }()
+ go func() { errc <- app.readConsole() }()
+ return <-errc
+}
+
+func (app *h2i) logf(format string, args ...interface{}) {
+ fmt.Fprintf(app.term, format+"\n", args...)
+}
+
+func (app *h2i) readConsole() error {
+ if s := *flagSettings; s != "omit" {
+ var args []string
+ if s != "empty" {
+ args = strings.Split(s, ",")
+ }
+ _, c, ok := lookupCommand("settings")
+ if !ok {
+ panic("settings command not found")
+ }
+ c.run(app, args)
+ }
+
+ for {
+ line, err := app.term.ReadLine()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("terminal.ReadLine: %v", err)
+ }
+ f := strings.Fields(line)
+ if len(f) == 0 {
+ continue
+ }
+ cmd, args := f[0], f[1:]
+ if _, c, ok := lookupCommand(cmd); ok {
+ err = c.run(app, args)
+ } else {
+ app.logf("Unknown command %q", line)
+ }
+ if err == errExitApp {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func lookupCommand(prefix string) (name string, c command, ok bool) {
+ prefix = strings.ToLower(prefix)
+ if c, ok = commands[prefix]; ok {
+ return prefix, c, ok
+ }
+
+ for full, candidate := range commands {
+ if strings.HasPrefix(full, prefix) {
+ if c.run != nil {
+ return "", command{}, false // ambiguous
+ }
+ c = candidate
+ name = full
+ }
+ }
+ return name, c, c.run != nil
+}
+
+var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
+
+func (a *h2i) cmdQuit(args []string) error {
+ if len(args) > 0 {
+ a.logf("the QUIT command takes no argument")
+ return nil
+ }
+ return errExitApp
+}
+
+func (a *h2i) cmdSettings(args []string) error {
+ if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
+ return a.framer.WriteSettingsAck()
+ }
+ var settings []http2.Setting
+ for _, arg := range args {
+ if strings.EqualFold(arg, "ACK") {
+ a.logf("Error: ACK must be only argument with the SETTINGS command")
+ return nil
+ }
+ eq := strings.Index(arg, "=")
+ if eq == -1 {
+ a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
+ return nil
+ }
+ sid, ok := settingByName(arg[:eq])
+ if !ok {
+ a.logf("Error: unknown setting name %q", arg[:eq])
+ return nil
+ }
+ val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
+ if err != nil {
+ a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
+ return nil
+ }
+ settings = append(settings, http2.Setting{
+ ID: sid,
+ Val: uint32(val),
+ })
+ }
+ a.logf("Sending: %v", settings)
+ return a.framer.WriteSettings(settings...)
+}
+
+func settingByName(name string) (http2.SettingID, bool) {
+ for _, sid := range [...]http2.SettingID{
+ http2.SettingHeaderTableSize,
+ http2.SettingEnablePush,
+ http2.SettingMaxConcurrentStreams,
+ http2.SettingInitialWindowSize,
+ http2.SettingMaxFrameSize,
+ http2.SettingMaxHeaderListSize,
+ } {
+ if strings.EqualFold(sid.String(), name) {
+ return sid, true
+ }
+ }
+ return 0, false
+}
+
+func (app *h2i) cmdPing(args []string) error {
+ if len(args) > 1 {
+ app.logf("invalid PING usage: only accepts 0 or 1 args")
+ return nil // nil means don't end the program
+ }
+ var data [8]byte
+ if len(args) == 1 {
+ copy(data[:], args[0])
+ } else {
+ copy(data[:], "h2i_ping")
+ }
+ return app.framer.WritePing(false, data)
+}
+
+func (app *h2i) cmdHeaders(args []string) error {
+ if len(args) > 0 {
+ app.logf("Error: HEADERS doesn't yet take arguments.")
+ // TODO: flags for restricting window size, to force CONTINUATION
+ // frames.
+ return nil
+ }
+ var h1req bytes.Buffer
+ app.term.SetPrompt("(as HTTP/1.1)> ")
+ defer app.term.SetPrompt("h2i> ")
+ for {
+ line, err := app.term.ReadLine()
+ if err != nil {
+ return err
+ }
+ h1req.WriteString(line)
+ h1req.WriteString("\r\n")
+ if line == "" {
+ break
+ }
+ }
+ req, err := http.ReadRequest(bufio.NewReader(&h1req))
+ if err != nil {
+ app.logf("Invalid HTTP/1.1 request: %v", err)
+ return nil
+ }
+ if app.streamID == 0 {
+ app.streamID = 1
+ } else {
+ app.streamID += 2
+ }
+ app.logf("Opening Stream-ID %d:", app.streamID)
+ hbf := app.encodeHeaders(req)
+ if len(hbf) > 16<<10 {
+ app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
+ return nil
+ }
+ return app.framer.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: app.streamID,
+ BlockFragment: hbf,
+ EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
+ EndHeaders: true, // for now
+ })
+}
+
+func (app *h2i) readFrames() error {
+ for {
+ f, err := app.framer.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame: %v", err)
+ }
+ app.logf("%v", f)
+ switch f := f.(type) {
+ case *http2.PingFrame:
+ app.logf(" Data = %q", f.Data)
+ case *http2.SettingsFrame:
+ f.ForeachSetting(func(s http2.Setting) error {
+ app.logf(" %v", s)
+ app.peerSetting[s.ID] = s.Val
+ return nil
+ })
+ case *http2.WindowUpdateFrame:
+ app.logf(" Window-Increment = %v\n", f.Increment)
+ case *http2.GoAwayFrame:
+ app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode)
+ case *http2.DataFrame:
+ app.logf(" %q", f.Data())
+ case *http2.HeadersFrame:
+ if f.HasPriority() {
+ app.logf(" PRIORITY = %v", f.Priority)
+ }
+ if app.hdec == nil {
+ // TODO: if the user uses h2i to send a SETTINGS frame advertising
+ // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
+ // and stuff here instead of using the 4k default. But for now:
+ tableSize := uint32(4 << 10)
+ app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
+ }
+ app.hdec.Write(f.HeaderBlockFragment())
+ }
+ }
+}
+
+// called from readLoop
+func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
+ if f.Sensitive {
+	if f.Sensitive {
+		app.logf("  %s = %q (SENSITIVE)", f.Name, f.Value)
+		return
+	}
+}
+
+func (app *h2i) encodeHeaders(req *http.Request) []byte {
+ app.hbuf.Reset()
+
+ // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+
+ path := req.URL.Path
+ if path == "" {
+ path = "/"
+ }
+
+ app.writeHeader(":authority", host) // probably not right for all sites
+ app.writeHeader(":method", req.Method)
+ app.writeHeader(":path", path)
+ app.writeHeader(":scheme", "https")
+
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ if lowKey == "host" {
+ continue
+ }
+ for _, v := range vv {
+ app.writeHeader(lowKey, v)
+ }
+ }
+ return app.hbuf.Bytes()
+}
+
+func (app *h2i) writeHeader(name, value string) {
+ app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+ app.logf(" %s = %s", name, value)
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 000000000..c2805f6ac
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 000000000..f9bb03398
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. It protects the encoder from being asked to
+	// use an excessively large table.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// If necessary, it also emits a "Header Table Size Update" before
+// encoding f.
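+//
+// For example, after SetMaxDynamicTableSize(2048) followed by
+// SetMaxDynamicTableSize(4096), the next WriteField first emits the two
+// size updates 0x3f 0xe1 0x0f and 0x3f 0xe1 0x1f before the field itself.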
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first; the dynamic header table
+// is searched only when there is no exact match for both name and
+// value. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ for idx, hf := range staticTable {
+ if !constantTimeStringCompare(hf.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(idx + 1)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(hf.Value, f.Value) {
+ continue
+ }
+ i = uint64(idx + 1)
+ nameValueMatch = true
+ return
+ }
+
+ j, nameValueMatch := e.dynTab.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ i = j + uint64(len(staticTable))
+ }
+ return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same size of the default dynamic header table
+// size described in HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
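+// For example, i=1 fits the 7-bit prefix and encodes as the single byte
+// 0x81, while i=127 saturates the prefix and encodes as 0xff 0x00.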
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
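+// For example, {"custom-key", "custom-value"} with indexing=true starts
+// with the type byte 0x40, followed by the Huffman-encoded name and
+// value, each preceded by a length byte with the Huffman bit set.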
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f, with index i referring to an indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
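+// For example, {":status", "302"} with i=8 and indexing=true encodes as
+// 0x48 0x82 0x64 0x02: the type byte 0x40 ORed into the 6-bit index 8,
+// then the value "302" as a Huffman string (length byte 0x82 plus two
+// Huffman bytes).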
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
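+// For example, v=30 fits the 5-bit prefix and encodes as 0x3e, while
+// v=31 saturates the prefix and encodes as 0x3f 0x00.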
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
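+//
+// For example, with a 5-bit prefix, i=30 encodes as the single byte 30,
+// while i=1337 encodes as the three bytes 31, 154, 10: the prefix
+// saturates at 31, then the remainder 1306 is emitted 7 bits at a time.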
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s is Huffman-encoded only when that produces a strictly shorter
+// byte string.
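+//
+// For example, "www.example.com" Huffman-encodes to 12 bytes, so it is
+// emitted as the length byte 0x8c (Huffman flag plus length 12) followed
+// by the 12 Huffman bytes, while "a" stays literal and is emitted as
+// 0x01 0x61.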
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go
new file mode 100644
index 000000000..92286f3ba
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go
@@ -0,0 +1,330 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "encoding/hex"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestEncoderTableSizeUpdate(t *testing.T) {
+ tests := []struct {
+ size1, size2 uint32
+ wantHex string
+ }{
+ // Should emit 2 table size updates (2048 and 4096)
+ {2048, 4096, "3fe10f 3fe11f 82"},
+
+ // Should emit 1 table size update (2048)
+ {16384, 2048, "3fe10f 82"},
+ }
+ for _, tt := range tests {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+ e.SetMaxDynamicTableSize(tt.size1)
+ e.SetMaxDynamicTableSize(tt.size2)
+ if err := e.WriteField(pair(":method", "GET")); err != nil {
+ t.Fatal(err)
+ }
+ want := removeSpace(tt.wantHex)
+ if got := hex.EncodeToString(buf.Bytes()); got != want {
+			t.Errorf("e.SetMaxDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
+ }
+ }
+}
+
+func TestEncoderWriteField(t *testing.T) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+ var got []HeaderField
+ d := NewDecoder(4<<10, func(f HeaderField) {
+ got = append(got, f)
+ })
+
+ tests := []struct {
+ hdrs []HeaderField
+ }{
+ {[]HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ }},
+ {[]HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ pair("cache-control", "no-cache"),
+ }},
+ {[]HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "https"),
+ pair(":path", "/index.html"),
+ pair(":authority", "www.example.com"),
+ pair("custom-key", "custom-value"),
+ }},
+ }
+ for i, tt := range tests {
+ buf.Reset()
+ got = got[:0]
+ for _, hf := range tt.hdrs {
+ if err := e.WriteField(hf); err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, err := d.Write(buf.Bytes())
+ if err != nil {
+ t.Errorf("%d. Decoder Write = %v", i, err)
+ }
+ if !reflect.DeepEqual(got, tt.hdrs) {
+ t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
+ }
+ }
+}
+
+func TestEncoderSearchTable(t *testing.T) {
+ e := NewEncoder(nil)
+
+ e.dynTab.add(pair("foo", "bar"))
+ e.dynTab.add(pair("blake", "miz"))
+ e.dynTab.add(pair(":method", "GET"))
+
+ tests := []struct {
+ hf HeaderField
+ wantI uint64
+ wantMatch bool
+ }{
+ // Name and Value match
+ {pair("foo", "bar"), uint64(len(staticTable) + 3), true},
+ {pair("blake", "miz"), uint64(len(staticTable) + 2), true},
+ {pair(":method", "GET"), 2, true},
+
+ // Only name match because Sensitive == true
+ {HeaderField{":method", "GET", true}, 2, false},
+
+ // Only Name matches
+ {pair("foo", "..."), uint64(len(staticTable) + 3), false},
+ {pair("blake", "..."), uint64(len(staticTable) + 2), false},
+ {pair(":method", "..."), 2, false},
+
+ // None match
+ {pair("foo-", "bar"), 0, false},
+ }
+ for _, tt := range tests {
+ if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
+			t.Errorf("e.searchTable(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
+ }
+ }
+}
+
+func TestAppendVarInt(t *testing.T) {
+ tests := []struct {
+ n byte
+ i uint64
+ want []byte
+ }{
+ // Fits in a byte:
+ {1, 0, []byte{0}},
+ {2, 2, []byte{2}},
+ {3, 6, []byte{6}},
+ {4, 14, []byte{14}},
+ {5, 30, []byte{30}},
+ {6, 62, []byte{62}},
+ {7, 126, []byte{126}},
+ {8, 254, []byte{254}},
+
+ // Multiple bytes:
+ {5, 1337, []byte{31, 154, 10}},
+ }
+ for _, tt := range tests {
+ got := appendVarInt(nil, tt.n, tt.i)
+ if !bytes.Equal(got, tt.want) {
+ t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
+ }
+ }
+}
+
+func TestAppendHpackString(t *testing.T) {
+ tests := []struct {
+ s, wantHex string
+ }{
+ // Huffman encoded
+ {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
+
+ // Not Huffman encoded
+ {"a", "01 61"},
+
+ // zero length
+ {"", "00"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendHpackString(nil, tt.s)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
+ }
+ }
+}
+
+func TestAppendIndexed(t *testing.T) {
+ tests := []struct {
+ i uint64
+ wantHex string
+ }{
+ // 1 byte
+ {1, "81"},
+ {126, "fe"},
+
+ // 2 bytes
+ {127, "ff00"},
+ {128, "ff01"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendIndexed(nil, tt.i)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
+ }
+ }
+}
+
+func TestAppendNewName(t *testing.T) {
+ tests := []struct {
+ f HeaderField
+ indexing bool
+ wantHex string
+ }{
+ // Incremental indexing
+ {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
+
+ // Without indexing
+ {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
+
+ // Never indexed
+ {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
+ {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendNewName(nil, tt.f, tt.indexing)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
+ }
+ }
+}
+
+func TestAppendIndexedName(t *testing.T) {
+ tests := []struct {
+ f HeaderField
+ i uint64
+ indexing bool
+ wantHex string
+ }{
+ // Incremental indexing
+ {HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
+
+ // Without indexing
+ {HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
+
+ // Never indexed
+ {HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
+ {HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
+ }
+ }
+}
+
+func TestAppendTableSize(t *testing.T) {
+ tests := []struct {
+ i uint32
+ wantHex string
+ }{
+ // Fits into 1 byte
+ {30, "3e"},
+
+ // Extra byte
+ {31, "3f00"},
+ {32, "3f01"},
+ }
+ for _, tt := range tests {
+ want := removeSpace(tt.wantHex)
+ buf := appendTableSize(nil, tt.i)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
+ }
+ }
+}
+
+func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+ tests := []struct {
+ v uint32
+ wantUpdate bool
+ wantMinSize uint32
+ wantMaxSize uint32
+ }{
+ // Set new table size to 2048
+ {2048, true, 2048, 2048},
+
+ // Set new table size to 16384, but still limited to
+ // 4096
+ {16384, true, 2048, 4096},
+ }
+ for _, tt := range tests {
+ e.SetMaxDynamicTableSize(tt.v)
+ if got := e.tableSizeUpdate; tt.wantUpdate != got {
+ t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
+ }
+ if got := e.minSize; tt.wantMinSize != got {
+ t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
+ }
+ if got := e.dynTab.maxSize; tt.wantMaxSize != got {
+ t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
+ }
+ }
+}
+
+func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
+ e := NewEncoder(nil)
+ // 4095 < initialHeaderTableSize means maxSize is truncated to
+ // 4095.
+ e.SetMaxDynamicTableSizeLimit(4095)
+ if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+ t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+ }
+ if got, want := e.maxSizeLimit, uint32(4095); got != want {
+ t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
+ }
+ if got, want := e.tableSizeUpdate, true; got != want {
+ t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
+ }
+ // maxSize will be truncated to maxSizeLimit
+ e.SetMaxDynamicTableSize(16384)
+ if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+ t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+ }
+ // 8192 > current maxSizeLimit, so maxSize does not change.
+ e.SetMaxDynamicTableSizeLimit(8192)
+ if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+ t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+ }
+ if got, want := e.maxSizeLimit, uint32(8192); got != want {
+ t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
+ }
+}
+
+func removeSpace(s string) string {
+ return strings.Replace(s, " ", "", -1)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 000000000..135b9f62c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
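+// For example, the entry ("blake", "eats pizza") has size 5+10+32 = 47.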
+func (hf HeaderField) Size() uint32 {
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+ // Name and/or Value by hand, but we don't care, because that
+ // won't happen on the wire because the encoding doesn't allow
+ // it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ emitEnabled bool // whether calls to emit are enabled
+ maxStrLen int // 0 means unlimited
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ }
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+ d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
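+// SetMaxDynamicTableSize changes the decoder's maximum dynamic table size
+// to v, evicting entries if the table now exceeds it.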
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // ents is the FIFO described at
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+ // The newest (low index) is appended at the end, and items are
+ // evicted from the front.
+ ents []HeaderField
+ size uint32
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+// Then make add increment the size. Maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.ents = append(dt.ents, f)
+ dt.size += f.Size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+ base := dt.ents // keep base pointer of slice
+ for dt.size > dt.maxSize {
+ dt.size -= dt.ents[0].Size()
+ dt.ents = dt.ents[1:]
+ }
+
+ // Shift slice contents down if we evicted things.
+ if len(dt.ents) != len(base) {
+ copy(base, dt.ents)
+ dt.ents = base[:len(dt.ents)]
+ }
+}
+
+// constantTimeStringCompare compares strings a and b in constant time.
+func constantTimeStringCompare(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ c := byte(0)
+
+ for i := 0; i < len(a); i++ {
+ c |= a[i] ^ b[i]
+ }
+
+ return c == 0
+}
+
+// search searches for f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ l := len(dt.ents)
+ for j := l - 1; j >= 0; j-- {
+ ent := dt.ents[j]
+ if !constantTimeStringCompare(ent.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(l - j)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(ent.Value, f.Value) {
+ continue
+ }
+ i = uint64(l - j)
+ nameValueMatch = true
+ return
+ }
+ return
+}
+
+func (d *Decoder) maxTableIndex() int {
+ return len(d.dynTab.ents) + len(staticTable)
+}
+
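+// at returns the header field at 1-based index i. Static table entries
+// come first, followed by dynamic table entries from newest to oldest.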
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i < 1 {
+ return
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ if i <= uint64(len(staticTable)) {
+ return staticTable[i-1], true
+ }
+ dents := d.dynTab.ents
+ return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
+
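+// Close signals the end of a header block. It returns a DecodingError if
+// the block ended with a partially parsed header field still buffered.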
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ return nil
+}
+
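+// Write decodes the header block fragment in p, calling the emit callback
+// for each complete field. Bytes belonging to a field that is split across
+// Write calls are buffered until the rest arrives.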
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
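+//
+// For example, with n=5 the value 1337 encodes as the three bytes
+// 0x1f 0x9a 0x0a: the 5-bit prefix holds its maximum (31), and the
+// remainder 1306 is emitted seven bits at a time, least significant
+// group first (26 with the continuation bit set, then 10).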
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that Huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
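NewDecoder, Write, and Close above make up the whole decoding flow. A minimal usage sketch from inside the package (0x82 is the indexed representation of static table entry 2, the same input the C.2.4 test case below decodes):

	d := NewDecoder(4096, func(f HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value) // prints ":method: GET"
	})
	if _, err := d.Write([]byte{0x82}); err != nil {
		// handle the decoding error
	}
	if err := d.Close(); err != nil {
		// the header block was truncated
	}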
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
new file mode 100644
index 000000000..4c7b17bfb
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -0,0 +1,854 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestStaticTable(t *testing.T) {
+ fromSpec := `
+ +-------+-----------------------------+---------------+
+ | 1 | :authority | |
+ | 2 | :method | GET |
+ | 3 | :method | POST |
+ | 4 | :path | / |
+ | 5 | :path | /index.html |
+ | 6 | :scheme | http |
+ | 7 | :scheme | https |
+ | 8 | :status | 200 |
+ | 9 | :status | 204 |
+ | 10 | :status | 206 |
+ | 11 | :status | 304 |
+ | 12 | :status | 400 |
+ | 13 | :status | 404 |
+ | 14 | :status | 500 |
+ | 15 | accept-charset | |
+ | 16 | accept-encoding | gzip, deflate |
+ | 17 | accept-language | |
+ | 18 | accept-ranges | |
+ | 19 | accept | |
+ | 20 | access-control-allow-origin | |
+ | 21 | age | |
+ | 22 | allow | |
+ | 23 | authorization | |
+ | 24 | cache-control | |
+ | 25 | content-disposition | |
+ | 26 | content-encoding | |
+ | 27 | content-language | |
+ | 28 | content-length | |
+ | 29 | content-location | |
+ | 30 | content-range | |
+ | 31 | content-type | |
+ | 32 | cookie | |
+ | 33 | date | |
+ | 34 | etag | |
+ | 35 | expect | |
+ | 36 | expires | |
+ | 37 | from | |
+ | 38 | host | |
+ | 39 | if-match | |
+ | 40 | if-modified-since | |
+ | 41 | if-none-match | |
+ | 42 | if-range | |
+ | 43 | if-unmodified-since | |
+ | 44 | last-modified | |
+ | 45 | link | |
+ | 46 | location | |
+ | 47 | max-forwards | |
+ | 48 | proxy-authenticate | |
+ | 49 | proxy-authorization | |
+ | 50 | range | |
+ | 51 | referer | |
+ | 52 | refresh | |
+ | 53 | retry-after | |
+ | 54 | server | |
+ | 55 | set-cookie | |
+ | 56 | strict-transport-security | |
+ | 57 | transfer-encoding | |
+ | 58 | user-agent | |
+ | 59 | vary | |
+ | 60 | via | |
+ | 61 | www-authenticate | |
+ +-------+-----------------------------+---------------+
+`
+ bs := bufio.NewScanner(strings.NewReader(fromSpec))
+ re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
+ for bs.Scan() {
+ l := bs.Text()
+ if !strings.Contains(l, "|") {
+ continue
+ }
+ m := re.FindStringSubmatch(l)
+ if m == nil {
+ continue
+ }
+ i, err := strconv.Atoi(m[1])
+ if err != nil {
+ t.Errorf("Bogus integer on line %q", l)
+ continue
+ }
+ if i < 1 || i > len(staticTable) {
+ t.Errorf("Bogus index %d on line %q", i, l)
+ continue
+ }
+ if got, want := staticTable[i-1].Name, m[2]; got != want {
+ t.Errorf("header index %d name = %q; want %q", i, got, want)
+ }
+ if got, want := staticTable[i-1].Value, m[3]; got != want {
+ t.Errorf("header index %d value = %q; want %q", i, got, want)
+ }
+ }
+ if err := bs.Err(); err != nil {
+ t.Error(err)
+ }
+}
+
+func (d *Decoder) mustAt(idx int) HeaderField {
+ if hf, ok := d.at(uint64(idx)); !ok {
+ panic(fmt.Sprintf("bogus index %d", idx))
+ } else {
+ return hf
+ }
+}
+
+func TestDynamicTableAt(t *testing.T) {
+ d := NewDecoder(4096, nil)
+ at := d.mustAt
+ if got, want := at(2), (pair(":method", "GET")); got != want {
+ t.Errorf("at(2) = %v; want %v", got, want)
+ }
+ d.dynTab.add(pair("foo", "bar"))
+ d.dynTab.add(pair("blake", "miz"))
+ if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
+ t.Errorf("at(dyn 1) = %v; want %v", got, want)
+ }
+ if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
+ t.Errorf("at(dyn 2) = %v; want %v", got, want)
+ }
+ if got, want := at(3), (pair(":method", "POST")); got != want {
+ t.Errorf("at(3) = %v; want %v", got, want)
+ }
+}
+
+func TestDynamicTableSearch(t *testing.T) {
+ dt := dynamicTable{}
+ dt.setMaxSize(4096)
+
+ dt.add(pair("foo", "bar"))
+ dt.add(pair("blake", "miz"))
+ dt.add(pair(":method", "GET"))
+
+ tests := []struct {
+ hf HeaderField
+ wantI uint64
+ wantMatch bool
+ }{
+ // Name and Value match
+ {pair("foo", "bar"), 3, true},
+ {pair(":method", "GET"), 1, true},
+
+ // Only name match because of Sensitive == true
+ {HeaderField{"blake", "miz", true}, 2, false},
+
+ // Only Name matches
+ {pair("foo", "..."), 3, false},
+ {pair("blake", "..."), 2, false},
+ {pair(":method", "..."), 1, false},
+
+ // None match
+ {pair("foo-", "bar"), 0, false},
+ }
+ for _, tt := range tests {
+ if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
+ t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
+ }
+ }
+}
+
+func TestDynamicTableSizeEvict(t *testing.T) {
+ d := NewDecoder(4096, nil)
+ if want := uint32(0); d.dynTab.size != want {
+ t.Fatalf("size = %d; want %d", d.dynTab.size, want)
+ }
+ add := d.dynTab.add
+ add(pair("blake", "eats pizza"))
+ if want := uint32(15 + 32); d.dynTab.size != want {
+ t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
+ }
+ add(pair("foo", "bar"))
+ if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
+ t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
+ }
+ d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
+ if want := uint32(6 + 32); d.dynTab.size != want {
+ t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
+ }
+ if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
+ t.Errorf("at(dyn 1) = %v; want %v", got, want)
+ }
+ add(pair("long", strings.Repeat("x", 500)))
+ if want := uint32(0); d.dynTab.size != want {
+ t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
+ }
+}
+
+func TestDecoderDecode(t *testing.T) {
+ tests := []struct {
+ name string
+ in []byte
+ want []HeaderField
+ wantDynTab []HeaderField // newest entry first
+ }{
+ // C.2.1 Literal Header Field with Indexing
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
+ {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
+ []HeaderField{pair("custom-key", "custom-header")},
+ []HeaderField{pair("custom-key", "custom-header")},
+ },
+
+ // C.2.2 Literal Header Field without Indexing
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
+ {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
+ []HeaderField{pair(":path", "/sample/path")},
+ []HeaderField{}},
+
+ // C.2.3 Literal Header Field never Indexed
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
+ {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
+ []HeaderField{{"password", "secret", true}},
+ []HeaderField{}},
+
+ // C.2.4 Indexed Header Field
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
+ {"C.2.4", []byte("\x82"),
+ []HeaderField{pair(":method", "GET")},
+ []HeaderField{}},
+ }
+ for _, tt := range tests {
+ d := NewDecoder(4096, nil)
+ hf, err := d.DecodeFull(tt.in)
+ if err != nil {
+ t.Errorf("%s: %v", tt.name, err)
+ continue
+ }
+ if !reflect.DeepEqual(hf, tt.want) {
+ t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
+ }
+ gotDynTab := d.dynTab.reverseCopy()
+ if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
+ t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
+ }
+ }
+}
+
+func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
+ hf = make([]HeaderField, len(dt.ents))
+ for i := range hf {
+ hf[i] = dt.ents[len(dt.ents)-1-i]
+ }
+ return
+}
+
+type encAndWant struct {
+ enc []byte
+ want []HeaderField
+ wantDynTab []HeaderField
+ wantDynSize uint32
+}
+
+// C.3 Request Examples without Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
+func TestDecodeC3_NoHuffman(t *testing.T) {
+ testDecodeSeries(t, 4096, []encAndWant{
+ {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ },
+ []HeaderField{
+ pair(":authority", "www.example.com"),
+ },
+ 57,
+ },
+ {dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ pair("cache-control", "no-cache"),
+ },
+ []HeaderField{
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 110,
+ },
+ {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "https"),
+ pair(":path", "/index.html"),
+ pair(":authority", "www.example.com"),
+ pair("custom-key", "custom-value"),
+ },
+ []HeaderField{
+ pair("custom-key", "custom-value"),
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 164,
+ },
+ })
+}
+
+// C.4 Request Examples with Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
+func TestDecodeC4_Huffman(t *testing.T) {
+ testDecodeSeries(t, 4096, []encAndWant{
+ {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ },
+ []HeaderField{
+ pair(":authority", "www.example.com"),
+ },
+ 57,
+ },
+ {dehex("8286 84be 5886 a8eb 1064 9cbf"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "http"),
+ pair(":path", "/"),
+ pair(":authority", "www.example.com"),
+ pair("cache-control", "no-cache"),
+ },
+ []HeaderField{
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 110,
+ },
+ {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
+ []HeaderField{
+ pair(":method", "GET"),
+ pair(":scheme", "https"),
+ pair(":path", "/index.html"),
+ pair(":authority", "www.example.com"),
+ pair("custom-key", "custom-value"),
+ },
+ []HeaderField{
+ pair("custom-key", "custom-value"),
+ pair("cache-control", "no-cache"),
+ pair(":authority", "www.example.com"),
+ },
+ 164,
+ },
+ })
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
+// "This section shows several consecutive header lists, corresponding
+// to HTTP responses, on the same connection. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur."
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
+ testDecodeSeries(t, 256, []encAndWant{
+ {dehex(`
+4803 3330 3258 0770 7269 7661 7465 611d
+4d6f 6e2c 2032 3120 4f63 7420 3230 3133
+2032 303a 3133 3a32 3120 474d 546e 1768
+7474 7073 3a2f 2f77 7777 2e65 7861 6d70
+6c65 2e63 6f6d
+`),
+ []HeaderField{
+ pair(":status", "302"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ pair(":status", "302"),
+ },
+ 222,
+ },
+ {dehex("4803 3330 37c1 c0bf"),
+ []HeaderField{
+ pair(":status", "307"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair(":status", "307"),
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ },
+ 222,
+ },
+ {dehex(`
+88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
+3230 3133 2032 303a 3133 3a32 3220 474d
+54c0 5a04 677a 6970 7738 666f 6f3d 4153
+444a 4b48 514b 425a 584f 5157 454f 5049
+5541 5851 5745 4f49 553b 206d 6178 2d61
+6765 3d33 3630 303b 2076 6572 7369 6f6e
+3d31
+`),
+ []HeaderField{
+ pair(":status", "200"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ pair("location", "https://www.example.com"),
+ pair("content-encoding", "gzip"),
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ },
+ []HeaderField{
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ pair("content-encoding", "gzip"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ },
+ 215,
+ },
+ })
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
+// "This section shows the same examples as the previous section, but
+// using Huffman encoding for the literal values. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur. The eviction mechanism
+// uses the length of the decoded literal values, so the same
+// evictions occurs as in the previous section."
+func TestDecodeC6_ResponsesHuffman(t *testing.T) {
+ testDecodeSeries(t, 256, []encAndWant{
+ {dehex(`
+4882 6402 5885 aec3 771a 4b61 96d0 7abe
+9410 54d4 44a8 2005 9504 0b81 66e0 82a6
+2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
+e9ae 82ae 43d3
+`),
+ []HeaderField{
+ pair(":status", "302"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ pair(":status", "302"),
+ },
+ 222,
+ },
+ {dehex("4883 640e ffc1 c0bf"),
+ []HeaderField{
+ pair(":status", "307"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("location", "https://www.example.com"),
+ },
+ []HeaderField{
+ pair(":status", "307"),
+ pair("location", "https://www.example.com"),
+ pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+ pair("cache-control", "private"),
+ },
+ 222,
+ },
+ {dehex(`
+88c1 6196 d07a be94 1054 d444 a820 0595
+040b 8166 e084 a62d 1bff c05a 839b d9ab
+77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
+3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
+9587 3160 65c0 03ed 4ee5 b106 3d50 07
+`),
+ []HeaderField{
+ pair(":status", "200"),
+ pair("cache-control", "private"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ pair("location", "https://www.example.com"),
+ pair("content-encoding", "gzip"),
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ },
+ []HeaderField{
+ pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+ pair("content-encoding", "gzip"),
+ pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+ },
+ 215,
+ },
+ })
+}
+
+func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
+ d := NewDecoder(size, nil)
+ for i, step := range steps {
+ hf, err := d.DecodeFull(step.enc)
+ if err != nil {
+ t.Fatalf("Error at step index %d: %v", i, err)
+ }
+ if !reflect.DeepEqual(hf, step.want) {
+ t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
+ }
+ gotDynTab := d.dynTab.reverseCopy()
+ if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
+ t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
+ }
+ if d.dynTab.size != step.wantDynSize {
+ t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
+ }
+ }
+}
+
+func TestHuffmanDecodeExcessPadding(t *testing.T) {
+ tests := [][]byte{
+ {0xff}, // Padding Exceeds 7 bits
+ {0x1f, 0xff}, // {"a", 1 byte excess padding}
+ {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding}
+ {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding}
+ {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding}
+ {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol.
+ }
+ for i, in := range tests {
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err)
+ }
+ }
+}
+
+func TestHuffmanDecodeEOS(t *testing.T) {
+ in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"}
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("error = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {
+ in := []byte{0x00, 0x01} // {"0", "0", "0"}
+ var buf bytes.Buffer
+ if err := huffmanDecode(&buf, 2, in); err != ErrStringLength {
+ t.Errorf("error = %v; want ErrStringLength", err)
+ }
+}
+
+func TestHuffmanDecodeCorruptPadding(t *testing.T) {
+ in := []byte{0x00}
+ var buf bytes.Buffer
+ if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+ t.Errorf("error = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+func TestHuffmanDecode(t *testing.T) {
+ tests := []struct {
+ inHex, want string
+ }{
+ {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
+ {"a8eb 1064 9cbf", "no-cache"},
+ {"25a8 49e9 5ba9 7d7f", "custom-key"},
+ {"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
+ {"6402", "302"},
+ {"aec3 771a 4b", "private"},
+ {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
+ {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
+ {"9bd9 ab", "gzip"},
+ {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
+ "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
+ }
+ for i, tt := range tests {
+ var buf bytes.Buffer
+ in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
+ if err != nil {
+ t.Errorf("%d. hex input error: %v", i, err)
+ continue
+ }
+ if _, err := HuffmanDecode(&buf, in); err != nil {
+ t.Errorf("%d. decode error: %v", i, err)
+ continue
+ }
+ if got := buf.String(); tt.want != got {
+ t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
+ }
+ }
+}
+
+func TestAppendHuffmanString(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
+ {"no-cache", "a8eb 1064 9cbf"},
+ {"custom-key", "25a8 49e9 5ba9 7d7f"},
+ {"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
+ {"302", "6402"},
+ {"private", "aec3 771a 4b"},
+ {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
+ {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
+ {"gzip", "9bd9 ab"},
+ {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
+ "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
+ }
+ for i, tt := range tests {
+ buf := []byte{}
+ want := strings.Replace(tt.want, " ", "", -1)
+ buf = AppendHuffmanString(buf, tt.in)
+ if got := hex.EncodeToString(buf); want != got {
+ t.Errorf("%d. encode = %q; want %q", i, got, want)
+ }
+ }
+}
+
+func TestHuffmanMaxStrLen(t *testing.T) {
+ const msg = "Some string"
+ huff := AppendHuffmanString(nil, msg)
+
+ testGood := func(max int) {
+ var out bytes.Buffer
+ if err := huffmanDecode(&out, max, huff); err != nil {
+ t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
+ }
+ if out.String() != msg {
+ t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
+ }
+ }
+ testGood(0)
+ testGood(len(msg))
+ testGood(len(msg) + 1)
+
+ var out bytes.Buffer
+ if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
+ t.Errorf("err = %v; want ErrStringLength", err)
+ }
+}
+
+func TestHuffmanRoundtripStress(t *testing.T) {
+ const Len = 50 // of uncompressed string
+ input := make([]byte, Len)
+ var output bytes.Buffer
+ var huff []byte
+
+ n := 5000
+ if testing.Short() {
+ n = 100
+ }
+ seed := time.Now().UnixNano()
+ t.Logf("Seed = %v", seed)
+ src := rand.New(rand.NewSource(seed))
+ var encSize int64
+ for i := 0; i < n; i++ {
+ for l := range input {
+ input[l] = byte(src.Intn(256))
+ }
+ huff = AppendHuffmanString(huff[:0], string(input))
+ encSize += int64(len(huff))
+ output.Reset()
+ if err := huffmanDecode(&output, 0, huff); err != nil {
+ t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
+ continue
+ }
+ if !bytes.Equal(output.Bytes(), input) {
+ t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
+ }
+ }
+ t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
+}
+
+func TestHuffmanDecodeFuzz(t *testing.T) {
+ const Len = 50 // of compressed
+ var buf, zbuf bytes.Buffer
+
+ n := 5000
+ if testing.Short() {
+ n = 100
+ }
+ seed := time.Now().UnixNano()
+ t.Logf("Seed = %v", seed)
+ src := rand.New(rand.NewSource(seed))
+ numFail := 0
+ for i := 0; i < n; i++ {
+ zbuf.Reset()
+ if i == 0 {
+ // Start with at least one invalid one.
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8")
+ } else {
+ for l := 0; l < Len; l++ {
+ zbuf.WriteByte(byte(src.Intn(256)))
+ }
+ }
+
+ buf.Reset()
+ if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {
+ if err == ErrInvalidHuffman {
+ numFail++
+ continue
+ }
+ t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err)
+ continue
+ }
+ }
+ t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n)
+ if numFail < 1 {
+ t.Error("expected at least one invalid huffman encoding (test starts with one)")
+ }
+}
+
+func TestReadVarInt(t *testing.T) {
+ type res struct {
+ i uint64
+ consumed int
+ err error
+ }
+ tests := []struct {
+ n byte
+ p []byte
+ want res
+ }{
+ // Fits in a byte:
+ {1, []byte{0}, res{0, 1, nil}},
+ {2, []byte{2}, res{2, 1, nil}},
+ {3, []byte{6}, res{6, 1, nil}},
+ {4, []byte{14}, res{14, 1, nil}},
+ {5, []byte{30}, res{30, 1, nil}},
+ {6, []byte{62}, res{62, 1, nil}},
+ {7, []byte{126}, res{126, 1, nil}},
+ {8, []byte{254}, res{254, 1, nil}},
+
+ // Doesn't fit in a byte:
+ {1, []byte{1}, res{0, 0, errNeedMore}},
+ {2, []byte{3}, res{0, 0, errNeedMore}},
+ {3, []byte{7}, res{0, 0, errNeedMore}},
+ {4, []byte{15}, res{0, 0, errNeedMore}},
+ {5, []byte{31}, res{0, 0, errNeedMore}},
+ {6, []byte{63}, res{0, 0, errNeedMore}},
+ {7, []byte{127}, res{0, 0, errNeedMore}},
+ {8, []byte{255}, res{0, 0, errNeedMore}},
+
+ // Ignoring top bits:
+ {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
+ {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
+ {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
+
+ // Extra byte:
+ {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
+
+ // Short a byte:
+ {5, []byte{191, 154}, res{0, 0, errNeedMore}},
+
+ // integer overflow:
+ {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
+ }
+ for _, tt := range tests {
+ i, remain, err := readVarInt(tt.n, tt.p)
+ consumed := len(tt.p) - len(remain)
+ got := res{i, consumed, err}
+ if got != tt.want {
+ t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
+ }
+ }
+}
+
+// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56
+func TestHuffmanFuzzCrash(t *testing.T) {
+ got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8"))
+ if got != "" {
+ t.Errorf("Got %q; want empty string", got)
+ }
+ if err != ErrInvalidHuffman {
+ t.Errorf("Err = %v; want ErrInvalidHuffman", err)
+ }
+}
+
+func dehex(s string) []byte {
+ s = strings.Replace(s, " ", "", -1)
+ s = strings.Replace(s, "\n", "", -1)
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func TestEmitEnabled(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
+ enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
+
+ numCallback := 0
+ var dec *Decoder
+ dec = NewDecoder(8<<20, func(HeaderField) {
+ numCallback++
+ dec.SetEmitEnabled(false)
+ })
+ if !dec.EmitEnabled() {
+ t.Errorf("initial emit enabled = false; want true")
+ }
+ if _, err := dec.Write(buf.Bytes()); err != nil {
+ t.Error(err)
+ }
+ if numCallback != 1 {
+ t.Errorf("num callbacks = %d; want 1", numCallback)
+ }
+ if dec.EmitEnabled() {
+ t.Errorf("emit enabled = true; want false")
+ }
+}
+
+func TestSaveBufLimit(t *testing.T) {
+ const maxStr = 1 << 10
+ var got []HeaderField
+ dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
+ got = append(got, hf)
+ })
+ dec.SetMaxStringLength(maxStr)
+ var frag []byte
+ frag = append(frag[:0], encodeTypeByte(false, false))
+ frag = appendVarInt(frag, 7, 3)
+ frag = append(frag, "foo"...)
+ frag = appendVarInt(frag, 7, 3)
+ frag = append(frag, "bar"...)
+
+ if _, err := dec.Write(frag); err != nil {
+ t.Fatal(err)
+ }
+
+ want := []HeaderField{{Name: "foo", Value: "bar"}}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("After small writes, got %v; want %v", got, want)
+ }
+
+ frag = append(frag[:0], encodeTypeByte(false, false))
+ frag = appendVarInt(frag, 7, maxStr*3)
+ frag = append(frag, make([]byte, maxStr*3)...)
+
+ _, err := dec.Write(frag)
+ if err != ErrStringLength {
+ t.Fatalf("Write error = %v; want ErrStringLength", err)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 000000000..8850e3946
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ n := rootHuffmanNode
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
+ return nil
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
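A round-trip sketch with the exported helpers above, using a string whose encoding appears in the package's Huffman tests earlier in this diff:

	enc := AppendHuffmanString(nil, "www.example.com") // f1e3 c2e5 f23a 6ba0 ab90 f4ff
	s, err := HuffmanDecodeToString(enc)
	if err != nil || s != "www.example.com" {
		// should not happen for output of AppendHuffmanString
	}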
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..b9283a023
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = [...]HeaderField{
+ pair(":authority", ""), // index 1 (1-based)
+ pair(":method", "GET"),
+ pair(":method", "POST"),
+ pair(":path", "/"),
+ pair(":path", "/index.html"),
+ pair(":scheme", "http"),
+ pair(":scheme", "https"),
+ pair(":status", "200"),
+ pair(":status", "204"),
+ pair(":status", "206"),
+ pair(":status", "304"),
+ pair(":status", "400"),
+ pair(":status", "404"),
+ pair(":status", "500"),
+ pair("accept-charset", ""),
+ pair("accept-encoding", "gzip, deflate"),
+ pair("accept-language", ""),
+ pair("accept-ranges", ""),
+ pair("accept", ""),
+ pair("access-control-allow-origin", ""),
+ pair("age", ""),
+ pair("allow", ""),
+ pair("authorization", ""),
+ pair("cache-control", ""),
+ pair("content-disposition", ""),
+ pair("content-encoding", ""),
+ pair("content-language", ""),
+ pair("content-length", ""),
+ pair("content-location", ""),
+ pair("content-range", ""),
+ pair("content-type", ""),
+ pair("cookie", ""),
+ pair("date", ""),
+ pair("etag", ""),
+ pair("expect", ""),
+ pair("expires", ""),
+ pair("from", ""),
+ pair("host", ""),
+ pair("if-match", ""),
+ pair("if-modified-since", ""),
+ pair("if-none-match", ""),
+ pair("if-range", ""),
+ pair("if-unmodified-since", ""),
+ pair("last-modified", ""),
+ pair("link", ""),
+ pair("location", ""),
+ pair("max-forwards", ""),
+ pair("proxy-authenticate", ""),
+ pair("proxy-authorization", ""),
+ pair("range", ""),
+ pair("referer", ""),
+ pair("refresh", ""),
+ pair("retry-after", ""),
+ pair("server", ""),
+ pair("set-cookie", ""),
+ pair("strict-transport-security", ""),
+ pair("transfer-encoding", ""),
+ pair("user-agent", ""),
+ pair("vary", ""),
+ pair("via", ""),
+ pair("www-authenticate", ""),
+}
+
+var huffmanCodes = [256]uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 000000000..2e27b093c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,365 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+//
+package http2 // import "golang.org/x/net/http2"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/lex/httplex"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+}
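+
+// Illustrative only (not part of the upstream file): the flags above are
+// driven by the GODEBUG environment variable, e.g. a caller could start
+// their binary (placeholder name) with
+//
+//	GODEBUG=http2debug=2 ./your-server
+//
+// to get verbose logs plus per-frame read/write logging.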
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateResvLocal
+ stateResvRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateResvLocal: "ResvLocal",
+ stateResvRemote: "ResvRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
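+
+// For example (illustrative sketch, using only identifiers defined in this
+// package): a MAX_FRAME_SIZE below 16384 fails validation with a
+// protocol-level connection error, while the spec default passes.
+//
+//	bad := Setting{ID: SettingMaxFrameSize, Val: 1024}
+//	_ = bad.Valid() // ConnectionError(ErrCodeProtocol)
+//	ok := Setting{ID: SettingMaxFrameSize, Val: initialMaxFrameSize}
+//	_ = ok.Valid() // nil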
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+ errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
+ errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httplex.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
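+
+// For instance (illustrative): "content-type" is a valid wire name, while
+// an uppercase or space-containing name is not.
+//
+//	validWireHeaderFieldName("content-type") // true
+//	validWireHeaderFieldName("Content-Type") // false: uppercase letters
+//	validWireHeaderFieldName("bad header")   // false: space is not a token rune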
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+ for i := 100; i <= 999; i++ {
+ if v := http.StatusText(i); v != "" {
+ httpCodeStringCommon[i] = strconv.Itoa(i)
+ }
+ }
+}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and initialized in place, keeping its memory in the
+// same allocation.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
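+
+// Minimal sketch (illustrative) of the closeWaiter pattern used throughout
+// this package:
+//
+//	var cw closeWaiter
+//	cw.Init()
+//	go func() { doWork(); cw.Close() }() // doWork is a placeholder
+//	cw.Wait()                            // returns once Close has run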
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+ return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ // TODO: pick something better? this is a bit under
+ // (3 x typical 1500 byte MTU) at least.
+ return bufio.NewWriterSize(nil, 4<<10)
+ },
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
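+
+// Sketch of the intended lifecycle (illustrative; conn and frameBytes are
+// placeholders): the bufio.Writer is only held between the first Write and
+// the next Flush, so an idle connection pins no buffer memory.
+//
+//	bw := newBufferedWriter(conn) // conn is any io.Writer
+//	bw.Write(frameBytes)          // lazily borrows a *bufio.Writer from bufWriterPool
+//	bw.Flush()                    // flushes and returns the buffer to the pool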
+
+func mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type httpError struct {
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *sorter) Len() int { return len(s.v) }
+func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+ // Our sorter works on s.v, which sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
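+
+// Illustrative use (sketch; hdr is a placeholder http.Header): keys can be
+// sorted without allocating on the hot path by reusing a pooled sorter.
+//
+//	s := sorterPool.Get().(*sorter)
+//	for _, k := range s.Keys(hdr) {
+//		// encode header field k ...
+//	}
+//	sorterPool.Put(s)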
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// *) a non-empty string starting with '/', but not with "//",
+// *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
+}
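+
+// Examples (illustrative):
+//
+//	validPseudoPath("/a/b") // true
+//	validPseudoPath("*")    // true (OPTIONS)
+//	validPseudoPath("//x")  // false: leading "//"
+//	validPseudoPath("")     // false: empty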
diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go
new file mode 100644
index 000000000..22c2ace82
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2_test.go
@@ -0,0 +1,198 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "net/http"
+ "os/exec"
+ "strconv"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
+
+func condSkipFailingTest(t *testing.T) {
+ if !*knownFailing {
+ t.Skip("Skipping known-failing test without --known_failing")
+ }
+}
+
+func init() {
+ DebugGoroutines = true
+ flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging")
+}
+
+func TestSettingString(t *testing.T) {
+ tests := []struct {
+ s Setting
+ want string
+ }{
+ {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
+ {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
+ }
+ for i, tt := range tests {
+ got := fmt.Sprint(tt.s)
+ if got != tt.want {
+ t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
+ }
+ }
+}
+
+type twriter struct {
+ t testing.TB
+ st *serverTester // optional
+}
+
+func (w twriter) Write(p []byte) (n int, err error) {
+ if w.st != nil {
+ ps := string(p)
+ for _, phrase := range w.st.logFilter {
+ if strings.Contains(ps, phrase) {
+ return len(p), nil // no logging
+ }
+ }
+ }
+ w.t.Logf("%s", p)
+ return len(p), nil
+}
+
+// like encodeHeader, but don't add implicit pseudo headers.
+func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ for len(headers) > 0 {
+ k, v := headers[0], headers[1]
+ headers = headers[2:]
+ if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
+ t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+ }
+ }
+ return buf.Bytes()
+}
+
+// Verify that curl has http2.
+func requireCurl(t *testing.T) {
+ out, err := dockerLogs(curl(t, "--version"))
+ if err != nil {
+ t.Skipf("failed to determine curl features; skipping test")
+ }
+ if !strings.Contains(string(out), "HTTP2") {
+ t.Skip("curl doesn't support HTTP2; skipping test")
+ }
+}
+
+func curl(t *testing.T, args ...string) (container string) {
+ out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
+ if err != nil {
+ t.Skipf("Failed to run curl in docker: %v, %s", err, out)
+ }
+ return strings.TrimSpace(string(out))
+}
+
+// Verify that h2load exists.
+func requireH2load(t *testing.T) {
+ out, err := dockerLogs(h2load(t, "--version"))
+ if err != nil {
+ t.Skipf("failed to probe h2load; skipping test: %s", out)
+ }
+ if !strings.Contains(string(out), "h2load nghttp2/") {
+ t.Skipf("h2load not present; skipping test. (Output=%q)", out)
+ }
+}
+
+func h2load(t *testing.T, args ...string) (container string) {
+ out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
+ if err != nil {
+ t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
+ }
+ return strings.TrimSpace(string(out))
+}
+
+type puppetCommand struct {
+ fn func(w http.ResponseWriter, r *http.Request)
+ done chan<- bool
+}
+
+type handlerPuppet struct {
+ ch chan puppetCommand
+}
+
+func newHandlerPuppet() *handlerPuppet {
+ return &handlerPuppet{
+ ch: make(chan puppetCommand),
+ }
+}
+
+func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
+ for cmd := range p.ch {
+ cmd.fn(w, r)
+ cmd.done <- true
+ }
+}
+
+func (p *handlerPuppet) done() { close(p.ch) }
+func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
+ done := make(chan bool)
+ p.ch <- puppetCommand{fn, done}
+ <-done
+}
+func dockerLogs(container string) ([]byte, error) {
+ out, err := exec.Command("docker", "wait", container).CombinedOutput()
+ if err != nil {
+ return out, err
+ }
+ exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
+ if err != nil {
+ return out, errors.New("unexpected exit status from docker wait")
+ }
+ out, err = exec.Command("docker", "logs", container).CombinedOutput()
+ exec.Command("docker", "rm", container).Run()
+ if err == nil && exitStatus != 0 {
+ err = fmt.Errorf("exit status %d: %s", exitStatus, out)
+ }
+ return out, err
+}
+
+func kill(container string) {
+ exec.Command("docker", "kill", container).Run()
+ exec.Command("docker", "rm", container).Run()
+}
+
+func cleanDate(res *http.Response) {
+ if d := res.Header["Date"]; len(d) == 1 {
+ d[0] = "XXX"
+ }
+}
+
+func TestSorterPoolAllocs(t *testing.T) {
+ ss := []string{"a", "b", "c"}
+ h := http.Header{
+ "a": nil,
+ "b": nil,
+ "c": nil,
+ }
+ sorter := new(sorter)
+
+ if allocs := testing.AllocsPerRun(100, func() {
+ sorter.SortStrings(ss)
+ }); allocs >= 1 {
+ t.Logf("SortStrings allocs = %v; want <1", allocs)
+ }
+
+ if allocs := testing.AllocsPerRun(5, func() {
+ if len(sorter.Keys(h)) != 3 {
+ t.Fatal("wrong result")
+ }
+ }); allocs > 0 {
+ t.Logf("Keys allocs = %v; want <1", allocs)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 000000000..efd2e1282
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return 0
+
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 000000000..667867f4d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,77 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+)
+
+type contextContext interface{}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+ return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+ // Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace) {}
+func traceWroteHeaders(*clientTrace) {}
+func traceWroteRequest(*clientTrace, error) {}
+func traceGot100Continue(trace *clientTrace) {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req
+}
+
+// temporary copy of Go 1.6's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 000000000..53b7a1daf
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,153 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+func (p *pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, errClosedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
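+
+// Sketch of the difference (illustrative; mirrors the package tests):
+// CloseWithError still drains buffered data before surfacing the error,
+// while BreakWithError does not.
+//
+//	var p pipe
+//	p.b = new(bytes.Buffer)
+//	io.WriteString(&p, "tail")
+//	p.CloseWithError(io.EOF)   // subsequent Reads return "tail", then io.EOF
+//	// p.BreakWithError(io.EOF) // would make Reads return io.EOF immediately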
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go
new file mode 100644
index 000000000..763229999
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe_test.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestPipeClose(t *testing.T) {
+ var p pipe
+ p.b = new(bytes.Buffer)
+ a := errors.New("a")
+ b := errors.New("b")
+ p.CloseWithError(a)
+ p.CloseWithError(b)
+ _, err := p.Read(make([]byte, 1))
+ if err != a {
+ t.Errorf("err = %v want %v", err, a)
+ }
+}
+
+func TestPipeDoneChan(t *testing.T) {
+ var p pipe
+ done := p.Done()
+ select {
+ case <-done:
+ t.Fatal("done too soon")
+ default:
+ }
+ p.CloseWithError(io.EOF)
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_ErrFirst(t *testing.T) {
+ var p pipe
+ p.CloseWithError(io.EOF)
+ done := p.Done()
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_Break(t *testing.T) {
+ var p pipe
+ done := p.Done()
+ select {
+ case <-done:
+ t.Fatal("done too soon")
+ default:
+ }
+ p.BreakWithError(io.EOF)
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
+ var p pipe
+ p.BreakWithError(io.EOF)
+ done := p.Done()
+ select {
+ case <-done:
+ default:
+ t.Fatal("should be done")
+ }
+}
+
+func TestPipeCloseWithError(t *testing.T) {
+ p := &pipe{b: new(bytes.Buffer)}
+ const body = "foo"
+ io.WriteString(p, body)
+ a := errors.New("test error")
+ p.CloseWithError(a)
+ all, err := ioutil.ReadAll(p)
+ if string(all) != body {
+ t.Errorf("read bytes = %q; want %q", all, body)
+ }
+ if err != a {
+ t.Logf("read error = %v, %v", err, a)
+ }
+}
+
+func TestPipeBreakWithError(t *testing.T) {
+ p := &pipe{b: new(bytes.Buffer)}
+ io.WriteString(p, "foo")
+ a := errors.New("test err")
+ p.BreakWithError(a)
+ all, err := ioutil.ReadAll(p)
+ if string(all) != "" {
+ t.Errorf("read bytes = %q; want empty string", all)
+ }
+ if err != a {
+ t.Logf("read error = %v, %v", err, a)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/priority_test.go b/vendor/golang.org/x/net/http2/priority_test.go
new file mode 100644
index 000000000..a3fe2bb49
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/priority_test.go
@@ -0,0 +1,118 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "testing"
+)
+
+func TestPriority(t *testing.T) {
+ // A -> B
+ // move A's parent to B
+ streams := make(map[uint32]*stream)
+ a := &stream{
+ parent: nil,
+ weight: 16,
+ }
+ streams[1] = a
+ b := &stream{
+ parent: a,
+ weight: 16,
+ }
+ streams[2] = b
+ adjustStreamPriority(streams, 1, PriorityParam{
+ Weight: 20,
+ StreamDep: 2,
+ })
+ if a.parent != b {
+ t.Errorf("Expected A's parent to be B")
+ }
+ if a.weight != 20 {
+ t.Errorf("Expected A's weight to be 20; got %d", a.weight)
+ }
+ if b.parent != nil {
+ t.Errorf("Expected B to have no parent")
+ }
+ if b.weight != 16 {
+ t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+ }
+}
+
+func TestPriorityExclusiveZero(t *testing.T) {
+ // A B and C are all children of the 0 stream.
+ // Exclusive reprioritization to any of the streams
+ // should bring the rest of the streams under the
+ // reprioritized stream
+ streams := make(map[uint32]*stream)
+ a := &stream{
+ parent: nil,
+ weight: 16,
+ }
+ streams[1] = a
+ b := &stream{
+ parent: nil,
+ weight: 16,
+ }
+ streams[2] = b
+ c := &stream{
+ parent: nil,
+ weight: 16,
+ }
+ streams[3] = c
+ adjustStreamPriority(streams, 3, PriorityParam{
+ Weight: 20,
+ StreamDep: 0,
+ Exclusive: true,
+ })
+ if a.parent != c {
+ t.Errorf("Expected A's parent to be C")
+ }
+ if a.weight != 16 {
+ t.Errorf("Expected A's weight to be 16; got %d", a.weight)
+ }
+ if b.parent != c {
+ t.Errorf("Expected B's parent to be C")
+ }
+ if b.weight != 16 {
+ t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+ }
+ if c.parent != nil {
+ t.Errorf("Expected C to have no parent")
+ }
+ if c.weight != 20 {
+ t.Errorf("Expected C's weight to be 20; got %d", b.weight)
+ }
+}
+
+func TestPriorityOwnParent(t *testing.T) {
+ streams := make(map[uint32]*stream)
+ a := &stream{
+ parent: nil,
+ weight: 16,
+ }
+ streams[1] = a
+ b := &stream{
+ parent: a,
+ weight: 16,
+ }
+ streams[2] = b
+ adjustStreamPriority(streams, 1, PriorityParam{
+ Weight: 20,
+ StreamDep: 1,
+ })
+ if a.parent != nil {
+ t.Errorf("Expected A's parent to be nil")
+ }
+ if a.weight != 20 {
+ t.Errorf("Expected A's weight to be 20; got %d", a.weight)
+ }
+ if b.parent != a {
+ t.Errorf("Expected B's parent to be A")
+ }
+ if b.weight != 16 {
+ t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+ }
+
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 000000000..8206fa79d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2292 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to going into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
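+
+// Illustrative configuration sketch (the values are placeholders): callers
+// usually leave these fields zero and take the defaults computed above.
+//
+//	h2 := &Server{
+//		MaxConcurrentStreams: 250,     // falls back to defaultMaxStreams when zero
+//		MaxReadFrameSize:     1 << 20, // must sit in the documented 16k-16M range
+//	}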
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if conf == nil {
+ conf = new(Server)
+ }
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil {
+ // If they already provided a CipherSuite list, return
+ // an error if it has a bad order or is missing
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ haveRequired := false
+ sawBad := false
+ for i, cs := range s.TLSConfig.CipherSuites {
+ if cs == requiredCipher {
+ haveRequired = true
+ }
+ if isBadCipher(cs) {
+ sawBad = true
+ } else if sawBad {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+ // to switch to "h2".
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+ return nil
+}
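+
+// Caller-side sketch (illustrative; the mux and certificate paths are
+// placeholders, not part of this package): wiring HTTP/2 into a net/http
+// server that terminates TLS itself.
+//
+//	srv := &http.Server{Addr: ":8443", Handler: mux}
+//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))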
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan frameWriteMsg, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ advMaxStreams: s.maxConcurrentStreams(),
+ writeSched: writeScheduler{
+ maxFrameSize: initialMaxFrameSize,
+ },
+ initialWindowSize: initialWindowSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+ fr := NewFramer(sc.bw, c)
+ fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
+ }
+
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
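+
+// Caller-side sketch (illustrative; ln, h2srv and handler are placeholders):
+// serving accepted connections directly, bypassing net/http's accept loop.
+//
+//	for {
+//		c, err := ln.Accept() // ln is a net.Listener, typically from tls.Listen
+//		if err != nil {
+//			return err
+//		}
+//		go h2srv.ServeConn(c, &ServeConnOpts{Handler: handler})
+//	}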
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ baseCtx contextContext
+ framer *Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func(int) // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curOpenStreams uint32 // client's number of open streams
+ maxStreamID uint32 // max ever seen
+ streams map[uint32]*stream
+ initialWindowSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ writeSched writeScheduler
+ inGoAway bool // we've started to or sent GOAWAY
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+ freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
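+
+// Worked example (illustrative): with net/http's default of
+// DefaultMaxHeaderBytes = 1<<20 bytes, the advertised limit becomes
+// 1<<20 + 10*32 = 1,048,896 bytes.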
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed when the stream transitions to the closed state
+ ctx contextContext
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ reqBuf []byte
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://http2.github.io/http2-spec/#rfc.section.5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID <= sc.maxStreamID {
+ return stateClosed, nil
+ }
+ return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
+type readFrameResult struct {
+ f Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ gate := make(gate)
+ gateDone := gate.Done
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+ wm frameWriteMsg // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+ err := wm.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wm, err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ sc.writeFrame(frameWriteMsg{
+ write: writeSettings{
+ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+
+ // TODO: more actual settings, notably
+ // SettingInitialWindowSize, but then we also
+ // want to bump up the conn window size the
+ // same amount here right after the settings
+ },
+ })
+ sc.unackedSettings++
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(http.StateActive)
+ sc.setConnState(http.StateIdle)
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.NewTimer(firstSettingsTimeout)
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wm := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wm)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer.C != nil {
+ settingsTimer.Stop()
+ settingsTimer.C = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case <-settingsTimer.C:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case <-sc.shutdownTimerCh:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case fn := <-sc.testHookCh:
+ fn(loopNum)
+ }
+ }
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
+
+var errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+ New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+ ch := errChanPool.Get().(chan error)
+ writeArg := writeDataPool.Get().(*writeData)
+ *writeArg = writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return errStreamClosed
+ }
+ }
+ errChanPool.Put(ch)
+ if frameWriteDone {
+ writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wm:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+ sc.serveG.check()
+
+ var ignoreWrite bool
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wm.write.(type) {
+ case *writeResHeaders:
+ wm.stream.wroteHeaders = true
+ case write100ContinueHeadersFrame:
+ if wm.stream.wroteHeaders {
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ sc.writeSched.add(wm)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (a separate
+// goroutine is used since the write might block on the network), and
+// updates the serve goroutine's view of the world from the info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wm.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ panic("internal error: attempt to send frame on half-closed-local stream")
+ case stateClosed:
+ if st.sentReset || st.gotReset {
+ // Skip this frame.
+ sc.scheduleFrameWrite()
+ return
+ }
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ go sc.writeFrameAsync(wm)
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+
+ wm := res.wm
+ st := wm.stream
+
+ closeStream := endsStream(wm.write)
+
+ if _, ok := wm.write.(handlerPanicRST); ok {
+ sc.closeStream(st, errHandlerPanicked)
+ }
+
+ // Reply (if requested) to the blocked ServeHTTP goroutine.
+ if ch := wm.done; ch != nil {
+ select {
+ case ch <- res.err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ }
+ }
+ wm.write = nil // prevent use (assume it's tainted after wm.done send)
+
+ if closeStream {
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for finishing writing to a ResponseWriter
+ // while still reading data (see possible TODO
+ // at top of this file), we go into closed
+ // state here anyway, after telling the peer
+ // we're hanging up on them.
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+ errCancel := streamError(st.id, ErrCodeCancel)
+ sc.resetStream(errCancel)
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ }
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame {
+ return
+ }
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(frameWriteMsg{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ return
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ return
+ }
+ if !sc.inGoAway {
+ if wm, ok := sc.writeSched.take(); ok {
+ sc.startFrameWrite(wm)
+ return
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ return
+ }
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if code != ErrCodeNo {
+ sc.shutDownIn(250 * time.Millisecond)
+ } else {
+ // TODO: configurable
+ sc.shutDownIn(1 * time.Second)
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.NewTimer(d)
+ sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(frameWriteMsg{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.sentReset = true
+ sc.closeStream(st, se)
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.sawFirstSettings = true
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ st := sc.streams[f.StreamID]
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return streamError(f.StreamID, ErrCodeFlowControl)
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.gotReset = true
+ st.cancelCtx()
+ sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ sc.curOpenStreams--
+ if sc.curOpenStreams == 0 {
+ sc.setConnState(http.StateIdle)
+ }
+ delete(sc.streams, st.id)
+ if p := st.body; p != nil {
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
+ sc.sendWindowUpdate(nil, p.Len())
+
+ p.CloseWithError(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.forgetStream(st.id)
+ if st.reqBuf != nil {
+ // Stash this request body buffer (64k) away for reuse
+ // by a future POST/PUT/etc.
+ //
+ // TODO(bradfitz): share on the server? sync.Pool?
+ // Server requires locks and might hurt contention.
+ // sync.Pool might work, or might be worse, depending
+ // on goroutine CPU migrations. (get and put on
+ // separate CPUs). Maybe a mix of strategies. But
+ // this is an easy win for now.
+ sc.freeRequestBodyBuf = st.reqBuf
+ }
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.writeSched.maxFrameSize = s.Val
+ case SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialWindowSize
+ sc.initialWindowSize = int32(val)
+ growth := sc.initialWindowSize - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ data := f.Data()
+
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ id := f.Header().StreamID
+ st, ok := sc.streams[id]
+ if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
+ if sc.inflow.available() < int32(f.Length) {
+ return streamError(id, ErrCodeFlowControl)
+ }
+ // Deduct the flow control from inflow, since we're
+ // going to immediately add it back in
+ // sendWindowUpdate, which also schedules sending the
+ // frames.
+ sc.inflow.take(int32(f.Length))
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ return streamError(id, ErrCodeStreamClosed)
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ return streamError(id, ErrCodeStreamClosed)
+ }
+ if f.Length > 0 {
+ // Check whether the client has flow control quota.
+ if st.inflow.available() < int32(f.Length) {
+ return streamError(id, ErrCodeFlowControl)
+ }
+ st.inflow.take(int32(f.Length))
+
+ if len(data) > 0 {
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return streamError(id, ErrCodeStreamClosed)
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
+ }
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ st := sc.streams[f.Header().StreamID]
+ if st != nil {
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxStreamID {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.maxStreamID = id
+
+ ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+ st = &stream{
+ sc: sc,
+ id: id,
+ state: stateOpen,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ if f.StreamEnded() {
+ st.state = stateHalfClosedRemote
+ }
+ st.cw.Init()
+
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+ sc.streams[id] = st
+ if f.HasPriority() {
+ adjustStreamPriority(sc.streams, st.id, f.Priority)
+ }
+ sc.curOpenStreams++
+ if sc.curOpenStreams == 1 {
+ sc.setConnState(http.StateActive)
+ }
+ if sc.curOpenStreams > sc.advMaxStreams {
+ // "Endpoints MUST NOT exceed the limit set by their
+ // peer. An endpoint that receives a HEADERS frame
+ // that causes their advertised concurrent stream
+ // limit to be exceeded MUST treat this as a stream
+ // error (Section 5.4.2) of type PROTOCOL_ERROR or
+ // REFUSED_STREAM."
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return streamError(st.id, ErrCodeProtocol)
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return streamError(st.id, ErrCodeRefusedStream)
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = handleHeaderListTooLong
+ } else if err := checkValidHTTP2Request(req); err != nil {
+ handler = new400Handler(err)
+ }
+
+ go sc.runHandler(rw, req, handler)
+ return nil
+}
+
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return streamError(st.id, ErrCodeProtocol)
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return streamError(st.id, ErrCodeProtocol)
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return streamError(st.id, ErrCodeProtocol)
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+ st, ok := streams[streamID]
+ if !ok {
+ // TODO: not quite correct (this streamID might
+ // already exist in the dep tree, but be closed), but
+ // close enough for now.
+ return
+ }
+ st.weight = priority.Weight
+ parent := streams[priority.StreamDep] // might be nil
+ if parent == st {
+ // if client tries to set this stream to be the parent of itself
+ // ignore and keep going
+ return
+ }
+
+ // section 5.3.3: If a stream is made dependent on one of its
+ // own dependencies, the formerly dependent stream is first
+ // moved to be dependent on the reprioritized stream's previous
+ // parent. The moved dependency retains its weight.
+ for piter := parent; piter != nil; piter = piter.parent {
+ if piter == st {
+ parent.parent = st.parent
+ break
+ }
+ }
+ st.parent = parent
+ if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+ for _, openStream := range streams {
+ if openStream != st && openStream.parent == st.parent {
+ openStream.parent = st
+ }
+ }
+ }
+}
+
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ method := f.PseudoValue("method")
+ path := f.PseudoValue("path")
+ scheme := f.PseudoValue("scheme")
+ authority := f.PseudoValue("authority")
+
+ isConnect := method == "CONNECT"
+ if isConnect {
+ if path != "" || scheme != "" || authority == "" {
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+ } else if method == "" || path == "" ||
+ (scheme != "https" && scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+
+ bodyOpen := !f.StreamEnded()
+ if method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+ var tlsState *tls.ConnectionState // nil if not scheme https
+
+ if scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ header := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+
+ if authority == "" {
+ authority = header.Get("Host")
+ }
+ needsContinue := header.Get("Expect") == "100-continue"
+ if needsContinue {
+ header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := header["Cookie"]; len(cookies) > 1 {
+ header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer http.Header
+ for _, v := range header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(http.Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(header, "Trailer")
+
+ body := &requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
+ }
+ var url_ *url.URL
+ var requestURI string
+ if isConnect {
+ url_ = &url.URL{Host: authority}
+ requestURI = authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(path)
+ if err != nil {
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+ requestURI = path
+ }
+ req := &http.Request{
+ Method: method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ req = requestWithContext(req, st.ctx)
+ if bodyOpen {
+ // Disabled, per golang.org/issue/14960:
+ // st.reqBuf = sc.getRequestBodyBuf()
+ // TODO: remove this 64k of garbage per request (again, but without a data race):
+ buf := make([]byte, initialWindowSize)
+
+ body.pipe = &pipe{
+ b: &fixedBuffer{buf: buf},
+ }
+
+ if vv, ok := header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ }
+
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ rws.body = body
+
+ rw := &responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+func (sc *serverConn) getRequestBodyBuf() []byte {
+ sc.serveG.check()
+ if buf := sc.freeRequestBodyBuf; buf != nil {
+ sc.freeRequestBodyBuf = nil
+ return buf
+ }
+ return make([]byte, initialWindowSize)
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if didPanic {
+ e := recover()
+ // Same as net/http:
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // If there's a header map (which we don't own), we have to block until
+ // this frame is written, so that an http.Flush mid-handler writes out the
+ // correct value of keys before a handler later potentially mutates it.
+ errc = errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+ sc.serveG.checkNotOn() // NOT on
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+ // A Go Read call on 64-bit machines could in theory read
+ // a larger Read than this. Very unlikely, but we handle it here
+ // rather than elsewhere for now.
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(frameWriteMsg{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
+type requestBody struct {
+ stream *stream
+ conn *serverConn
+ closed bool
+ pipe *pipe // non-nil if we have an HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(errClosedBody)
+ }
+ b.closed = true
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if n > 0 {
+ b.conn.noteBodyReadFromHandler(b.stream, n)
+ }
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ body *requestBody // to close at end of request, if DATA frames didn't
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ if !ValidTrailerHeader(k) {
+ // Forbidden by RFC 2616 14.40.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ clen64, err := strconv.ParseInt(clen, 10, 64)
+ if err == nil && clen64 >= 0 {
+ rws.sentContentLen = clen64
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ if !hasContentType && bodyAllowedForStatus(rws.status) {
+ ctype = http.DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ endStream := rws.handlerDone && !rws.hasTrailers()
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && rws.hasTrailers() {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
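+
+// Editorial sketch (not part of the upstream file): a handler could use the
+// TrailerPrefix mechanism documented above to set a trailer after the
+// response header has already been written. The trailer name below is
+// purely illustrative.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.WriteHeader(http.StatusOK)
+//		io.WriteString(w, "body")
+//		// Promoted to a real trailer by promoteUndeclaredTrailers
+//		// once the handler returns.
+//		w.Header().Set("Trailer:Grpc-Status", "0")
+//	}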
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ go func() {
+ rws.stream.cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
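+
+// Editorial sketch (not part of the upstream file): a handler might use the
+// CloseNotify channel above to stop work when the client disconnects. The
+// timeout value is illustrative only.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		closed := w.(http.CloseNotifier).CloseNotify()
+//		select {
+//		case <-closed:
+//			return // peer went away; abandon the response
+//		case <-time.After(5 * time.Second):
+//			io.WriteString(w, "done")
+//		}
+//	}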
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
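+
+// Editorial sketch (not part of the upstream file), illustrating the write
+// path described above: handler writes are buffered by rws.bw and only reach
+// the peer as HEADERS/DATA frames on Flush, when the buffer fills, or when
+// the handler returns.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.Write([]byte("hello "))  // buffered in rws.bw
+//		w.(http.Flusher).Flush()   // forces the header and buffered DATA out
+//		w.Write([]byte("world"))   // sent when the handler returns
+//	}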
+
+// either dataB or dataS is non-zero.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2Request(req *http.Request) error {
+ for _, h := range connHeaders {
+ if _, ok := req.Header[h]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+ }
+ }
+ te := req.Header["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
+
+func new400Handler(err error) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
+func ValidTrailerHeader(name string) bool {
+ name = http.CanonicalHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go
new file mode 100644
index 000000000..879e82135
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server_test.go
@@ -0,0 +1,3368 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
+
+func stderrv() io.Writer {
+ if *stderrVerbose {
+ return os.Stderr
+ }
+
+ return ioutil.Discard
+}
+
+type serverTester struct {
+ cc net.Conn // client conn
+ t testing.TB
+ ts *httptest.Server
+ fr *Framer
+ logBuf *bytes.Buffer
+ logFilter []string // substrings to filter out
+ scMu sync.Mutex // guards sc
+ sc *serverConn
+ hpackDec *hpack.Decoder
+ decodedHeaders [][2]string
+
+ // writing headers:
+ headerBuf bytes.Buffer
+ hpackEnc *hpack.Encoder
+}
+
+func init() {
+ testHookOnPanicMu = new(sync.Mutex)
+}
+
+func resetHooks() {
+ testHookOnPanicMu.Lock()
+ testHookOnPanic = nil
+ testHookOnPanicMu.Unlock()
+}
+
+type serverTesterOpt string
+
+var optOnlyServer = serverTesterOpt("only_server")
+var optQuiet = serverTesterOpt("quiet_logging")
+
+func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
+ resetHooks()
+
+ logBuf := new(bytes.Buffer)
+ ts := httptest.NewUnstartedServer(handler)
+
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: true,
+ // h2-14 is temporary, until curl (as used by unit tests in Docker)
+ // is updated.
+ NextProtos: []string{NextProtoTLS, "h2-14"},
+ }
+
+ var onlyServer, quiet bool
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case func(*tls.Config):
+ v(tlsConfig)
+ case func(*httptest.Server):
+ v(ts)
+ case serverTesterOpt:
+ switch v {
+ case optOnlyServer:
+ onlyServer = true
+ case optQuiet:
+ quiet = true
+ }
+ case func(net.Conn, http.ConnState):
+ ts.Config.ConnState = v
+ default:
+ t.Fatalf("unknown newServerTester option type %T", v)
+ }
+ }
+
+ ConfigureServer(ts.Config, &Server{})
+
+ st := &serverTester{
+ t: t,
+ ts: ts,
+ logBuf: logBuf,
+ }
+ st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
+ st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)
+
+ ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+ if quiet {
+ ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+ } else {
+ ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, logBuf), "", log.LstdFlags)
+ }
+ ts.StartTLS()
+
+ if VerboseLogs {
+ t.Logf("Running test server at: %s", ts.URL)
+ }
+ testHookGetServerConn = func(v *serverConn) {
+ st.scMu.Lock()
+ defer st.scMu.Unlock()
+ st.sc = v
+ st.sc.testHookCh = make(chan func(int))
+ }
+ log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))
+ if !onlyServer {
+ cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ st.cc = cc
+ st.fr = NewFramer(cc, cc)
+ }
+ return st
+}
+
+func (st *serverTester) closeConn() {
+ st.scMu.Lock()
+ defer st.scMu.Unlock()
+ st.sc.conn.Close()
+}
+
+func (st *serverTester) addLogFilter(phrase string) {
+ st.logFilter = append(st.logFilter, phrase)
+}
+
+func (st *serverTester) stream(id uint32) *stream {
+ ch := make(chan *stream, 1)
+ st.sc.testHookCh <- func(int) {
+ ch <- st.sc.streams[id]
+ }
+ return <-ch
+}
+
+func (st *serverTester) streamState(id uint32) streamState {
+ ch := make(chan streamState, 1)
+ st.sc.testHookCh <- func(int) {
+ state, _ := st.sc.state(id)
+ ch <- state
+ }
+ return <-ch
+}
+
+// loopNum reports how many times this conn's select loop has gone around.
+func (st *serverTester) loopNum() int {
+ lastc := make(chan int, 1)
+ st.sc.testHookCh <- func(loopNum int) {
+ lastc <- loopNum
+ }
+ return <-lastc
+}
+
+// awaitIdle heuristically waits for the server conn's select loop to be idle.
+// The heuristic is that the server connection's serve loop must schedule
+// 50 times in a row without any channel sends or receives occurring.
+func (st *serverTester) awaitIdle() {
+ remain := 50
+ last := st.loopNum()
+ for remain > 0 {
+ n := st.loopNum()
+ if n == last+1 {
+ remain--
+ } else {
+ remain = 50
+ }
+ last = n
+ }
+}
+
+func (st *serverTester) Close() {
+ if st.t.Failed() {
+ // If we failed already (and are likely in a Fatal,
+ // unwinding), force close the connection, so the
+ // httptest.Server doesn't wait forever for the conn
+ // to close.
+ if st.cc != nil {
+ st.cc.Close()
+ }
+ }
+ st.ts.Close()
+ if st.cc != nil {
+ st.cc.Close()
+ }
+ log.SetOutput(os.Stderr)
+}
+
+// greet initiates the client's HTTP/2 connection into a state where
+// frames may be sent.
+func (st *serverTester) greet() {
+ st.writePreface()
+ st.writeInitialSettings()
+ st.wantSettings()
+ st.writeSettingsAck()
+ st.wantSettingsAck()
+}
+
+func (st *serverTester) writePreface() {
+ n, err := st.cc.Write(clientPreface)
+ if err != nil {
+ st.t.Fatalf("Error writing client preface: %v", err)
+ }
+ if n != len(clientPreface) {
+ st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
+ }
+}
+
+func (st *serverTester) writeInitialSettings() {
+ if err := st.fr.WriteSettings(); err != nil {
+ st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
+ }
+}
+
+func (st *serverTester) writeSettingsAck() {
+ if err := st.fr.WriteSettingsAck(); err != nil {
+ st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
+ }
+}
+
+func (st *serverTester) writeHeaders(p HeadersFrameParam) {
+ if err := st.fr.WriteHeaders(p); err != nil {
+ st.t.Fatalf("Error writing HEADERS: %v", err)
+ }
+}
+
+func (st *serverTester) encodeHeaderField(k, v string) {
+ err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ if err != nil {
+ st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+ }
+}
+
+// encodeHeaderRaw is the magic-free version of encodeHeader.
+// It takes 0 or more (k, v) pairs and encodes them.
+func (st *serverTester) encodeHeaderRaw(headers ...string) []byte {
+ if len(headers)%2 == 1 {
+ panic("odd number of kv args")
+ }
+ st.headerBuf.Reset()
+ for len(headers) > 0 {
+ k, v := headers[0], headers[1]
+ st.encodeHeaderField(k, v)
+ headers = headers[2:]
+ }
+ return st.headerBuf.Bytes()
+}
+
+// encodeHeader encodes headers and returns their HPACK bytes. headers
+// must contain an even number of key/value pairs. There may be
+// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
+// :scheme headers default to GET, / and https.
+func (st *serverTester) encodeHeader(headers ...string) []byte {
+ if len(headers)%2 == 1 {
+ panic("odd number of kv args")
+ }
+
+ st.headerBuf.Reset()
+
+ if len(headers) == 0 {
+ // Fast path, mostly for benchmarks, so test code doesn't pollute
+ // profiles when we're looking to improve server allocations.
+ st.encodeHeaderField(":method", "GET")
+ st.encodeHeaderField(":path", "/")
+ st.encodeHeaderField(":scheme", "https")
+ return st.headerBuf.Bytes()
+ }
+
+ if len(headers) == 2 && headers[0] == ":method" {
+ // Another fast path for benchmarks.
+ st.encodeHeaderField(":method", headers[1])
+ st.encodeHeaderField(":path", "/")
+ st.encodeHeaderField(":scheme", "https")
+ return st.headerBuf.Bytes()
+ }
+
+ pseudoCount := map[string]int{}
+ keys := []string{":method", ":path", ":scheme"}
+ vals := map[string][]string{
+ ":method": {"GET"},
+ ":path": {"/"},
+ ":scheme": {"https"},
+ }
+ for len(headers) > 0 {
+ k, v := headers[0], headers[1]
+ headers = headers[2:]
+ if _, ok := vals[k]; !ok {
+ keys = append(keys, k)
+ }
+ if strings.HasPrefix(k, ":") {
+ pseudoCount[k]++
+ if pseudoCount[k] == 1 {
+ vals[k] = []string{v}
+ } else {
+ // Allows testing of invalid headers w/ dup pseudo fields.
+ vals[k] = append(vals[k], v)
+ }
+ } else {
+ vals[k] = append(vals[k], v)
+ }
+ }
+ for _, k := range keys {
+ for _, v := range vals[k] {
+ st.encodeHeaderField(k, v)
+ }
+ }
+ return st.headerBuf.Bytes()
+}
+
+// bodylessReq1 writes a HEADERS frame with StreamID 1 and the EndStream and EndHeaders flags set.
+func (st *serverTester) bodylessReq1(headers ...string) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(headers...),
+ EndStream: true,
+ EndHeaders: true,
+ })
+}
+
+func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
+ if err := st.fr.WriteData(streamID, endStream, data); err != nil {
+ st.t.Fatalf("Error writing DATA: %v", err)
+ }
+}
+
+func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
+ if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
+ st.t.Fatalf("Error writing DATA: %v", err)
+ }
+}
+
+func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) {
+ ch := make(chan interface{}, 1)
+ go func() {
+ fr, err := fr.ReadFrame()
+ if err != nil {
+ ch <- err
+ } else {
+ ch <- fr
+ }
+ }()
+ t := time.NewTimer(wait)
+ select {
+ case v := <-ch:
+ t.Stop()
+ if fr, ok := v.(Frame); ok {
+ return fr, nil
+ }
+ return nil, v.(error)
+ case <-t.C:
+ return nil, errors.New("timeout waiting for frame")
+ }
+}
+
+func (st *serverTester) readFrame() (Frame, error) {
+ return readFrameTimeout(st.fr, 2*time.Second)
+}
+
+func (st *serverTester) wantHeaders() *HeadersFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *HeadersFrame", f)
+ }
+ return hf
+}
+
+func (st *serverTester) wantContinuation() *ContinuationFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
+ }
+ cf, ok := f.(*ContinuationFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *ContinuationFrame", f)
+ }
+ return cf
+}
+
+func (st *serverTester) wantData() *DataFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a DATA frame: %v", err)
+ }
+ df, ok := f.(*DataFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *DataFrame", f)
+ }
+ return df
+}
+
+func (st *serverTester) wantSettings() *SettingsFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *SettingsFrame", f)
+ }
+ return sf
+}
+
+func (st *serverTester) wantPing() *PingFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a PING frame: %v", err)
+ }
+ pf, ok := f.(*PingFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *PingFrame", f)
+ }
+ return pf
+}
+
+func (st *serverTester) wantGoAway() *GoAwayFrame {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
+ }
+ gf, ok := f.(*GoAwayFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *GoAwayFrame", f)
+ }
+ return gf
+}
+
+func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
+ }
+ rs, ok := f.(*RSTStreamFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
+ }
+ if rs.FrameHeader.StreamID != streamID {
+ st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
+ }
+ if rs.ErrCode != errCode {
+ st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
+ }
+}
+
+func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
+ }
+ wu, ok := f.(*WindowUpdateFrame)
+ if !ok {
+ st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
+ }
+ if wu.FrameHeader.StreamID != streamID {
+ st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
+ }
+ if wu.Increment != incr {
+ st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
+ }
+}
+
+func (st *serverTester) wantSettingsAck() {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatal(err)
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ st.t.Fatalf("Wanting a settings ACK, received a %T", f)
+ }
+ if !sf.Header().Flags.Has(FlagSettingsAck) {
+ st.t.Fatal("Settings Frame didn't have ACK set")
+ }
+
+}
+
+func TestServer(t *testing.T) {
+ gotReq := make(chan bool, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Foo", "Bar")
+ gotReq <- true
+ })
+ defer st.Close()
+
+ covers("3.5", `
+ The server connection preface consists of a potentially empty
+ SETTINGS frame ([SETTINGS]) that MUST be the first frame the
+ server sends in the HTTP/2 connection.
+ `)
+
+ st.writePreface()
+ st.writeInitialSettings()
+ st.wantSettings()
+ st.writeSettingsAck()
+ st.wantSettingsAck()
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(),
+ EndStream: true, // no DATA frames
+ EndHeaders: true,
+ })
+
+ select {
+ case <-gotReq:
+ case <-time.After(2 * time.Second):
+ t.Error("timeout waiting for request")
+ }
+}
+
+func TestServer_Request_Get(t *testing.T) {
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader("foo-bar", "some-value"),
+ EndStream: true, // no DATA frames
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if r.Method != "GET" {
+ t.Errorf("Method = %q; want GET", r.Method)
+ }
+ if r.URL.Path != "/" {
+ t.Errorf("URL.Path = %q; want /", r.URL.Path)
+ }
+ if r.ContentLength != 0 {
+ t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+ }
+ if r.Close {
+ t.Error("Close = true; want false")
+ }
+ if !strings.Contains(r.RemoteAddr, ":") {
+ t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
+ }
+ if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
+ t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
+ }
+ wantHeader := http.Header{
+ "Foo-Bar": []string{"some-value"},
+ }
+ if !reflect.DeepEqual(r.Header, wantHeader) {
+ t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+ }
+ if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+ t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+ }
+ })
+}
+
+func TestServer_Request_Get_PathSlashes(t *testing.T) {
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":path", "/%2f/"),
+ EndStream: true, // no DATA frames
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if r.RequestURI != "/%2f/" {
+ t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
+ }
+ if r.URL.Path != "///" {
+ t.Errorf("URL.Path = %q; want ///", r.URL.Path)
+ }
+ })
+}
+
+// TODO: add a test with EndStream=true on the HEADERS but setting a
+// Content-Length anyway. Should we just omit it and force it to
+// zero?
+
+func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if r.Method != "POST" {
+ t.Errorf("Method = %q; want POST", r.Method)
+ }
+ if r.ContentLength != 0 {
+ t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+ }
+ if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+ t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+ }
+ })
+}
+
+func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
+ testBodyContents(t, -1, "", func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, true, nil) // just kidding. empty body.
+ })
+}
+
+func TestServer_Request_Post_Body_OneData(t *testing.T) {
+ const content = "Some content"
+ testBodyContents(t, -1, content, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, true, []byte(content))
+ })
+}
+
+func TestServer_Request_Post_Body_TwoData(t *testing.T) {
+ const content = "Some content"
+ testBodyContents(t, -1, content, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, false, []byte(content[:5]))
+ st.writeData(1, true, []byte(content[5:]))
+ })
+}
+
+func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
+ const content = "Some content"
+ testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(
+ ":method", "POST",
+ "content-length", strconv.Itoa(len(content)),
+ ),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, true, []byte(content))
+ })
+}
+
+func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
+ testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
+ func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(
+ ":method", "POST",
+ "content-length", "3",
+ ),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, true, []byte("12"))
+ })
+}
+
+func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
+ testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
+ func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(
+ ":method", "POST",
+ "content-length", "4",
+ ),
+ EndStream: false, // to say DATA frames are coming
+ EndHeaders: true,
+ })
+ st.writeData(1, true, []byte("12345"))
+ })
+}
+
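+// testBodyContents runs a request written by write and checks that the
+// handler sees a POST with the expected Content-Length and body contents.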
+func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
+ testServerRequest(t, write, func(r *http.Request) {
+ if r.Method != "POST" {
+ t.Errorf("Method = %q; want POST", r.Method)
+ }
+ if r.ContentLength != wantContentLength {
+ t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+ }
+ all, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(all) != wantBody {
+ t.Errorf("Read = %q; want %q", all, wantBody)
+ }
+ if err := r.Body.Close(); err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ })
+}
+
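+// testBodyContentsFail runs a request written by write and checks that the
+// handler's body read fails with an error containing wantReadError.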
+func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
+ testServerRequest(t, write, func(r *http.Request) {
+ if r.Method != "POST" {
+ t.Errorf("Method = %q; want POST", r.Method)
+ }
+ if r.ContentLength != wantContentLength {
+ t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+ }
+ all, err := ioutil.ReadAll(r.Body)
+ if err == nil {
+ t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
+ wantReadError, all)
+ }
+ if !strings.Contains(err.Error(), wantReadError) {
+ t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
+ }
+ if err := r.Body.Close(); err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ })
+}
+
+// Using a Host header, instead of :authority
+func TestServer_Request_Get_Host(t *testing.T) {
+ const host = "example.com"
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader("host", host),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if r.Host != host {
+ t.Errorf("Host = %q; want %q", r.Host, host)
+ }
+ })
+}
+
+// Using an :authority pseudo-header, instead of Host
+func TestServer_Request_Get_Authority(t *testing.T) {
+ const host = "example.com"
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":authority", host),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if r.Host != host {
+ t.Errorf("Host = %q; want %q", r.Host, host)
+ }
+ })
+}
+
+func TestServer_Request_WithContinuation(t *testing.T) {
+ wantHeader := http.Header{
+ "Foo-One": []string{"value-one"},
+ "Foo-Two": []string{"value-two"},
+ "Foo-Three": []string{"value-three"},
+ }
+ testServerRequest(t, func(st *serverTester) {
+ fullHeaders := st.encodeHeader(
+ "foo-one", "value-one",
+ "foo-two", "value-two",
+ "foo-three", "value-three",
+ )
+ remain := fullHeaders
+ chunks := 0
+ for len(remain) > 0 {
+ const maxChunkSize = 5
+ chunk := remain
+ if len(chunk) > maxChunkSize {
+ chunk = chunk[:maxChunkSize]
+ }
+ remain = remain[len(chunk):]
+
+ if chunks == 0 {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: chunk,
+ EndStream: true, // no DATA frames
+ EndHeaders: false, // we'll have continuation frames
+ })
+ } else {
+ err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ chunks++
+ }
+ if chunks < 2 {
+ t.Fatal("too few chunks")
+ }
+ }, func(r *http.Request) {
+ if !reflect.DeepEqual(r.Header, wantHeader) {
+ t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+ }
+ })
+}
+
+// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
+func TestServer_Request_CookieConcat(t *testing.T) {
+ const host = "example.com"
+ testServerRequest(t, func(st *serverTester) {
+ st.bodylessReq1(
+ ":authority", host,
+ "cookie", "a=b",
+ "cookie", "c=d",
+ "cookie", "e=f",
+ )
+ }, func(r *http.Request) {
+ const want = "a=b; c=d; e=f"
+ if got := r.Header.Get("Cookie"); got != want {
+ t.Errorf("Cookie = %q; want %q", got, want)
+ }
+ })
+}
+
+func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") })
+}
+
+func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid value" ...
+ testRejectRequest(t, func(st *serverTester) {
+ st.addLogFilter("duplicate pseudo-header")
+ st.bodylessReq1(":method", "GET", ":method", "POST")
+ })
+}
+
+func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All pseudo-header fields MUST appear in the header block
+ // before regular header fields. Any request or response that
+ // contains a pseudo-header field that appears in a header
+ // block after a regular header field MUST be treated as
+ // malformed (Section 8.1.2.6)."
+ testRejectRequest(t, func(st *serverTester) {
+ st.addLogFilter("pseudo-header after regular header")
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+ enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
+ enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
+ enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: buf.Bytes(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
+}
+
+func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
+ testRejectRequest(t, func(st *serverTester) {
+ st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
+ st.bodylessReq1(":unknown_thing", "")
+ })
+}
+
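+// testRejectRequest sends a request that must never reach the handler and
+// expects the server to reset stream 1 with a PROTOCOL_ERROR.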
+func testRejectRequest(t *testing.T, send func(*serverTester)) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ t.Fatal("server request made it to handler; should've been rejected")
+ })
+ defer st.Close()
+
+ st.greet()
+ send(st)
+ st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+func TestServer_Request_Connect(t *testing.T) {
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if g, w := r.Method, "CONNECT"; g != w {
+ t.Errorf("Method = %q; want %q", g, w)
+ }
+ if g, w := r.RequestURI, "example.com:123"; g != w {
+ t.Errorf("RequestURI = %q; want %q", g, w)
+ }
+ if g, w := r.URL.Host, "example.com:123"; g != w {
+ t.Errorf("URL.Host = %q; want %q", g, w)
+ }
+ })
+}
+
+func TestServer_Request_Connect_InvalidPath(t *testing.T) {
+ testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ":path", "/bogus",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
+ testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ":scheme", "https",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+func TestServer_Ping(t *testing.T) {
+ st := newServerTester(t, nil)
+ defer st.Close()
+ st.greet()
+
+ // Server should ignore this one, since it has ACK set.
+ ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+ if err := st.fr.WritePing(true, ackPingData); err != nil {
+ t.Fatal(err)
+ }
+
+ // But the server should reply to this one, since ACK is false.
+ pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+ if err := st.fr.WritePing(false, pingData); err != nil {
+ t.Fatal(err)
+ }
+
+ pf := st.wantPing()
+ if !pf.Flags.Has(FlagPingAck) {
+ t.Error("response ping doesn't have ACK set")
+ }
+ if pf.Data != pingData {
+ t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+ }
+}
+
+func TestServer_RejectsLargeFrames(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("see golang.org/issue/13434")
+ }
+
+ st := newServerTester(t, nil)
+ defer st.Close()
+ st.greet()
+
+	// Write a frame that's too large (too large by one byte).
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the header) and then disconnect.
+ st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+ gf := st.wantGoAway()
+ if gf.ErrCode != ErrCodeFrameSize {
+ t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+ }
+ if st.logBuf.Len() != 0 {
+		// Previously we spun here for a bit until the GOAWAY disconnect
+		// timer fired, logging while we waited.
+ t.Errorf("unexpected server output: %.500s\n", st.logBuf.Bytes())
+ }
+}
+
+func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
+ puppet := newHandlerPuppet()
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ puppet.act(w, r)
+ })
+ defer st.Close()
+ defer puppet.done()
+
+ st.greet()
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // data coming
+ EndHeaders: true,
+ })
+ st.writeData(1, false, []byte("abcdef"))
+ puppet.do(readBodyHandler(t, "abc"))
+ st.wantWindowUpdate(0, 3)
+ st.wantWindowUpdate(1, 3)
+
+ puppet.do(readBodyHandler(t, "def"))
+ st.wantWindowUpdate(0, 3)
+ st.wantWindowUpdate(1, 3)
+
+ st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
+ puppet.do(readBodyHandler(t, "ghi"))
+ puppet.do(readBodyHandler(t, "jkl"))
+ st.wantWindowUpdate(0, 3)
+ st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
+}
+
+// The version of TestServer_Handler_Sends_WindowUpdate with padding.
+// See golang.org/issue/16556
+func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
+ puppet := newHandlerPuppet()
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ puppet.act(w, r)
+ })
+ defer st.Close()
+ defer puppet.done()
+
+ st.greet()
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeDataPadded(1, false, []byte("abcdef"), []byte("1234"))
+
+ // Expect to immediately get our 5 bytes of padding back for
+ // both the connection and stream (4 bytes of padding + 1 byte of length)
+ st.wantWindowUpdate(0, 5)
+ st.wantWindowUpdate(1, 5)
+
+ puppet.do(readBodyHandler(t, "abc"))
+ st.wantWindowUpdate(0, 3)
+ st.wantWindowUpdate(1, 3)
+
+ puppet.do(readBodyHandler(t, "def"))
+ st.wantWindowUpdate(0, 3)
+ st.wantWindowUpdate(1, 3)
+}
+
+func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
+ st := newServerTester(t, nil)
+ defer st.Close()
+ st.greet()
+ if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
+ t.Fatal(err)
+ }
+ gf := st.wantGoAway()
+ if gf.ErrCode != ErrCodeFlowControl {
+ t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
+ }
+ if gf.LastStreamID != 0 {
+ t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
+ }
+}
+
+func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
+ inHandler := make(chan bool)
+ blockHandler := make(chan bool)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ inHandler <- true
+ <-blockHandler
+ })
+ defer st.Close()
+ defer close(blockHandler)
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // keep it open
+ EndHeaders: true,
+ })
+ <-inHandler
+ // Send a bogus window update:
+ if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
+ t.Fatal(err)
+ }
+ st.wantRSTStream(1, ErrCodeFlowControl)
+}
+
+// testServerPostUnblock sends a hanging POST with unsent data to the handler,
+// then runs fn once in the handler, and verifies that the error returned from
+// the handler is acceptable. It fails if it takes over 5 seconds for the handler to exit.
+func testServerPostUnblock(t *testing.T,
+ handler func(http.ResponseWriter, *http.Request) error,
+ fn func(*serverTester),
+ checkErr func(error),
+ otherHeaders ...string) {
+ inHandler := make(chan bool)
+ errc := make(chan error, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ inHandler <- true
+ errc <- handler(w, r)
+ })
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
+ EndStream: false, // keep it open
+ EndHeaders: true,
+ })
+ <-inHandler
+ fn(st)
+ select {
+ case err := <-errc:
+ if checkErr != nil {
+ checkErr(err)
+ }
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for Handler to return")
+ }
+ st.Close()
+}
+
+func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
+ testServerPostUnblock(t,
+ func(w http.ResponseWriter, r *http.Request) (err error) {
+ _, err = r.Body.Read(make([]byte, 1))
+ return
+ },
+ func(st *serverTester) {
+ if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+ t.Fatal(err)
+ }
+ },
+ func(err error) {
+ want := StreamError{StreamID: 0x1, Code: 0x8}
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("Read error = %v; want %v", err, want)
+ }
+ },
+ )
+}
+
+func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
+	// Run this test many times, because the deadlock it guards
+	// against doesn't reproduce reliably in a single run.
+ n := 50
+ if testing.Short() {
+ n = 5
+ }
+ for i := 0; i < n; i++ {
+ testServer_RSTStream_Unblocks_Header_Write(t)
+ }
+}
+
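+// testServer_RSTStream_Unblocks_Header_Write runs a single iteration: the
+// client resets the stream while the handler is blocked, and the handler's
+// subsequent header write and Flush must still complete without deadlocking.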
+func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
+ inHandler := make(chan bool, 1)
+ unblockHandler := make(chan bool, 1)
+ headerWritten := make(chan bool, 1)
+ wroteRST := make(chan bool, 1)
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ inHandler <- true
+ <-wroteRST
+ w.Header().Set("foo", "bar")
+ w.WriteHeader(200)
+ w.(http.Flusher).Flush()
+ headerWritten <- true
+ <-unblockHandler
+ })
+ defer st.Close()
+
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // keep it open
+ EndHeaders: true,
+ })
+ <-inHandler
+ if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+ t.Fatal(err)
+ }
+ wroteRST <- true
+ st.awaitIdle()
+ select {
+ case <-headerWritten:
+ case <-time.After(2 * time.Second):
+ t.Error("timeout waiting for header write")
+ }
+ unblockHandler <- true
+}
+
+func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
+ testServerPostUnblock(t,
+ func(w http.ResponseWriter, r *http.Request) (err error) {
+ _, err = r.Body.Read(make([]byte, 1))
+ return
+ },
+ func(st *serverTester) { st.cc.Close() },
+ func(err error) {
+ if err == nil {
+ t.Error("unexpected nil error from Request.Body.Read")
+ }
+ },
+ )
+}
+
+var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
+ <-w.(http.CloseNotifier).CloseNotify()
+ return nil
+}
+
+func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
+ testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+ if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+ t.Fatal(err)
+ }
+ }, nil)
+}
+
+func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
+ testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
+}
+
+// Test that CloseNotify unblocks after a stream error caused by the
+// client's own mistake, as opposed to the client explicitly canceling
+// the stream (which is TestServer_CloseNotify_After_RSTStream above).
+func TestServer_CloseNotify_After_StreamError(t *testing.T) {
+ testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+ // data longer than declared Content-Length => stream error
+ st.writeData(1, true, []byte("1234"))
+ }, nil, "content-length", "3")
+}
+
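+// TestServer_StateTransitions observes the stream state machine from the
+// handler's point of view: idle before HEADERS, open inside the handler,
+// half-closed (remote) after the final DATA frame, and closed (with the
+// stream removed) once the handler returns.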
+func TestServer_StateTransitions(t *testing.T) {
+ var st *serverTester
+ inHandler := make(chan bool)
+ writeData := make(chan bool)
+ leaveHandler := make(chan bool)
+ st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ inHandler <- true
+ if st.stream(1) == nil {
+ t.Errorf("nil stream 1 in handler")
+ }
+ if got, want := st.streamState(1), stateOpen; got != want {
+ t.Errorf("in handler, state is %v; want %v", got, want)
+ }
+ writeData <- true
+ if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
+ t.Errorf("body read = %d, %v; want 0, EOF", n, err)
+ }
+ if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
+ t.Errorf("in handler, state is %v; want %v", got, want)
+ }
+
+ <-leaveHandler
+ })
+ st.greet()
+ if st.stream(1) != nil {
+ t.Fatal("stream 1 should be empty")
+ }
+ if got := st.streamState(1); got != stateIdle {
+ t.Fatalf("stream 1 should be idle; got %v", got)
+ }
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false, // keep it open
+ EndHeaders: true,
+ })
+ <-inHandler
+ <-writeData
+ st.writeData(1, true, nil)
+
+ leaveHandler <- true
+ hf := st.wantHeaders()
+ if !hf.StreamEnded() {
+ t.Fatal("expected END_STREAM flag")
+ }
+
+ if got, want := st.streamState(1), stateClosed; got != want {
+ t.Errorf("at end, state is %v; want %v", got, want)
+ }
+ if st.stream(1) != nil {
+ t.Fatal("at end, stream 1 should be gone")
+ }
+}
+
+// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
+ })
+ st.writeHeaders(HeadersFrameParam{ // Not a continuation.
+ StreamID: 3, // different stream.
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+// test HEADERS w/o EndHeaders + PING (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
+ })
+ if err := st.fr.WritePing(false, [8]byte{}); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
+func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders()
+ if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
+func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: false,
+ })
+ if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// No HEADERS on stream 0.
+func TestServer_Rejects_Headers0(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.fr.AllowIllegalWrites = true
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 0,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+// No CONTINUATION on stream 0.
+func TestServer_Rejects_Continuation0(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ st.fr.AllowIllegalWrites = true
+ if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+func TestServer_Rejects_PushPromise(t *testing.T) {
+ testServerRejectsConn(t, func(st *serverTester) {
+ pp := PushPromiseParam{
+ StreamID: 1,
+ PromiseID: 3,
+ }
+ if err := st.fr.WritePushPromise(pp); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// testServerRejectsConn tests that the server hangs up with a GOAWAY
+// frame and a server close after the client does something
+// deserving a CONNECTION_ERROR.
+func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+ st.addLogFilter("connection error: PROTOCOL_ERROR")
+ defer st.Close()
+ st.greet()
+ writeReq(st)
+
+ st.wantGoAway()
+ errc := make(chan error, 1)
+ go func() {
+ fr, err := st.fr.ReadFrame()
+ if err == nil {
+ err = fmt.Errorf("got frame of type %T", fr)
+ }
+ errc <- err
+ }()
+ select {
+ case err := <-errc:
+ if err != io.EOF {
+ t.Errorf("ReadFrame = %v; want io.EOF", err)
+ }
+ case <-time.After(2 * time.Second):
+ t.Error("timeout waiting for disconnect")
+ }
+}
+
+// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
+// error code after a client sends a bogus request.
+func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+ defer st.Close()
+ st.greet()
+ writeReq(st)
+ st.wantRSTStream(1, code)
+}
+
+// testServerRequest sets up an idle HTTP/2 connection and lets you
+// write a single request with writeReq, and then verify that the
+// *http.Request is built correctly in checkReq.
+func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
+ gotReq := make(chan bool, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ if r.Body == nil {
+ t.Fatal("nil Body")
+ }
+ checkReq(r)
+ gotReq <- true
+ })
+ defer st.Close()
+
+ st.greet()
+ writeReq(st)
+
+ select {
+ case <-gotReq:
+ case <-time.After(2 * time.Second):
+ t.Error("timeout waiting for request")
+ }
+}
+
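+// getSlash sends the default bodyless request, a GET for /, on stream 1.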
+func getSlash(st *serverTester) { st.bodylessReq1() }
+
+func TestServer_Response_NoData(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ // Nothing.
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if !hf.StreamEnded() {
+ t.Fatal("want END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ })
+}
+
+func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Foo-Bar", "some-value")
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if !hf.StreamEnded() {
+ t.Fatal("want END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"foo-bar", "some-value"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", "0"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ })
+}
+
+func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
+ const msg = "<html>this is HTML."
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Content-Type", "foo/bar")
+ io.WriteString(w, msg)
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("don't want END_STREAM, expecting data")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "foo/bar"},
+ {"content-length", strconv.Itoa(len(msg))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ df := st.wantData()
+ if !df.StreamEnded() {
+ t.Error("expected DATA to have END_STREAM flag")
+ }
+ if got := string(df.Data()); got != msg {
+ t.Errorf("got DATA %q; want %q", got, msg)
+ }
+ })
+}
+
+func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
+ const msg = "hi"
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
+ io.WriteString(w, msg)
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", strconv.Itoa(len(msg))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ })
+}
+
+// Header accessed only after the initial write.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
+ const msg = "<html>this is HTML."
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ io.WriteString(w, msg)
+ w.Header().Set("foo", "should be ignored")
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", strconv.Itoa(len(msg))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ })
+}
+
+// Header accessed before the initial write and later mutated.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
+ const msg = "<html>this is HTML."
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("foo", "proper value")
+ io.WriteString(w, msg)
+ w.Header().Set("foo", "should be ignored")
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"foo", "proper value"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", strconv.Itoa(len(msg))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ })
+}
+
+func TestServer_Response_Data_SniffLenType(t *testing.T) {
+ const msg = "<html>this is HTML."
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ io.WriteString(w, msg)
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("don't want END_STREAM, expecting data")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", strconv.Itoa(len(msg))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ df := st.wantData()
+ if !df.StreamEnded() {
+ t.Error("expected DATA to have END_STREAM flag")
+ }
+ if got := string(df.Data()); got != msg {
+ t.Errorf("got DATA %q; want %q", got, msg)
+ }
+ })
+}
+
+func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
+ const msg = "<html>this is HTML"
+ const msg2 = ", and this is the next chunk"
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ io.WriteString(w, msg)
+ w.(http.Flusher).Flush()
+ io.WriteString(w, msg2)
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/html; charset=utf-8"}, // sniffed
+ // and no content-length
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ {
+ df := st.wantData()
+ if df.StreamEnded() {
+ t.Error("unexpected END_STREAM flag")
+ }
+ if got := string(df.Data()); got != msg {
+ t.Errorf("got DATA %q; want %q", got, msg)
+ }
+ }
+ {
+ df := st.wantData()
+ if !df.StreamEnded() {
+ t.Error("wanted END_STREAM flag on last data chunk")
+ }
+ if got := string(df.Data()); got != msg2 {
+ t.Errorf("got DATA %q; want %q", got, msg2)
+ }
+ }
+ })
+}
+
+func TestServer_Response_LargeWrite(t *testing.T) {
+ const size = 1 << 20
+ const maxFrameSize = 16 << 10
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ n, err := w.Write(bytes.Repeat([]byte("a"), size))
+ if err != nil {
+ return fmt.Errorf("Write error: %v", err)
+ }
+ if n != size {
+ return fmt.Errorf("wrong size %d from Write", n)
+ }
+ return nil
+ }, func(st *serverTester) {
+ if err := st.fr.WriteSettings(
+ Setting{SettingInitialWindowSize, 0},
+ Setting{SettingMaxFrameSize, maxFrameSize},
+ ); err != nil {
+ t.Fatal(err)
+ }
+ st.wantSettingsAck()
+
+ getSlash(st) // make the single request
+
+ // Give the handler quota to write:
+ if err := st.fr.WriteWindowUpdate(1, size); err != nil {
+ t.Fatal(err)
+ }
+ // Give the handler quota to write to connection-level
+ // window as well
+ if err := st.fr.WriteWindowUpdate(0, size); err != nil {
+ t.Fatal(err)
+ }
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/plain; charset=utf-8"}, // sniffed
+ // and no content-length
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+ var bytes, frames int
+ for {
+ df := st.wantData()
+ bytes += len(df.Data())
+ frames++
+ for _, b := range df.Data() {
+ if b != 'a' {
+ t.Fatal("non-'a' byte seen in DATA")
+ }
+ }
+ if df.StreamEnded() {
+ break
+ }
+ }
+ if bytes != size {
+ t.Errorf("Got %d bytes; want %d", bytes, size)
+ }
+ if want := int(size / maxFrameSize); frames < want || frames > want*2 {
+			t.Errorf("Got %d frames; want %d", frames, want)
+ }
+ })
+}
+
+// Test that the handler can't write more than the client allows
+func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
+ // Make these reads. Before each read, the client adds exactly enough
+ // flow-control to satisfy the read. Numbers chosen arbitrarily.
+ reads := []int{123, 1, 13, 127}
+ size := 0
+ for _, n := range reads {
+ size += n
+ }
+
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.(http.Flusher).Flush()
+ n, err := w.Write(bytes.Repeat([]byte("a"), size))
+ if err != nil {
+ return fmt.Errorf("Write error: %v", err)
+ }
+ if n != size {
+ return fmt.Errorf("wrong size %d from Write", n)
+ }
+ return nil
+ }, func(st *serverTester) {
+ // Set the window size to something explicit for this test.
+ // It's also how much initial data we expect.
+ if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {
+ t.Fatal(err)
+ }
+ st.wantSettingsAck()
+
+ getSlash(st) // make the single request
+
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+
+ df := st.wantData()
+ if got := len(df.Data()); got != reads[0] {
+ t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
+ }
+
+ for _, quota := range reads[1:] {
+ if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
+ t.Fatal(err)
+ }
+ df := st.wantData()
+ if int(quota) != len(df.Data()) {
+ t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
+ }
+ }
+ })
+}
+
+// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
+ const size = 1 << 20
+ const maxFrameSize = 16 << 10
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.(http.Flusher).Flush()
+ errc := make(chan error, 1)
+ go func() {
+ _, err := w.Write(bytes.Repeat([]byte("a"), size))
+ errc <- err
+ }()
+ select {
+ case err := <-errc:
+ if err == nil {
+ return errors.New("unexpected nil error from Write in handler")
+ }
+ return nil
+ case <-time.After(2 * time.Second):
+ return errors.New("timeout waiting for Write in handler")
+ }
+ }, func(st *serverTester) {
+ if err := st.fr.WriteSettings(
+ Setting{SettingInitialWindowSize, 0},
+ Setting{SettingMaxFrameSize, maxFrameSize},
+ ); err != nil {
+ t.Fatal(err)
+ }
+ st.wantSettingsAck()
+
+ getSlash(st) // make the single request
+
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+
+ if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.(http.Flusher).Flush()
+ // Nothing; send empty DATA
+ return nil
+ }, func(st *serverTester) {
+ // Handler gets no data quota:
+ if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
+ t.Fatal(err)
+ }
+ st.wantSettingsAck()
+
+ getSlash(st) // make the single request
+
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+
+ df := st.wantData()
+ if got := len(df.Data()); got != 0 {
+ t.Fatalf("unexpected %d DATA bytes; want 0", got)
+ }
+ if !df.StreamEnded() {
+ t.Fatal("DATA didn't have END_STREAM")
+ }
+ })
+}
+
+func TestServer_Response_Automatic100Continue(t *testing.T) {
+ const msg = "foo"
+ const reply = "bar"
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ if v := r.Header.Get("Expect"); v != "" {
+ t.Errorf("Expect header = %q; want empty", v)
+ }
+ buf := make([]byte, len(msg))
+ // This read should trigger the 100-continue being sent.
+ if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
+ return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
+ }
+ _, err := io.WriteString(w, reply)
+ return err
+ }, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "100"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Fatalf("Got headers %v; want %v", goth, wanth)
+ }
+
+ // Okay, they sent status 100, so we can send our
+ // gigantic and/or sensitive "foo" payload now.
+ st.writeData(1, true, []byte(msg))
+
+ st.wantWindowUpdate(0, uint32(len(msg)))
+
+ hf = st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("expected data to follow")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ goth = st.decodeHeader(hf.HeaderBlockFragment())
+ wanth = [][2]string{
+ {":status", "200"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", strconv.Itoa(len(reply))},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+
+ df := st.wantData()
+ if string(df.Data()) != reply {
+ t.Errorf("Client read %q; want %q", df.Data(), reply)
+ }
+ if !df.StreamEnded() {
+ t.Errorf("expect data stream end")
+ }
+ })
+}
+
+func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
+ errc := make(chan error, 1)
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ p := []byte("some data.\n")
+ for {
+ _, err := w.Write(p)
+ if err != nil {
+ errc <- err
+ return nil
+ }
+ }
+ }, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("unexpected END_STREAM flag")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("want END_HEADERS flag")
+ }
+ // Close the connection and wait for the handler to (hopefully) notice.
+ st.cc.Close()
+ select {
+ case <-errc:
+ case <-time.After(5 * time.Second):
+ t.Error("timeout")
+ }
+ })
+}
+
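+// TestServer_Rejects_Too_Many_Streams opens the maximum number of concurrent
+// streams, verifies that the next stream is refused with a PROTOCOL_ERROR
+// RST_STREAM (while its header block is still decoded, keeping HPACK state
+// in sync), and then checks that a new stream can start once a handler
+// finishes.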
+func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
+ const testPath = "/some/path"
+
+ inHandler := make(chan uint32)
+ leaveHandler := make(chan bool)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ id := w.(*responseWriter).rws.stream.id
+ inHandler <- id
+ if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
+ t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
+ }
+ <-leaveHandler
+ })
+ defer st.Close()
+ st.greet()
+ nextStreamID := uint32(1)
+ streamID := func() uint32 {
+ defer func() { nextStreamID += 2 }()
+ return nextStreamID
+ }
+ sendReq := func(id uint32, headers ...string) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(headers...),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }
+ for i := 0; i < defaultMaxStreams; i++ {
+ sendReq(streamID())
+ <-inHandler
+ }
+ defer func() {
+ for i := 0; i < defaultMaxStreams; i++ {
+ leaveHandler <- true
+ }
+ }()
+
+ // And this one should cross the limit:
+ // (It's also sent as a CONTINUATION, to verify we still track the decoder context,
+ // even if we're rejecting it)
+ rejectID := streamID()
+ headerBlock := st.encodeHeader(":path", testPath)
+ frag1, frag2 := headerBlock[:3], headerBlock[3:]
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: rejectID,
+ BlockFragment: frag1,
+ EndStream: true,
+ EndHeaders: false, // CONTINUATION coming
+ })
+ if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
+ t.Fatal(err)
+ }
+ st.wantRSTStream(rejectID, ErrCodeProtocol)
+
+ // But let a handler finish:
+ leaveHandler <- true
+ st.wantHeaders()
+
+ // And now another stream should be able to start:
+ goodID := streamID()
+ sendReq(goodID, ":path", testPath)
+ select {
+ case got := <-inHandler:
+ if got != goodID {
+ t.Errorf("Got stream %d; want %d", got, goodID)
+ }
+ case <-time.After(3 * time.Second):
+ t.Error("timeout waiting for handler")
+ }
+}
+
+// So many response headers that the server needs to use CONTINUATION frames:
+func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ h := w.Header()
+ for i := 0; i < 5000; i++ {
+ h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
+ }
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.HeadersEnded() {
+ t.Fatal("got unwanted END_HEADERS flag")
+ }
+ n := 0
+ for {
+ n++
+ cf := st.wantContinuation()
+ if cf.HeadersEnded() {
+ break
+ }
+ }
+ if n < 5 {
+ t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
+ }
+ })
+}
+
+// This previously crashed (reported by Mathieu Lonjaret as observed
+// while using Camlistore) because we got a DATA frame from the client
+// after the handler exited and our logic at the time was wrong,
+// keeping a stream in the map in stateClosed, which tickled an
+// invariant check later when we tried to remove that stream (via
+// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
+// ended.
+func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ // nothing
+ return nil
+ }, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: false, // DATA is coming
+ EndHeaders: true,
+ })
+ hf := st.wantHeaders()
+ if !hf.HeadersEnded() || !hf.StreamEnded() {
+ t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
+ }
+
+		// Sent when a Handler closes while the client has
+		// indicated it's still sending DATA:
+ st.wantRSTStream(1, ErrCodeCancel)
+
+ // Now the handler has ended, so it's ended its
+ // stream, but the client hasn't closed its side
+ // (stateClosedLocal). So send more data and verify
+ // it doesn't crash with an internal invariant panic, like
+ // it did before.
+ st.writeData(1, true, []byte("foo"))
+
+ // Get our flow control bytes back, since the handler didn't get them.
+ st.wantWindowUpdate(0, uint32(len("foo")))
+
+ // Sent after a peer sends data anyway (admittedly the
+ // previous RST_STREAM might've still been in-flight),
+ // but they'll get the more friendly 'cancel' code
+ // first.
+ st.wantRSTStream(1, ErrCodeStreamClosed)
+
+ // Set up a bunch of machinery to record the panic we saw
+ // previously.
+ var (
+ panMu sync.Mutex
+ panicVal interface{}
+ )
+
+ testHookOnPanicMu.Lock()
+ testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
+ panMu.Lock()
+ panicVal = pv
+ panMu.Unlock()
+ return true
+ }
+ testHookOnPanicMu.Unlock()
+
+ // Now force the serve loop to end, via closing the connection.
+ st.cc.Close()
+ select {
+ case <-st.sc.doneServing:
+ // Loop has exited.
+ panMu.Lock()
+ got := panicVal
+ panMu.Unlock()
+ if got != nil {
+ t.Errorf("Got panic: %v", got)
+ }
+ case <-time.After(5 * time.Second):
+ t.Error("timeout")
+ }
+ })
+}
+
+func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
+func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
+
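+// testRejectTLS connects with the client's maximum TLS version capped at max
+// and expects the server to reply with an INADEQUATE_SECURITY GOAWAY.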
+func testRejectTLS(t *testing.T, max uint16) {
+ st := newServerTester(t, nil, func(c *tls.Config) {
+ c.MaxVersion = max
+ })
+ defer st.Close()
+ gf := st.wantGoAway()
+ if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+ t.Errorf("Got error code %v; want %v", got, want)
+ }
+}
+
+func TestServer_Rejects_TLSBadCipher(t *testing.T) {
+ st := newServerTester(t, nil, func(c *tls.Config) {
+ // Only list bad ones:
+ c.CipherSuites = []uint16{
+ tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ }
+ })
+ defer st.Close()
+ gf := st.wantGoAway()
+ if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+ t.Errorf("Got error code %v; want %v", got, want)
+ }
+}
+
+func TestServer_Advertises_Common_Cipher(t *testing.T) {
+ const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ st := newServerTester(t, nil, func(c *tls.Config) {
+ // Have the client only support the one required by the spec.
+ c.CipherSuites = []uint16{requiredSuite}
+ }, func(ts *httptest.Server) {
+ var srv *http.Server = ts.Config
+ // Have the server configured with no specific cipher suites.
+ // This tests that Go's defaults include the required one.
+ srv.TLSConfig = nil
+ })
+ defer st.Close()
+ st.greet()
+}
+
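+// onHeaderField records each decoded header field, skipping the date header
+// so that decoded responses compare deterministically.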
+func (st *serverTester) onHeaderField(f hpack.HeaderField) {
+ if f.Name == "date" {
+ return
+ }
+ st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})
+}
+
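+// decodeHeader decodes an HPACK header block and returns the header fields
+// as (name, value) pairs in the order they were decoded.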
+func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {
+ st.decodedHeaders = nil
+ if _, err := st.hpackDec.Write(headerBlock); err != nil {
+ st.t.Fatalf("hpack decoding error: %v", err)
+ }
+ if err := st.hpackDec.Close(); err != nil {
+ st.t.Fatalf("hpack decoding error: %v", err)
+ }
+ return st.decodedHeaders
+}
+
+// testServerResponse sets up an idle HTTP/2 connection. The client function should
+// write a single request that must be handled by the handler. This waits up to 5s
+// for the client to return, then up to an additional 2s for the handler to return.
+func testServerResponse(t testing.TB,
+ handler func(http.ResponseWriter, *http.Request) error,
+ client func(*serverTester),
+) {
+ errc := make(chan error, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ if r.Body == nil {
+ t.Fatal("nil Body")
+ }
+ errc <- handler(w, r)
+ })
+ defer st.Close()
+
+ donec := make(chan bool)
+ go func() {
+ defer close(donec)
+ st.greet()
+ client(st)
+ }()
+
+ select {
+ case <-donec:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout in client")
+ }
+
+ select {
+ case err := <-errc:
+ if err != nil {
+ t.Fatalf("Error in handler: %v", err)
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout in handler")
+ }
+}
+
+// readBodyHandler returns an http Handler func that reads len(want)
+// bytes from r.Body and fails t if the contents read were not
+// the value of want.
+func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ buf := make([]byte, len(want))
+ _, err := io.ReadFull(r.Body, buf)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if string(buf) != want {
+ t.Errorf("read %q; want %q", buf, want)
+ }
+ }
+}
+
+// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
+// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
+// http://sourceforge.net/p/curl/bugs/1472/
+func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) }
+func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
+
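+// testServerWithCurl starts a TLS test server and fetches it with the curl
+// binary running in a Docker container, verifying the response headers and
+// body that curl reports.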
+func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
+ if runtime.GOOS != "linux" {
+ t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
+ }
+ if testing.Short() {
+ t.Skip("skipping curl test in short mode")
+ }
+ requireCurl(t)
+ var gotConn int32
+ testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
+
+ const msg = "Hello from curl!\n"
+ ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Foo", "Bar")
+ w.Header().Set("Client-Proto", r.Proto)
+ io.WriteString(w, msg)
+ }))
+ ConfigureServer(ts.Config, &Server{
+ PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
+ })
+ ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+ ts.StartTLS()
+ defer ts.Close()
+
+ t.Logf("Running test server for curl to hit at: %s", ts.URL)
+ container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
+ defer kill(container)
+ resc := make(chan interface{}, 1)
+ go func() {
+ res, err := dockerLogs(container)
+ if err != nil {
+ resc <- err
+ } else {
+ resc <- res
+ }
+ }()
+ select {
+ case res := <-resc:
+ if err, ok := res.(error); ok {
+ t.Fatal(err)
+ }
+ body := string(res.([]byte))
+		// Search for both "key: value" and "key:value", since curl changed its output format.
+		// Our Dockerfile contains the latest version (no space), but in case people
+		// haven't rebuilt, check both.
+ if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") {
+ t.Errorf("didn't see foo: Bar header")
+ t.Logf("Got: %s", body)
+ }
+ if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") {
+ t.Errorf("didn't see client-proto: HTTP/2 header")
+ t.Logf("Got: %s", res)
+ }
+ if !strings.Contains(string(res.([]byte)), msg) {
+ t.Errorf("didn't see %q content", msg)
+ t.Logf("Got: %s", res)
+ }
+ case <-time.After(3 * time.Second):
+ t.Errorf("timeout waiting for curl")
+ }
+
+ if atomic.LoadInt32(&gotConn) == 0 {
+ t.Error("never saw an http2 connection")
+ }
+}
+
+var doh2load = flag.Bool("h2load", false, "Run h2load test")
+
+func TestServerWithH2Load(t *testing.T) {
+ if !*doh2load {
+ t.Skip("Skipping without --h2load flag.")
+ }
+ if runtime.GOOS != "linux" {
+ t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
+ }
+ requireH2load(t)
+
+ msg := strings.Repeat("Hello, h2load!\n", 5000)
+ ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ w.(http.Flusher).Flush()
+ io.WriteString(w, msg)
+ }))
+ ts.StartTLS()
+ defer ts.Close()
+
+ cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl",
+ "-n100000", "-c100", "-m100", ts.URL)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Issue 12843
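+// TestServerDoS_MaxHeaderListSize sends roughly 1MB of repeated cookie header
+// data in CONTINUATION frames and expects a 431 (Request Header Fields Too
+// Large) response rather than unbounded memory growth.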
+func TestServerDoS_MaxHeaderListSize(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+ defer st.Close()
+
+ // shake hands
+ st.writePreface()
+ st.writeInitialSettings()
+ frameSize := defaultMaxReadFrameSize
+ var advHeaderListSize *uint32
+ st.wantSettings().ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ if s.Val < minMaxFrameSize {
+ frameSize = minMaxFrameSize
+ } else if s.Val > maxFrameSize {
+ frameSize = maxFrameSize
+ } else {
+ frameSize = int(s.Val)
+ }
+ case SettingMaxHeaderListSize:
+ advHeaderListSize = &s.Val
+ }
+ return nil
+ })
+ st.writeSettingsAck()
+ st.wantSettingsAck()
+
+ if advHeaderListSize == nil {
+ t.Errorf("server didn't advertise a max header list size")
+ } else if *advHeaderListSize == 0 {
+ t.Errorf("server advertised a max header list size of 0")
+ }
+
+ st.encodeHeaderField(":method", "GET")
+ st.encodeHeaderField(":path", "/")
+ st.encodeHeaderField(":scheme", "https")
+ cookie := strings.Repeat("*", 4058)
+ st.encodeHeaderField("cookie", cookie)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.headerBuf.Bytes(),
+ EndStream: true,
+ EndHeaders: false,
+ })
+
+ // Capture the short encoding of a duplicate ~4K cookie, now
+ // that we've already sent it once.
+ st.headerBuf.Reset()
+ st.encodeHeaderField("cookie", cookie)
+
+ // Now send 1MB of it.
+ const size = 1 << 20
+ b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len())
+ for len(b) > 0 {
+ chunk := b
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ b = b[len(chunk):]
+ st.fr.WriteContinuation(1, len(b) == 0, chunk)
+ }
+
+ h := st.wantHeaders()
+ if !h.HeadersEnded() {
+ t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
+ }
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "431"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", "63"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+}
+
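+// TestCompressionErrorOnWrite verifies that a header value at exactly the
+// framer's max header string length still yields a 431 response (keeping the
+// HPACK compression context valid), while one byte over that limit triggers
+// a COMPRESSION_ERROR GOAWAY.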
+func TestCompressionErrorOnWrite(t *testing.T) {
+ const maxStrLen = 8 << 10
+ var serverConfig *http.Server
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ }, func(ts *httptest.Server) {
+ serverConfig = ts.Config
+ serverConfig.MaxHeaderBytes = maxStrLen
+ })
+ st.addLogFilter("connection error: COMPRESSION_ERROR")
+ defer st.Close()
+ st.greet()
+
+ maxAllowed := st.sc.framer.maxHeaderStringLen()
+
+	// Crank this up now that the connection is established and the
+	// hpack.Decoder's max string length has been initialized from
+	// the earlier low ~8K value. We want this higher so we don't
+	// hit the max header list size; we only want to test hitting
+	// the max string size.
+ serverConfig.MaxHeaderBytes = 1 << 20
+
+ // First a request with a header that's exactly the max allowed size
+ // for the hpack compression. It's still too long for the header list
+ // size, so we'll get the 431 error, but that keeps the compression
+ // context still valid.
+ hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed))
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ if !h.HeadersEnded() {
+ t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
+ }
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "431"},
+ {"content-type", "text/html; charset=utf-8"},
+ {"content-length", "63"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+ df := st.wantData()
+ if !strings.Contains(string(df.Data()), "HTTP Error 431") {
+ t.Errorf("Unexpected data body: %q", df.Data())
+ }
+ if !df.StreamEnded() {
+ t.Fatalf("expect data stream end")
+ }
+
+ // And now send one that's just one byte too big.
+ hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1))
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 3,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeCompression {
+ t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
+ }
+}
+
+func TestCompressionErrorOnClose(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ })
+ st.addLogFilter("connection error: COMPRESSION_ERROR")
+ defer st.Close()
+ st.greet()
+
+ hbf := st.encodeHeader("foo", "bar")
+ hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ ga := st.wantGoAway()
+ if ga.ErrCode != ErrCodeCompression {
+ t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
+ }
+}
+
+// test that a server handler can read trailers from a client
+func TestServerReadsTrailers(t *testing.T) {
+ const testBody = "some test body"
+ writeReq := func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(1, false, []byte(testBody))
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeaderRaw(
+ "foo", "foov",
+ "bar", "barv",
+ "baz", "bazv",
+ "surprise", "wasn't declared; shouldn't show up",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }
+ checkReq := func(r *http.Request) {
+ wantTrailer := http.Header{
+ "Foo": nil,
+ "Bar": nil,
+ "Baz": nil,
+ }
+ if !reflect.DeepEqual(r.Trailer, wantTrailer) {
+ t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
+ }
+ slurp, err := ioutil.ReadAll(r.Body)
+ if string(slurp) != testBody {
+ t.Errorf("read body %q; want %q", slurp, testBody)
+ }
+ if err != nil {
+ t.Fatalf("Body slurp: %v", err)
+ }
+ wantTrailerAfter := http.Header{
+ "Foo": {"foov"},
+ "Bar": {"barv"},
+ "Baz": {"bazv"},
+ }
+ if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {
+ t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter)
+ }
+ }
+ testServerRequest(t, writeReq, checkReq)
+}
+
+// test that a server handler can send trailers
+func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) }
+func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }
+
+func testServerWritesTrailers(t *testing.T, withFlush bool) {
+ // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B")
+ w.Header().Add("Trailer", "Server-Trailer-C")
+ w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered
+
+ // Regular headers:
+ w.Header().Set("Foo", "Bar")
+ w.Header().Set("Content-Length", "5") // len("Hello")
+
+ io.WriteString(w, "Hello")
+ if withFlush {
+ w.(http.Flusher).Flush()
+ }
+ w.Header().Set("Server-Trailer-A", "valuea")
+ w.Header().Set("Server-Trailer-C", "valuec") // skipping B
+ // After a flush, random keys like Server-Surprise shouldn't show up:
+ w.Header().Set("Server-Surpise", "surprise! this isn't predeclared!")
+ // But we do permit promoting keys to trailers after a
+ // flush if they start with the magic
+ // otherwise-invalid "Trailer:" prefix:
+ w.Header().Set("Trailer:Post-Header-Trailer", "hi1")
+ w.Header().Set("Trailer:post-header-trailer2", "hi2")
+ w.Header().Set("Trailer:Range", "invalid")
+ w.Header().Set("Trailer:Foo\x01Bogus", "invalid")
+ w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40")
+ w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40")
+ w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40")
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if hf.StreamEnded() {
+ t.Fatal("response HEADERS had END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("response HEADERS didn't have END_HEADERS")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"foo", "Bar"},
+ {"trailer", "Server-Trailer-A, Server-Trailer-B"},
+ {"trailer", "Server-Trailer-C"},
+ {"trailer", "Transfer-Encoding, Content-Length, Trailer"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", "5"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ df := st.wantData()
+ if string(df.Data()) != "Hello" {
+ t.Fatalf("Client read %q; want Hello", df.Data())
+ }
+ if df.StreamEnded() {
+ t.Fatalf("data frame had STREAM_ENDED")
+ }
+ tf := st.wantHeaders() // for the trailers
+ if !tf.StreamEnded() {
+ t.Fatalf("trailers HEADERS lacked END_STREAM")
+ }
+ if !tf.HeadersEnded() {
+ t.Fatalf("trailers HEADERS lacked END_HEADERS")
+ }
+ wanth = [][2]string{
+ {"post-header-trailer", "hi1"},
+ {"post-header-trailer2", "hi2"},
+ {"server-trailer-a", "valuea"},
+ {"server-trailer-c", "valuec"},
+ }
+ goth = st.decodeHeader(tf.HeaderBlockFragment())
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ })
+}
+
+// validate transmitted header field names & values
+// golang.org/issue/14048
+func TestServerDoesntWriteInvalidHeaders(t *testing.T) {
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Add("OK1", "x")
+ w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key
+ w.Header().Add("Bad1\x00", "x") // null in key
+ w.Header().Add("Bad2", "x\x00y") // null in value
+ return nil
+ }, func(st *serverTester) {
+ getSlash(st)
+ hf := st.wantHeaders()
+ if !hf.StreamEnded() {
+ t.Error("response HEADERS lacked END_STREAM")
+ }
+ if !hf.HeadersEnded() {
+ t.Fatal("response HEADERS didn't have END_HEADERS")
+ }
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "200"},
+ {"ok1", "x"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"content-length", "0"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
+ }
+ })
+}
+
+func BenchmarkServerGets(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+
+ const msg = "Hello, world"
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+
+ // Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ id := 1 + uint32(i)*2
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders()
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+ }
+}
+
+func BenchmarkServerPosts(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+
+ const msg = "Hello, world"
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+
+ // Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ id := 1 + uint32(i)*2
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(id, true, nil)
+ st.wantHeaders()
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+ }
+}
+
+// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
+// Verify we don't hang.
+func TestIssue53(t *testing.T) {
+ const data = "PRI * HTTP/2.0\r\n\r\nSM" +
+ "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad"
+ s := &http.Server{
+ ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags),
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte("hello"))
+ }),
+ }
+ s2 := &Server{
+ MaxReadFrameSize: 1 << 16,
+ PermitProhibitedCipherSuites: true,
+ }
+ c := &issue53Conn{[]byte(data), false, false}
+ s2.ServeConn(c, &ServeConnOpts{BaseConfig: s})
+ if !c.closed {
+ t.Fatal("connection is not closed")
+ }
+}
+
+type issue53Conn struct {
+ data []byte
+ closed bool
+ written bool
+}
+
+func (c *issue53Conn) Read(b []byte) (n int, err error) {
+ if len(c.data) == 0 {
+ return 0, io.EOF
+ }
+ n = copy(b, c.data)
+ c.data = c.data[n:]
+ return
+}
+
+func (c *issue53Conn) Write(b []byte) (n int, err error) {
+ c.written = true
+ return len(b), nil
+}
+
+func (c *issue53Conn) Close() error {
+ c.closed = true
+ return nil
+}
+
+func (c *issue53Conn) LocalAddr() net.Addr {
+ return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) RemoteAddr() net.Addr {
+ return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) SetDeadline(t time.Time) error { return nil }
+func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil }
+func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }
+
+// golang.org/issue/12895
+func TestConfigureServer(t *testing.T) {
+ tests := []struct {
+ name string
+ tlsConfig *tls.Config
+ wantErr string
+ }{
+ {
+ name: "empty server",
+ },
+ {
+ name: "just the required cipher suite",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+ },
+ },
+ {
+ name: "missing required cipher suite",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
+ },
+ wantErr: "is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ },
+ {
+ name: "required after bad",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+ },
+ wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after",
+ },
+ {
+ name: "bad after required",
+ tlsConfig: &tls.Config{
+ CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},
+ },
+ },
+ }
+ for _, tt := range tests {
+ srv := &http.Server{TLSConfig: tt.tlsConfig}
+ err := ConfigureServer(srv, nil)
+ if (err != nil) != (tt.wantErr != "") {
+ if tt.wantErr != "" {
+ t.Errorf("%s: success, but want error", tt.name)
+ } else {
+ t.Errorf("%s: unexpected error: %v", tt.name, err)
+ }
+ }
+ if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
+ t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr)
+ }
+ if err == nil && !srv.TLSConfig.PreferServerCipherSuites {
+ t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name)
+ }
+ }
+}
+
+func TestServerRejectHeadWithBody(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body.
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "HEAD"),
+ EndStream: false, // what we're testing, a bogus HEAD request with body
+ EndHeaders: true,
+ })
+ st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+func TestServerNoAutoContentLengthOnHead(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // No response body. (or smaller than one frame)
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1, // clients send odd numbers
+ BlockFragment: st.encodeHeader(":method", "HEAD"),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "200"},
+ {"content-type", "text/plain; charset=utf-8"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+}
+
+// golang.org/issue/13495
+func TestServerNoDuplicateContentType(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.Header()["Content-Type"] = []string{""}
+ fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
+ })
+ defer st.Close()
+ st.greet()
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ h := st.wantHeaders()
+ headers := st.decodeHeader(h.HeaderBlockFragment())
+ want := [][2]string{
+ {":status", "200"},
+ {"content-type", ""},
+ {"content-length", "41"},
+ }
+ if !reflect.DeepEqual(headers, want) {
+ t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+ }
+}
+
+func disableGoroutineTracking() (restore func()) {
+ old := DebugGoroutines
+ DebugGoroutines = false
+ return func() { DebugGoroutines = old }
+}
+
+func BenchmarkServer_GetRequest(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msg = "Hello, world."
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(ioutil.Discard, r.Body)
+ if err != nil || n > 0 {
+ b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
+ }
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+
+ st.greet()
+ // Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+ hbf := st.encodeHeader(":method", "GET")
+ for i := 0; i < b.N; i++ {
+ streamID := uint32(1 + 2*i)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: hbf,
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.wantHeaders()
+ st.wantData()
+ }
+}
+
+func BenchmarkServer_PostRequest(b *testing.B) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msg = "Hello, world."
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ n, err := io.Copy(ioutil.Discard, r.Body)
+ if err != nil || n > 0 {
+ b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
+ }
+ io.WriteString(w, msg)
+ })
+ defer st.Close()
+ st.greet()
+ // Give the server quota to reply. (plus it has the 64KB)
+ if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+ b.Fatal(err)
+ }
+ hbf := st.encodeHeader(":method", "POST")
+ for i := 0; i < b.N; i++ {
+ streamID := uint32(1 + 2*i)
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: hbf,
+ EndStream: false,
+ EndHeaders: true,
+ })
+ st.writeData(streamID, true, nil)
+ st.wantHeaders()
+ st.wantData()
+ }
+}
+
+type connStateConn struct {
+ net.Conn
+ cs tls.ConnectionState
+}
+
+func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs }
+
+// golang.org/issue/12737 -- handle any net.Conn, not just
+// *tls.Conn.
+func TestServerHandleCustomConn(t *testing.T) {
+ var s Server
+ c1, c2 := net.Pipe()
+ clientDone := make(chan struct{})
+ handlerDone := make(chan struct{})
+ var req *http.Request
+ go func() {
+ defer close(clientDone)
+ defer c2.Close()
+ fr := NewFramer(c2, c2)
+ io.WriteString(c2, ClientPreface)
+ fr.WriteSettings()
+ fr.WriteSettingsAck()
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() {
+ t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f))
+ return
+ }
+ f, err = fr.ReadFrame()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() {
+ t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f))
+ return
+ }
+ var henc hpackEncoder
+ fr.WriteHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ go io.Copy(ioutil.Discard, c2)
+ <-handlerDone
+ }()
+ const testString = "my custom ConnectionState"
+ fakeConnState := tls.ConnectionState{
+ ServerName: testString,
+ Version: tls.VersionTLS12,
+ }
+ go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{
+ BaseConfig: &http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer close(handlerDone)
+ req = r
+ }),
+ }})
+ select {
+ case <-clientDone:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for handler")
+ }
+ if req.TLS == nil {
+ t.Fatalf("Request.TLS is nil. Got: %#v", req)
+ }
+ if req.TLS.ServerName != testString {
+ t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString)
+ }
+}
+
+// golang.org/issue/14214
+func TestServer_Rejects_ConnHeaders(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ t.Error("should not get to Handler")
+ })
+ defer st.Close()
+ st.greet()
+ st.bodylessReq1("connection", "foo")
+ hf := st.wantHeaders()
+ goth := st.decodeHeader(hf.HeaderBlockFragment())
+ wanth := [][2]string{
+ {":status", "400"},
+ {"content-type", "text/plain; charset=utf-8"},
+ {"x-content-type-options", "nosniff"},
+ {"content-length", "51"},
+ }
+ if !reflect.DeepEqual(goth, wanth) {
+ t.Errorf("Got headers %v; want %v", goth, wanth)
+ }
+}
+
+type hpackEncoder struct {
+ enc *hpack.Encoder
+ buf bytes.Buffer
+}
+
+func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte {
+ if len(headers)%2 == 1 {
+ panic("odd number of kv args")
+ }
+ he.buf.Reset()
+ if he.enc == nil {
+ he.enc = hpack.NewEncoder(&he.buf)
+ }
+ for len(headers) > 0 {
+ k, v := headers[0], headers[1]
+ err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ if err != nil {
+ t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+ }
+ headers = headers[2:]
+ }
+ return he.buf.Bytes()
+}
+
+func TestCheckValidHTTP2Request(t *testing.T) {
+ tests := []struct {
+ req *http.Request
+ want error
+ }{
+ {
+ req: &http.Request{Header: http.Header{"Te": {"trailers"}}},
+ want: nil,
+ },
+ {
+ req: &http.Request{Header: http.Header{"Te": {"trailers", "bogus"}}},
+ want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`),
+ },
+ {
+ req: &http.Request{Header: http.Header{"Foo": {""}}},
+ want: nil,
+ },
+ {
+ req: &http.Request{Header: http.Header{"Connection": {""}}},
+ want: errors.New(`request header "Connection" is not valid in HTTP/2`),
+ },
+ {
+ req: &http.Request{Header: http.Header{"Proxy-Connection": {""}}},
+ want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`),
+ },
+ {
+ req: &http.Request{Header: http.Header{"Keep-Alive": {""}}},
+ want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`),
+ },
+ {
+ req: &http.Request{Header: http.Header{"Upgrade": {""}}},
+ want: errors.New(`request header "Upgrade" is not valid in HTTP/2`),
+ },
+ }
+ for i, tt := range tests {
+ got := checkValidHTTP2Request(tt.req)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want)
+ }
+ }
+}
+
+// golang.org/issue/14030
+func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
+ const msg = "Hello"
+ const msg2 = "World"
+
+ doRead := make(chan bool, 1)
+ defer close(doRead) // fallback cleanup
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, msg)
+ w.(http.Flusher).Flush()
+
+ // Do a read, which might force a 100-continue status to be sent.
+ <-doRead
+ r.Body.Read(make([]byte, 10))
+
+ io.WriteString(w, msg2)
+
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
+ req.Header.Set("Expect", "100-continue")
+
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ buf := make([]byte, len(msg))
+ if _, err := io.ReadFull(res.Body, buf); err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != msg {
+ t.Fatalf("msg = %q; want %q", buf, msg)
+ }
+
+ doRead <- true
+
+ if _, err := io.ReadFull(res.Body, buf); err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != msg2 {
+ t.Fatalf("second msg = %q; want %q", buf, msg2)
+ }
+}
+
+type funcReader func([]byte) (n int, err error)
+
+func (f funcReader) Read(p []byte) (n int, err error) { return f(p) }
+
+// golang.org/issue/16481 -- return flow control when streams close with unread data.
+// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport)
+func TestUnreadFlowControlReturned_Server(t *testing.T) {
+ unblock := make(chan bool, 1)
+ defer close(unblock)
+
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // Don't read the 16KB request body. Wait until the client's
+ // done sending it and then return. This should cause the Server
+ // to then return those 16KB of flow control to the client.
+ <-unblock
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ // This previously hung on the 4th iteration.
+ for i := 0; i < 6; i++ {
+ body := io.MultiReader(
+ io.LimitReader(neverEnding('A'), 16<<10),
+ funcReader(func([]byte) (n int, err error) {
+ unblock <- true
+ return 0, io.EOF
+ }),
+ )
+ req, _ := http.NewRequest("POST", st.ts.URL, body)
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ }
+
+}
diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
new file mode 100644
index 000000000..31a84bed4
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
@@ -0,0 +1,5021 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="lib/rfc2629.xslt"?>
+<?rfc toc="yes" ?>
+<?rfc symrefs="yes" ?>
+<?rfc sortrefs="yes" ?>
+<?rfc compact="yes"?>
+<?rfc subcompact="no" ?>
+<?rfc linkmailto="no" ?>
+<?rfc editing="no" ?>
+<?rfc comments="yes" ?>
+<?rfc inline="yes"?>
+<?rfc rfcedstyle="yes"?>
+<?rfc-ext allow-markup-in-artwork="yes" ?>
+<?rfc-ext include-index="no" ?>
+
+<rfc ipr="trust200902"
+ category="std"
+ docName="draft-ietf-httpbis-http2-latest"
+ x:maturity-level="proposed"
+ xmlns:x="http://purl.org/net/xml2rfc/ext">
+ <x:feedback template="mailto:ietf-http-wg@w3.org?subject={docname},%20%22{section}%22&amp;body=&lt;{ref}&gt;:"/>
+ <front>
+ <title abbrev="HTTP/2">Hypertext Transfer Protocol version 2</title>
+
+ <author initials="M." surname="Belshe" fullname="Mike Belshe">
+ <organization>Twist</organization>
+ <address>
+ <email>mbelshe@chromium.org</email>
+ </address>
+ </author>
+
+ <author initials="R." surname="Peon" fullname="Roberto Peon">
+ <organization>Google, Inc</organization>
+ <address>
+ <email>fenix@google.com</email>
+ </address>
+ </author>
+
+ <author initials="M." surname="Thomson" fullname="Martin Thomson" role="editor">
+ <organization>Mozilla</organization>
+ <address>
+ <postal>
+ <street>331 E Evelyn Street</street>
+ <city>Mountain View</city>
+ <region>CA</region>
+ <code>94041</code>
+ <country>US</country>
+ </postal>
+ <email>martin.thomson@gmail.com</email>
+ </address>
+ </author>
+
+ <date year="2014" />
+ <area>Applications</area>
+ <workgroup>HTTPbis</workgroup>
+ <keyword>HTTP</keyword>
+ <keyword>SPDY</keyword>
+ <keyword>Web</keyword>
+
+ <abstract>
+ <t>
+ This specification describes an optimized expression of the semantics of the Hypertext
+ Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a
+ reduced perception of latency by introducing header field compression and allowing multiple
+ concurrent messages on the same connection. It also introduces unsolicited push of
+ representations from servers to clients.
+ </t>
+ <t>
+ This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.
+ HTTP's existing semantics remain unchanged.
+ </t>
+ </abstract>
+
+ <note title="Editorial Note (To be removed by RFC Editor)">
+ <t>
+ Discussion of this draft takes place on the HTTPBIS working group mailing list
+ (ietf-http-wg@w3.org), which is archived at <eref
+ target="https://lists.w3.org/Archives/Public/ietf-http-wg/"/>.
+ </t>
+ <t>
+ Working Group information can be found at <eref
+ target="https://tools.ietf.org/wg/httpbis/"/>; that specific to HTTP/2 are at <eref
+ target="https://http2.github.io/"/>.
+ </t>
+ <t>
+ The changes in this draft are summarized in <xref
+ target="change.log"/>.
+ </t>
+ </note>
+
+ </front>
+
+ <middle>
+ <section anchor="intro" title="Introduction">
+
+ <t>
+ The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the
+ HTTP/1.1 message format (<xref target="RFC7230" x:fmt="," x:rel="#http.message"/>) has
+ several characteristics that have a negative overall effect on application performance
+ today.
+ </t>
+ <t>
+ In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given
+ TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed
+ request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1
+ clients that need to make many requests typically use multiple connections to a server in
+ order to achieve concurrency and thereby reduce latency.
+ </t>
+ <t>
+ Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary
+ network traffic, as well as causing the initial <xref target="TCP">TCP</xref> congestion
+ window to quickly fill. This can result in excessive latency when multiple requests are
+ made on a new TCP connection.
+ </t>
+ <t>
+ HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an
+ underlying connection. Specifically, it allows interleaving of request and response
+ messages on the same connection and uses an efficient coding for HTTP header fields. It
+ also allows prioritization of requests, letting more important requests complete more
+ quickly, further improving performance.
+ </t>
+ <t>
+ The resulting protocol is more friendly to the network, because fewer TCP connections can
+ be used in comparison to HTTP/1.x. This means less competition with other flows, and
+ longer-lived connections, which in turn leads to better utilization of available network
+ capacity.
+ </t>
+ <t>
+ Finally, HTTP/2 also enables more efficient processing of messages through use of binary
+ message framing.
+ </t>
+ </section>
+
+ <section anchor="Overview" title="HTTP/2 Protocol Overview">
+ <t>
+ HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core
+ features of HTTP/1.1, but aims to be more efficient in several ways.
+ </t>
+ <t>
+ The basic protocol unit in HTTP/2 is a <xref target="FrameHeader">frame</xref>. Each frame
+ type serves a different purpose. For example, <x:ref>HEADERS</x:ref> and
+ <x:ref>DATA</x:ref> frames form the basis of <xref target="HttpSequence">HTTP requests and
+ responses</xref>; other frame types like <x:ref>SETTINGS</x:ref>,
+ <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PUSH_PROMISE</x:ref> are used in support of other
+ HTTP/2 features.
+ </t>
+ <t>
+ Multiplexing of requests is achieved by having each HTTP request-response exchange
+ associated with its own <xref target="StreamsLayer">stream</xref>. Streams are largely
+ independent of each other, so a blocked or stalled request or response does not prevent
+ progress on other streams.
+ </t>
+ <t>
+ Flow control and prioritization ensure that it is possible to efficiently use multiplexed
+ streams. <xref target="FlowControl">Flow control</xref> helps to ensure that only data that
+ can be used by a receiver is transmitted. <xref
+ target="StreamPriority">Prioritization</xref> ensures that limited resources can be directed
+ to the most important streams first.
+ </t>
+ <t>
+ HTTP/2 adds a new interaction mode, whereby a server can <xref target="PushResources">push
+ responses to a client</xref>. Server push allows a server to speculatively send a client
+ data that the server anticipates the client will need, trading off some network usage
+ against a potential latency gain. The server does this by synthesizing a request, which it
+ sends as a <x:ref>PUSH_PROMISE</x:ref> frame. The server is then able to send a response to
+ the synthetic request on a separate stream.
+ </t>
+ <t>
+ Frames that contain HTTP header fields are <xref target="HeaderBlock">compressed</xref>.
+ HTTP requests can be highly redundant, so compression can reduce the size of requests and
+ responses significantly.
+ </t>
+
+ <section title="Document Organization">
+ <t>
+ The HTTP/2 specification is split into four parts:
+ <list style="symbols">
+ <t>
+ <xref target="starting">Starting HTTP/2</xref> covers how an HTTP/2 connection is
+ initiated.
+ </t>
+ <t>
+ The <xref target="FramingLayer">framing</xref> and <xref
+ target="StreamsLayer">streams</xref> layers describe the way HTTP/2 frames are
+ structured and formed into multiplexed streams.
+ </t>
+ <t>
+ <xref target="FrameTypes">Frame</xref> and <xref target="ErrorCodes">error</xref>
+ definitions include details of the frame and error types used in HTTP/2.
+ </t>
+ <t>
+ <xref target="HTTPLayer">HTTP mappings</xref> and <xref target="HttpExtra">additional
+ requirements</xref> describe how HTTP semantics are expressed using frames and
+ streams.
+ </t>
+ </list>
+ </t>
+ <t>
+ While some of the frame and stream layer concepts are isolated from HTTP, this
+ specification does not define a completely generic framing layer. The framing and streams
+ layers are tailored to the needs of the HTTP protocol and server push.
+ </t>
+ </section>
+
+ <section title="Conventions and Terminology">
+ <t>
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
+ NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
+ described in <xref target="RFC2119">RFC 2119</xref>.
+ </t>
+ <t>
+ All numeric values are in network byte order. Values are unsigned unless otherwise
+ indicated. Literal values are provided in decimal or hexadecimal as appropriate.
+ Hexadecimal literals are prefixed with <spanx style="verb">0x</spanx> to distinguish them
+ from decimal literals.
+ </t>
+ <t>
+ The following terms are used:
+ <list style="hanging">
+ <t hangText="client:">
+ The endpoint initiating the HTTP/2 connection.
+ </t>
+ <t hangText="connection:">
+ A transport-layer connection between two endpoints.
+ </t>
+ <t hangText="connection error:">
+ An error that affects the entire HTTP/2 connection.
+ </t>
+ <t hangText="endpoint:">
+ Either the client or server of the connection.
+ </t>
+ <t hangText="frame:">
+ The smallest unit of communication within an HTTP/2 connection, consisting of a header
+ and a variable-length sequence of octets structured according to the frame type.
+ </t>
+ <t hangText="peer:">
+ An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint
+ that is remote to the primary subject of discussion.
+ </t>
+ <t hangText="receiver:">
+ An endpoint that is receiving frames.
+ </t>
+ <t hangText="sender:">
+ An endpoint that is transmitting frames.
+ </t>
+ <t hangText="server:">
+ The endpoint which did not initiate the HTTP/2 connection.
+ </t>
+ <t hangText="stream:">
+ A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.
+ </t>
+ <t hangText="stream error:">
+ An error on the individual HTTP/2 stream.
+ </t>
+ </list>
+ </t>
+ <t>
+ Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
+ in <xref target="RFC7230" x:fmt="of" x:rel="#intermediaries"/>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="starting" title="Starting HTTP/2">
+ <t>
+ An HTTP/2 connection is an application layer protocol running on top of a TCP connection
+ (<xref target="TCP"/>). The client is the TCP connection initiator.
+ </t>
+ <t>
+ HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same
+ default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result,
+ implementations processing requests for target resource URIs like <spanx
+ style="verb">http://example.org/foo</spanx> or <spanx
+ style="verb">https://example.com/bar</spanx> are required to first discover whether the
+ upstream server (the immediate peer to which the client wishes to establish a connection)
+ supports HTTP/2.
+ </t>
+
+ <t>
+ The means by which support for HTTP/2 is determined is different for "http" and "https"
+ URIs. Discovery for "http" URIs is described in <xref target="discover-http"/>. Discovery
+ for "https" URIs is described in <xref target="discover-https"/>.
+ </t>
+
+ <section anchor="versioning" title="HTTP/2 Version Identification">
+ <t>
+ The protocol defined in this document has two identifiers.
+ <list style="symbols">
+ <x:lt>
+ <t>
+ The string "h2" identifies the protocol where HTTP/2 uses <xref
+ target="TLS12">TLS</xref>. This identifier is used in the <xref
+ target="TLS-ALPN">TLS application layer protocol negotiation extension (ALPN)</xref>
+ field and any place that HTTP/2 over TLS is identified.
+ </t>
+ <t>
+ The "h2" string is serialized into an ALPN protocol identifier as the two octet
+ sequence: 0x68, 0x32.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP.
+ This identifier is used in the HTTP/1.1 Upgrade header field and any place that
+ HTTP/2 over TCP is identified.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message
+ semantics described in this document.
+ </t>
+ <t>
+ <cref>RFC Editor's Note: please remove the remainder of this section prior to the
+ publication of a final version of this document.</cref>
+ </t>
+ <t>
+ Only implementations of the final, published RFC can identify themselves as "h2" or "h2c".
+ Until such an RFC exists, implementations MUST NOT identify themselves using these
+ strings.
+ </t>
+ <t>
+ Examples and text throughout the rest of this document use "h2" as a matter of
+ editorial convenience only. Implementations of draft versions MUST NOT identify using
+ this string.
+ </t>
+ <t>
+ Implementations of draft versions of the protocol MUST add the string "-" and the
+ corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11
+ over TLS is identified using the string "h2-11".
+ </t>
+ <t>
+ Non-compatible experiments that are based on these draft versions MUST append the string
+ "-" and an experiment name to the identifier. For example, an experimental implementation
+ of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself
+ as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in
+ <xref target="RFC7230" x:fmt="of" x:rel="#field.components"/>. Experimenters are
+ encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.
+ </t>
+ </section>
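
As a rough illustration of the ALPN identifiers described in this section, here is a minimal Go sketch (not part of the vendored package) of a TLS listener that advertises "h2" alongside "http/1.1" and checks which protocol the peer negotiated. The certificate/key paths and listen address are placeholders.

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	// Placeholder certificate and key paths.
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		// ALPN protocol identifiers, most preferred first.
		NextProtos: []string{"h2", "http/1.1"},
	}
	ln, err := tls.Listen("tcp", "127.0.0.1:8443", cfg) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	tlsConn := conn.(*tls.Conn)
	if err := tlsConn.Handshake(); err != nil {
		log.Fatal(err)
	}
	if tlsConn.ConnectionState().NegotiatedProtocol == "h2" {
		log.Print("peer negotiated HTTP/2 over TLS")
	}
}
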
+
+ <section anchor="discover-http" title="Starting HTTP/2 for &quot;http&quot; URIs">
+ <t>
+ A client that makes a request for an "http" URI without prior knowledge about support for
+ HTTP/2 uses the HTTP Upgrade mechanism (<xref target="RFC7230" x:fmt="of"
+ x:rel="#header.upgrade"/>). The client makes an HTTP/1.1 request that includes an Upgrade
+ header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include
+ exactly one <xref target="Http2SettingsHeader">HTTP2-Settings</xref> header field.
+ </t>
+ <figure>
+ <preamble>For example:</preamble>
+ <artwork type="message/http; msgtype=&#34;request&#34;" x:indent-with=" "><![CDATA[
+GET / HTTP/1.1
+Host: server.example.com
+Connection: Upgrade, HTTP2-Settings
+Upgrade: h2c
+HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+
+]]></artwork>
+ </figure>
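
For illustration only, a Go sketch of sending that Upgrade request over a raw connection. The host is a placeholder, and the HTTP2-Settings value is the base64url-encoded SETTINGS payload described later in this section (here, a single SETTINGS_MAX_CONCURRENT_STREAMS=100 setting).

package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "server.example.com:80") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Base64url encoding of the 6-octet setting 0x0003 (MAX_CONCURRENT_STREAMS) = 100.
	const settingsPayload = "AAMAAABk"
	fmt.Fprintf(conn, "GET / HTTP/1.1\r\n"+
		"Host: server.example.com\r\n"+
		"Connection: Upgrade, HTTP2-Settings\r\n"+
		"Upgrade: h2c\r\n"+
		"HTTP2-Settings: %s\r\n\r\n", settingsPayload)
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	// A "101 Switching Protocols" status line means the server will now
	// speak HTTP/2 frames on this connection.
	log.Printf("server responded: %q", status)
}
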
+ <t>
+ Requests that contain an entity body MUST be sent in their entirety before the client can
+ send HTTP/2 frames. This means that a large request entity can block the use of the
+ connection until it is completely sent.
+ </t>
+ <t>
+ If concurrency of an initial request with subsequent requests is important, an OPTIONS
+ request can be used to perform the upgrade to HTTP/2, at the cost of an additional
+ round-trip.
+ </t>
+ <t>
+ A server that does not support HTTP/2 can respond to the request as though the Upgrade
+ header field were absent:
+ </t>
+ <figure>
+ <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with=" ">
+HTTP/1.1 200 OK
+Content-Length: 243
+Content-Type: text/html
+
+...
+</artwork>
+ </figure>
+ <t>
+ A server MUST ignore an "h2" token in an Upgrade header field. Presence of a token with
+ "h2" implies HTTP/2 over TLS, which is instead negotiated as described in <xref
+ target="discover-https"/>.
+ </t>
+ <t>
+ A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)
+ response. After the empty line that terminates the 101 response, the server can begin
+ sending HTTP/2 frames. These frames MUST include a response to the request that initiated
+ the Upgrade.
+ </t>
+
+ <figure>
+ <preamble>
+ For example:
+ </preamble>
+ <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with=" ">
+HTTP/1.1 101 Switching Protocols
+Connection: Upgrade
+Upgrade: h2c
+
+[ HTTP/2 connection ...
+</artwork>
+ </figure>
+ <t>
+ The first HTTP/2 frame sent by the server is a <x:ref>SETTINGS</x:ref> frame (<xref
+ target="SETTINGS"/>) as the server connection preface (<xref
+ target="ConnectionHeader"/>). Upon receiving the 101 response, the client sends a <xref
+ target="ConnectionHeader">connection preface</xref>, which includes a
+ <x:ref>SETTINGS</x:ref> frame.
+ </t>
+ <t>
+ The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is
+ assigned <xref target="pri-default">default priority values</xref>. Stream 1 is
+ implicitly half closed from the client toward the server, since the request is completed
+ as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the
+ response.
+ </t>
+
+ <section anchor="Http2SettingsHeader" title="HTTP2-Settings Header Field">
+ <t>
+ A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one <spanx
+ style="verb">HTTP2-Settings</spanx> header field. The <spanx
+ style="verb">HTTP2-Settings</spanx> header field is a connection-specific header field
+ that includes parameters that govern the HTTP/2 connection, provided in anticipation of
+ the server accepting the request to upgrade.
+ </t>
+ <figure>
+ <artwork type="abnf" x:indent-with=" "><![CDATA[
+HTTP2-Settings = token68
+]]></artwork>
+ </figure>
+ <t>
+ A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,
+ or if more than one is present. A server MUST NOT send this header field.
+ </t>
+
+ <t>
+ The content of the <spanx style="verb">HTTP2-Settings</spanx> header field is the
+ payload of a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>), encoded as a
+ base64url string (that is, the URL- and filename-safe Base64 encoding described in <xref
+ target="RFC4648" x:fmt="of" x:sec="5"/>, with any trailing '=' characters omitted). The
+ <xref target="RFC5234">ABNF</xref> production for <spanx style="verb">token68</spanx> is
+ defined in <xref target="RFC7235" x:fmt="of" x:rel="#challenge.and.response"/>.
+ </t>
+ <t>
+ Since the upgrade is only intended to apply to the immediate connection, a client
+ sending <spanx style="verb">HTTP2-Settings</spanx> MUST also send <spanx
+ style="verb">HTTP2-Settings</spanx> as a connection option in the <spanx
+ style="verb">Connection</spanx> header field to prevent it from being forwarded
+ downstream.
+ </t>
+ <t>
+ A server decodes and interprets these values as it would any other
+ <x:ref>SETTINGS</x:ref> frame. <xref target="SettingsSync">Acknowledgement of the
+ SETTINGS parameters</xref> is not necessary, since a 101 response serves as implicit
+ acknowledgment. Providing these values in the Upgrade request gives a client an
+ opportunity to provide parameters prior to receiving any frames from the server.
+ </t>
+ </section>
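
For illustration, a small Go sketch of producing an HTTP2-Settings value under these rules: one 6-octet setting (16-bit identifier, 32-bit value, network byte order) encoded with the padding-free base64url alphabet. The choice of setting (SETTINGS_MAX_CONCURRENT_STREAMS = 100) is arbitrary.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// One setting: 16-bit identifier followed by a 32-bit value,
	// both in network byte order. 0x3 is SETTINGS_MAX_CONCURRENT_STREAMS.
	payload := make([]byte, 6)
	binary.BigEndian.PutUint16(payload[0:2], 0x3)
	binary.BigEndian.PutUint32(payload[2:6], 100)

	// RawURLEncoding is the URL- and filename-safe alphabet with the
	// trailing '=' padding omitted, matching the requirement above.
	fmt.Println("HTTP2-Settings:", base64.RawURLEncoding.EncodeToString(payload))
}
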
+ </section>
+
+ <section anchor="discover-https" title="Starting HTTP/2 for &quot;https&quot; URIs">
+ <t>
+ A client that makes a request to an "https" URI uses <xref target="TLS12">TLS</xref>
+ with the <xref target="TLS-ALPN">application layer protocol negotiation extension</xref>.
+ </t>
+ <t>
+ HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a
+ client or selected by a server.
+ </t>
+ <t>
+ Once TLS negotiation is complete, both the client and the server send a <xref
+ target="ConnectionHeader">connection preface</xref>.
+ </t>
+ </section>
+
+ <section anchor="known-http" title="Starting HTTP/2 with Prior Knowledge">
+ <t>
+ A client can learn that a particular server supports HTTP/2 by other means. For example,
+ <xref target="ALT-SVC"/> describes a mechanism for advertising this capability.
+ </t>
+ <t>
+ A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,
+ after the <xref target="ConnectionHeader">connection preface</xref>; a server can
+ identify such a connection by the presence of the connection preface. This only affects
+ the establishment of HTTP/2 connections over cleartext TCP; implementations that support
+ HTTP/2 over TLS MUST use <xref target="TLS-ALPN">protocol negotiation in TLS</xref>.
+ </t>
+ <t>
+ Without additional information, prior support for HTTP/2 is not a strong signal that a
+ given server will support HTTP/2 for future connections. For example, it is possible for
+ server configurations to change, for configurations to differ between instances in
+ clustered servers, or for network conditions to change.
+ </t>
+ </section>
+
+ <section anchor="ConnectionHeader" title="HTTP/2 Connection Preface">
+ <t>
+ Upon establishment of a TCP connection and determination that HTTP/2 will be used by both
+ peers, each endpoint MUST send a connection preface as a final confirmation and to
+ establish the initial SETTINGS parameters for the HTTP/2 connection. The client and
+ server each send a different connection preface.
+ </t>
+ <t>
+ The client connection preface starts with a sequence of 24 octets, which in hex notation
+ are:
+ </t>
+ <figure>
+ <artwork type="inline" x:indent-with=" "><![CDATA[
+0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
+]]></artwork>
+ </figure>
+ <t>
+ (the string <spanx style="verb">PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n</spanx>). This sequence
+ is followed by a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>). The
+ <x:ref>SETTINGS</x:ref> frame MAY be empty. The client sends the client connection
+ preface immediately upon receipt of a 101 Switching Protocols response (indicating a
+ successful upgrade), or as the first application data octets of a TLS connection. If
+ starting an HTTP/2 connection with prior knowledge of server support for the protocol, the
+ client connection preface is sent upon connection establishment.
+ </t>
+ <t>
+ <list>
+ <t>
+ The client connection preface is selected so that a large proportion of HTTP/1.1 or
+ HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note
+ that this does not address the concerns raised in <xref target="TALKING"/>.
+ </t>
+ </list>
+ </t>
+ <t>
+ The server connection preface consists of a potentially empty <x:ref>SETTINGS</x:ref>
+ frame (<xref target="SETTINGS"/>) that MUST be the first frame the server sends in the
+ HTTP/2 connection.
+ </t>
+ <t>
+ The <x:ref>SETTINGS</x:ref> frames received from a peer as part of the connection preface
+ MUST be acknowledged (see <xref target="SettingsSync"/>) after sending the connection
+ preface.
+ </t>
+ <t>
+ To avoid unnecessary latency, clients are permitted to send additional frames to the
+ server immediately after sending the client connection preface, without waiting to receive
+ the server connection preface. It is important to note, however, that the server
+ connection preface <x:ref>SETTINGS</x:ref> frame might include parameters that necessarily
+ alter how a client is expected to communicate with the server. Upon receiving the
+ <x:ref>SETTINGS</x:ref> frame, the client is expected to honor any parameters established.
+ In some configurations, it is possible for the server to transmit <x:ref>SETTINGS</x:ref>
+ before the client sends additional frames, providing an opportunity to avoid this issue.
+ </t>
+ <t>
+ Clients and servers MUST treat an invalid connection preface as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. A <x:ref>GOAWAY</x:ref> frame (<xref target="GOAWAY"/>)
+ MAY be omitted in this case, since an invalid preface indicates that the peer is not using
+ HTTP/2.
+ </t>
+ </section>
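
A minimal Go sketch of the client side of this preface using the vendored golang.org/x/net/http2 package (its exported ClientPreface constant and Framer), assuming an already-established connection; the dial address is a placeholder.

package main

import (
	"io"
	"log"
	"net"

	"golang.org/x/net/http2"
)

// writeClientPreface sends the fixed 24-octet client magic followed by an
// empty SETTINGS frame, the two parts of the client connection preface.
func writeClientPreface(conn net.Conn) error {
	if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
		return err
	}
	fr := http2.NewFramer(conn, conn)
	return fr.WriteSettings() // a SETTINGS frame MAY be empty
}

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8080") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := writeClientPreface(conn); err != nil {
		log.Fatal(err)
	}
}
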
+ </section>
+
+ <section anchor="FramingLayer" title="HTTP Frames">
+ <t>
+ Once the HTTP/2 connection is established, endpoints can begin exchanging frames.
+ </t>
+
+ <section anchor="FrameHeader" title="Frame Format">
+ <t>
+ All frames begin with a fixed 9-octet header followed by a variable-length payload.
+ </t>
+ <figure title="Frame Layout">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Length (24) |
+ +---------------+---------------+---------------+
+ | Type (8) | Flags (8) |
+ +-+-+-----------+---------------+-------------------------------+
+ |R| Stream Identifier (31) |
+ +=+=============================================================+
+ | Frame Payload (0...) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The fields of the frame header are defined as:
+ <list style="hanging">
+ <x:lt hangText="Length:">
+ <t>
+ The length of the frame payload expressed as an unsigned 24-bit integer. Values
+ greater than 2<x:sup>14</x:sup> (16,384) MUST NOT be sent unless the receiver has
+ set a larger value for <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref>.
+ </t>
+ <t>
+ The 9 octets of the frame header are not included in this value.
+ </t>
+ </x:lt>
+ <x:lt hangText="Type:">
+ <t>
+ The 8-bit type of the frame. The frame type determines the format and semantics of
+ the frame. Implementations MUST ignore and discard any frame that has a type that
+ is unknown.
+ </t>
+ </x:lt>
+ <x:lt hangText="Flags:">
+ <t>
+ An 8-bit field reserved for frame-type specific boolean flags.
+ </t>
+ <t>
+ Flags are assigned semantics specific to the indicated frame type. Flags that have
+ no defined semantics for a particular frame type MUST be ignored, and MUST be left
+ unset (0) when sending.
+ </t>
+ </x:lt>
+ <x:lt hangText="R:">
+ <t>
+ A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
+ remain unset (0) when sending and MUST be ignored when receiving.
+ </t>
+ </x:lt>
+ <x:lt hangText="Stream Identifier:">
+ <t>
+ A 31-bit stream identifier (see <xref target="StreamIdentifiers"/>). The value 0 is
+ reserved for frames that are associated with the connection as a whole as opposed to
+ an individual stream.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ The structure and content of the frame payload is dependent entirely on the frame type.
+ </t>
+ </section>
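
A small Go sketch of decoding the fixed 9-octet header laid out above (24-bit length, type, flags, and a 31-bit stream identifier with the reserved bit masked off). The frameHeader type and parseFrameHeader helper are illustrative names, not the vendored package's API.

package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader mirrors the fixed 9-octet layout described above.
type frameHeader struct {
	Length   uint32 // 24 bits
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits, R bit masked off
}

// parseFrameHeader decodes the 9 octets that precede every frame payload.
func parseFrameHeader(b [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
		Type:     b[3],
		Flags:    b[4],
		StreamID: binary.BigEndian.Uint32(b[5:9]) & 0x7fffffff, // clear the R bit
	}
}

func main() {
	// A SETTINGS frame header: length 0, type 0x4, no flags, stream 0.
	hdr := parseFrameHeader([9]byte{0, 0, 0, 0x4, 0, 0, 0, 0, 0})
	fmt.Printf("%+v\n", hdr)
}
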
+
+ <section anchor="FrameSize" title="Frame Size">
+ <t>
+ The size of a frame payload is limited by the maximum size that a receiver advertises in
+ the <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref> setting. This setting can have any value
+ between 2<x:sup>14</x:sup> (16,384) and 2<x:sup>24</x:sup>-1 (16,777,215) octets,
+ inclusive.
+ </t>
+ <t>
+ All implementations MUST be capable of receiving and minimally processing frames up to
+ 2<x:sup>14</x:sup> octets in length, plus the 9 octet <xref target="FrameHeader">frame
+ header</xref>. The size of the frame header is not included when describing frame sizes.
+ <list style="hanging">
+ <t hangText="Note:">
+ Certain frame types, such as <xref target="PING">PING</xref>, impose additional limits
+ on the amount of payload data allowed.
+ </t>
+ </list>
+ </t>
+ <t>
+ If a frame size exceeds any defined limit, or is too small to contain mandatory frame
+ data, the endpoint MUST send a <x:ref>FRAME_SIZE_ERROR</x:ref> error. A frame size error
+ in a frame that could alter the state of the entire connection MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref>; this includes any frame carrying
+ a <xref target="HeaderBlock">header block</xref> (that is, <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref>, and <x:ref>CONTINUATION</x:ref>), <x:ref>SETTINGS</x:ref>,
+ and any <x:ref>WINDOW_UPDATE</x:ref> frame with a stream identifier of 0.
+ </t>
+ <t>
+ Endpoints are not obligated to use all available space in a frame. Responsiveness can be
+ improved by using frames that are smaller than the permitted maximum size. Sending large
+ frames can result in delays in sending time-sensitive frames (such as
+ <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>),
+ which, if blocked by the transmission of a large frame, could affect performance.
+ </t>
+ </section>
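
As a sketch of the range check implied here, a receiver might validate an advertised SETTINGS_MAX_FRAME_SIZE like this; the constant names mirror the limits above and are local to the example.

package main

import "fmt"

const (
	minMaxFrameSize = 1 << 14   // 16,384
	maxFrameSize    = 1<<24 - 1 // 16,777,215
)

// validMaxFrameSize reports whether an advertised SETTINGS_MAX_FRAME_SIZE
// value falls inside the range allowed above.
func validMaxFrameSize(v uint32) bool {
	return v >= minMaxFrameSize && v <= maxFrameSize
}

func main() {
	for _, v := range []uint32{1 << 13, 1 << 14, 1 << 20, 1 << 24} {
		fmt.Println(v, validMaxFrameSize(v))
	}
}
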
+
+ <section anchor="HeaderBlock" title="Header Compression and Decompression">
+ <t>
+ Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
+ They are used within HTTP request and response messages as well as server push operations
+ (see <xref target="PushResources" />).
+ </t>
+ <t>
+ Header lists are collections of zero or more header fields. When transmitted over a
+ connection, a header list is serialized into a header block using <xref
+ target="COMPRESSION">HTTP Header Compression</xref>. The serialized header block is then
+ divided into one or more octet sequences, called header block fragments, and transmitted
+ within the payload of <xref target="HEADERS">HEADERS</xref>, <xref
+ target="PUSH_PROMISE">PUSH_PROMISE</xref> or <xref
+ target="CONTINUATION">CONTINUATION</xref> frames.
+ </t>
+ <t>
+ The <xref target="COOKIE">Cookie header field</xref> is treated specially by the HTTP
+ mapping (see <xref target="CompressCookie"/>).
+ </t>
+ <t>
+ A receiving endpoint reassembles the header block by concatenating its fragments, then
+ decompresses the block to reconstruct the header list.
+ </t>
+ <t>
+ A complete header block consists of either:
+ <list style="symbols">
+ <t>
+ a single <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame,
+ with the END_HEADERS flag set, or
+ </t>
+ <t>
+ a <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame with the END_HEADERS
+ flag cleared and one or more <x:ref>CONTINUATION</x:ref> frames,
+ where the last <x:ref>CONTINUATION</x:ref> frame has the END_HEADERS flag set.
+ </t>
+ </list>
+ </t>
+ <t>
+ Header compression is stateful. One compression context and one decompression context are
+ used for the entire connection. Each header block is processed as a discrete unit.
+ Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
+ frames of any other type or from any other stream. The last frame in a sequence of
+ <x:ref>HEADERS</x:ref> or <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS
+ flag set. The last frame in a sequence of <x:ref>PUSH_PROMISE</x:ref> or
+ <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS flag set. This allows a
+ header block to be logically equivalent to a single frame.
+ </t>
+ <t>
+ Header block fragments can only be sent as the payload of <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> or <x:ref>CONTINUATION</x:ref> frames, because these frames
+ carry data that can modify the compression context maintained by a receiver. An endpoint
+ receiving <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or
+ <x:ref>CONTINUATION</x:ref> frames MUST reassemble header blocks and perform decompression
+ even if the frames are to be discarded. A receiver MUST terminate the connection with a
+ <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>COMPRESSION_ERROR</x:ref> if it does not decompress a header block.
+ </t>
+ </section>
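
For illustration, a short Go sketch that serializes a header list into a header block with the vendored hpack package and then reassembles it with a decoder, roughly what a receiver does after concatenating block fragments. The 4096-octet dynamic table size used here is simply the protocol's default.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Serialize a small header list into a header block.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: "cookie", Value: "a=b"},
	} {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}

	// Decompress the block to reconstruct the header list.
	dec := hpack.NewDecoder(4096, func(hf hpack.HeaderField) {
		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}
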
+ </section>
+
+ <section anchor="StreamsLayer" title="Streams and Multiplexing">
+ <t>
+ A "stream" is an independent, bi-directional sequence of frames exchanged between the client
+ and server within an HTTP/2 connection. Streams have several important characteristics:
+ <list style="symbols">
+ <t>
+ A single HTTP/2 connection can contain multiple concurrently open streams, with either
+ endpoint interleaving frames from multiple streams.
+ </t>
+ <t>
+ Streams can be established and used unilaterally or shared by either the client or
+ server.
+ </t>
+ <t>
+ Streams can be closed by either endpoint.
+ </t>
+ <t>
+ The order in which frames are sent on a stream is significant. Recipients process frames
+ in the order they are received. In particular, the order of <x:ref>HEADERS</x:ref>,
+ and <x:ref>DATA</x:ref> frames is semantically significant.
+ </t>
+ <t>
+ Streams are identified by an integer. Stream identifiers are assigned to streams by the
+ endpoint initiating the stream.
+ </t>
+ </list>
+ </t>
+
+ <section anchor="StreamStates" title="Stream States">
+ <t>
+ The lifecycle of a stream is shown in <xref target="StreamStatesFigure"/>.
+ </t>
+
+ <figure anchor="StreamStatesFigure" title="Stream States">
+ <artwork type="drawing">
+ <![CDATA[
+ +--------+
+ PP | | PP
+ ,--------| idle |--------.
+ / | | \
+ v +--------+ v
+ +----------+ | +----------+
+ | | | H | |
+ ,---| reserved | | | reserved |---.
+ | | (local) | v | (remote) | |
+ | +----------+ +--------+ +----------+ |
+ | | ES | | ES | |
+ | | H ,-------| open |-------. | H |
+ | | / | | \ | |
+ | v v +--------+ v v |
+ | +----------+ | +----------+ |
+ | | half | | | half | |
+ | | closed | | R | closed | |
+ | | (remote) | | | (local) | |
+ | +----------+ | +----------+ |
+ | | v | |
+ | | ES / R +--------+ ES / R | |
+ | `----------->| |<-----------' |
+ | R | closed | R |
+ `-------------------->| |<--------------------'
+ +--------+
+
+ H: HEADERS frame (with implied CONTINUATIONs)
+ PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+ ES: END_STREAM flag
+ R: RST_STREAM frame
+]]>
+ </artwork>
+ </figure>
+
+ <t>
+ Note that this diagram shows stream state transitions and the frames and flags that affect
+ those transitions only. In this regard, <x:ref>CONTINUATION</x:ref> frames do not result
+ in state transitions; they are effectively part of the <x:ref>HEADERS</x:ref> or
+ <x:ref>PUSH_PROMISE</x:ref> that they follow. For this purpose, the END_STREAM flag is
+ processed as a separate event to the frame that bears it; a <x:ref>HEADERS</x:ref> frame
+ with the END_STREAM flag set can cause two state transitions.
+ </t>
+ <t>
+ Both endpoints have a subjective view of the state of a stream that could be different
+ when frames are in transit. Endpoints do not coordinate the creation of streams; they are
+ created unilaterally by either endpoint. The negative consequences of a mismatch in
+ states are limited to the "closed" state after sending <x:ref>RST_STREAM</x:ref>, where
+ frames might be received for some time after closing.
+ </t>
+ <t>
+ Streams have the following states:
+ <list style="hanging">
+
+ <x:lt hangText="idle:">
+ <t>
+ <vspace blankLines="0"/>
+ All streams start in the "idle" state. In this state, no frames have been
+ exchanged.
+ </t>
+ <t>
+ The following transitions are valid from this state:
+ <list style="symbols">
+ <t>
+ Sending or receiving a <x:ref>HEADERS</x:ref> frame causes the stream to become
+ "open". The stream identifier is selected as described in <xref
+ target="StreamIdentifiers"/>. The same <x:ref>HEADERS</x:ref> frame can also
+ cause a stream to immediately become "half closed".
+ </t>
+ <t>
+ Sending a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream for
+ later use. The stream state for the reserved stream transitions to "reserved
+ (local)".
+ </t>
+ <t>
+ Receiving a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream as
+ reserved by the remote peer. The state of the stream becomes "reserved
+ (remote)".
+ </t>
+ </list>
+ </t>
+ <t>
+ Receiving any frames other than <x:ref>HEADERS</x:ref> or
+ <x:ref>PUSH_PROMISE</x:ref> on a stream in this state MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="reserved (local):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "reserved (local)" state is one that has been promised by sending a
+ <x:ref>PUSH_PROMISE</x:ref> frame. A <x:ref>PUSH_PROMISE</x:ref> frame reserves an
+ idle stream by associating the stream with an open stream that was initiated by the
+ remote peer (see <xref target="PushResources"/>).
+ </t>
+ <t>
+ In this state, only the following transitions are possible:
+ <list style="symbols">
+ <t>
+ The endpoint can send a <x:ref>HEADERS</x:ref> frame. This causes the stream to
+ open in a "half closed (remote)" state.
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+ to become "closed". This releases the stream reservation.
+ </t>
+ </list>
+ </t>
+ <t>
+ An endpoint MUST NOT send any type of frame other than <x:ref>HEADERS</x:ref> or
+ <x:ref>RST_STREAM</x:ref> in this state.
+ </t>
+ <t>
+ A <x:ref>PRIORITY</x:ref> frame MAY be received in this state. Receiving any type
+ of frame other than <x:ref>RST_STREAM</x:ref> or <x:ref>PRIORITY</x:ref> on a stream
+ in this state MUST be treated as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="reserved (remote):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "reserved (remote)" state has been reserved by a remote peer.
+ </t>
+ <t>
+ In this state, only the following transitions are possible:
+ <list style="symbols">
+ <t>
+ Receiving a <x:ref>HEADERS</x:ref> frame causes the stream to transition to
+ "half closed (local)".
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+ to become "closed". This releases the stream reservation.
+ </t>
+ </list>
+ </t>
+ <t>
+ An endpoint MAY send a <x:ref>PRIORITY</x:ref> frame in this state to reprioritize
+ the reserved stream. An endpoint MUST NOT send any type of frame other than
+ <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>
+ in this state.
+ </t>
+ <t>
+ Receiving any type of frame other than <x:ref>HEADERS</x:ref> or
+ <x:ref>RST_STREAM</x:ref> on a stream in this state MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="open:">
+ <t>
+ <vspace blankLines="0"/>
+ A stream in the "open" state may be used by both peers to send frames of any type.
+ In this state, sending peers observe advertised <xref target="FlowControl">stream
+ level flow control limits</xref>.
+ </t>
+ <t>
+ From this state either endpoint can send a frame with an END_STREAM flag set, which
+ causes the stream to transition into one of the "half closed" states: an endpoint
+ sending an END_STREAM flag causes the stream state to become "half closed (local)";
+ an endpoint receiving an END_STREAM flag causes the stream state to become "half
+ closed (remote)".
+ </t>
+ <t>
+ Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame from this state, causing
+ it to transition immediately to "closed".
+ </t>
+ </x:lt>
+
+ <x:lt hangText="half closed (local):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream that is in the "half closed (local)" state cannot be used for sending
+ frames. Only <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> and
+ <x:ref>RST_STREAM</x:ref> frames can be sent in this state.
+ </t>
+ <t>
+ A stream transitions from this state to "closed" when a frame that contains an
+ END_STREAM flag is received, or when either peer sends a <x:ref>RST_STREAM</x:ref>
+ frame.
+ </t>
+ <t>
+ A receiver can ignore <x:ref>WINDOW_UPDATE</x:ref> frames in this state, which might
+ arrive for a short period after a frame bearing the END_STREAM flag is sent.
+ </t>
+ <t>
+ <x:ref>PRIORITY</x:ref> frames received in this state are used to reprioritize
+ streams that depend on the current stream.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="half closed (remote):">
+ <t>
+ <vspace blankLines="0"/>
+ A stream that is "half closed (remote)" is no longer being used by the peer to send
+ frames. In this state, an endpoint is no longer obligated to maintain a receiver
+ flow control window if it performs flow control.
+ </t>
+ <t>
+ If an endpoint receives additional frames for a stream that is in this state, other
+ than <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> or
+ <x:ref>RST_STREAM</x:ref>, it MUST respond with a <xref
+ target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>.
+ </t>
+ <t>
+ A stream that is "half closed (remote)" can be used by the endpoint to send frames
+ of any type. In this state, the endpoint continues to observe advertised <xref
+ target="FlowControl">stream level flow control limits</xref>.
+ </t>
+ <t>
+ A stream can transition from this state to "closed" by sending a frame that contains
+ an END_STREAM flag, or when either peer sends a <x:ref>RST_STREAM</x:ref> frame.
+ </t>
+ </x:lt>
+
+ <x:lt hangText="closed:">
+ <t>
+ <vspace blankLines="0"/>
+ The "closed" state is the terminal state.
+ </t>
+ <t>
+ An endpoint MUST NOT send frames other than <x:ref>PRIORITY</x:ref> on a closed
+ stream. An endpoint that receives any frame other than <x:ref>PRIORITY</x:ref>
+ after receiving a <x:ref>RST_STREAM</x:ref> MUST treat that as a <xref
+ target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>. Similarly, an endpoint that receives any frames after
+ receiving a frame with the END_STREAM flag set MUST treat that as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>, unless the frame is permitted as described below.
+ </t>
+ <t>
+ <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames can be received in
+ this state for a short period after a <x:ref>DATA</x:ref> or <x:ref>HEADERS</x:ref>
+ frame containing an END_STREAM flag is sent. Until the remote peer receives and
+ processes <x:ref>RST_STREAM</x:ref> or the frame bearing the END_STREAM flag, it
+ might send frames of these types. Endpoints MUST ignore
+ <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames received in this
+ state, though endpoints MAY choose to treat frames that arrive a significant time
+ after sending END_STREAM as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ <x:ref>PRIORITY</x:ref> frames can be sent on closed streams to prioritize streams
+ that are dependent on the closed stream. Endpoints SHOULD process
+            <x:ref>PRIORITY</x:ref> frames, though they can be ignored if the stream has been
+ removed from the dependency tree (see <xref target="priority-gc"/>).
+ </t>
+ <t>
+ If this state is reached as a result of sending a <x:ref>RST_STREAM</x:ref> frame,
+ the peer that receives the <x:ref>RST_STREAM</x:ref> might have already sent - or
+ enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint
+ MUST ignore frames that it receives on closed streams after it has sent a
+ <x:ref>RST_STREAM</x:ref> frame. An endpoint MAY choose to limit the period over
+ which it ignores frames and treat frames that arrive after this time as being in
+ error.
+ </t>
+ <t>
+ Flow controlled frames (i.e., <x:ref>DATA</x:ref>) received after sending
+ <x:ref>RST_STREAM</x:ref> are counted toward the connection flow control window.
+ Even though these frames might be ignored, because they are sent before the sender
+ receives the <x:ref>RST_STREAM</x:ref>, the sender will consider the frames to count
+ against the flow control window.
+ </t>
+ <t>
+ An endpoint might receive a <x:ref>PUSH_PROMISE</x:ref> frame after it sends
+ <x:ref>RST_STREAM</x:ref>. <x:ref>PUSH_PROMISE</x:ref> causes a stream to become
+ "reserved" even if the associated stream has been reset. Therefore, a
+ <x:ref>RST_STREAM</x:ref> is needed to close an unwanted promised stream.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ In the absence of more specific guidance elsewhere in this document, implementations
+ SHOULD treat the receipt of a frame that is not expressly permitted in the description of
+ a state as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>. Frames of unknown types are ignored.
+ </t>
+ <t>
+ An example of the state transitions for an HTTP request/response exchange can be found in
+ <xref target="HttpSequence"/>. An example of the state transitions for server push can be
+ found in <xref target="PushRequests"/> and <xref target="PushResponses"/>.
+ </t>
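+          <t>
+            The following Go fragment is a non-normative sketch of the transitions described
+            above; all type and function names are illustrative and are not defined by this
+            specification. It shows how a single HEADERS frame with the END_STREAM flag set
+            can cause two transitions, because the flag is processed as a separate event.
+          </t>
+          <figure title="Non-Normative Sketch of Stream State Transitions">
+            <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// streamState enumerates the states in the diagram above.
+type streamState int
+
+const (
+  stateIdle streamState = iota
+  stateReservedLocal
+  stateReservedRemote
+  stateOpen
+  stateHalfClosedLocal
+  stateHalfClosedRemote
+  stateClosed
+)
+
+// onHeaders applies the transitions caused by sending (sent=true)
+// or receiving a HEADERS frame; the END_STREAM flag is handled as
+// a separate event, so one frame can cause two transitions.
+func onHeaders(s streamState, sent, endStream bool) streamState {
+  switch s {
+  case stateIdle:
+    s = stateOpen
+  case stateReservedLocal:
+    if sent {
+      s = stateHalfClosedRemote
+    }
+  case stateReservedRemote:
+    if !sent {
+      s = stateHalfClosedLocal
+    }
+  }
+  if endStream && s == stateOpen {
+    if sent {
+      s = stateHalfClosedLocal
+    } else {
+      s = stateHalfClosedRemote
+    }
+  }
+  return s
+}
+
+func main() {
+  // idle -> open -> half closed (local): two transitions, one frame.
+  fmt.Println(onHeaders(stateIdle, true, true) == stateHalfClosedLocal)
+}
+]]></artwork>
+          </figure>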
+
+ <section anchor="StreamIdentifiers" title="Stream Identifiers">
+ <t>
+ Streams are identified with an unsigned 31-bit integer. Streams initiated by a client
+ MUST use odd-numbered stream identifiers; those initiated by the server MUST use
+ even-numbered stream identifiers. A stream identifier of zero (0x0) is used for
+ connection control messages; the stream identifier zero cannot be used to establish a
+ new stream.
+ </t>
+ <t>
+ HTTP/1.1 requests that are upgraded to HTTP/2 (see <xref target="discover-http"/>) are
+ responded to with a stream identifier of one (0x1). After the upgrade
+ completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1
+ cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
+ </t>
+ <t>
+ The identifier of a newly established stream MUST be numerically greater than all
+ streams that the initiating endpoint has opened or reserved. This governs streams that
+ are opened using a <x:ref>HEADERS</x:ref> frame and streams that are reserved using
+ <x:ref>PUSH_PROMISE</x:ref>. An endpoint that receives an unexpected stream identifier
+ MUST respond with a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The first use of a new stream identifier implicitly closes all streams in the "idle"
+ state that might have been initiated by that peer with a lower-valued stream identifier.
+ For example, if a client sends a <x:ref>HEADERS</x:ref> frame on stream 7 without ever
+ sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
+ first frame for stream 7 is sent or received.
+ </t>
+ <t>
+ Stream identifiers cannot be reused. Long-lived connections can result in an endpoint
+ exhausting the available range of stream identifiers. A client that is unable to
+ establish a new stream identifier can establish a new connection for new streams. A
+ server that is unable to establish a new stream identifier can send a
+ <x:ref>GOAWAY</x:ref> frame so that the client is forced to open a new connection for
+ new streams.
+ </t>
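+            <t>
+              As a non-normative illustration, the Go function below applies the identifier
+              rules described in this section; the function and parameter names are
+              hypothetical and not part of this specification.
+            </t>
+            <figure title="Non-Normative Sketch of Stream Identifier Checks">
+              <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "errors"
+  "fmt"
+)
+
+// checkNewStreamID validates a proposed identifier: 0x0 is reserved
+// for connection control, clients use odd and servers use even
+// numbers, and each new identifier must be numerically greater than
+// any the initiator has previously opened or reserved (lastID).
+func checkNewStreamID(id, lastID uint32, byClient bool) error {
+  if id == 0 || id > 1<<31-1 {
+    return errors.New("identifiers are non-zero 31-bit integers")
+  }
+  if byClient && id%2 == 0 {
+    return errors.New("client-initiated streams must be odd")
+  }
+  if !byClient && id%2 == 1 {
+    return errors.New("server-initiated streams must be even")
+  }
+  if id <= lastID {
+    return errors.New("PROTOCOL_ERROR: identifier not increasing")
+  }
+  return nil
+}
+
+func main() {
+  fmt.Println(checkNewStreamID(7, 5, true)) // <nil>
+  fmt.Println(checkNewStreamID(4, 5, true)) // rejected: even-numbered
+}
+]]></artwork>
+            </figure>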
+ </section>
+
+ <section title="Stream Concurrency">
+ <t>
+ A peer can limit the number of concurrently active streams using the
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> parameter (see <xref
+ target="SettingValues"/>) within a <x:ref>SETTINGS</x:ref> frame. The maximum concurrent
+ streams setting is specific to each endpoint and applies only to the peer that receives
+ the setting. That is, clients specify the maximum number of concurrent streams the
+ server can initiate, and servers specify the maximum number of concurrent streams the
+ client can initiate.
+ </t>
+ <t>
+            Streams that are in the "open" state or in either of the "half closed" states count toward
+ the maximum number of streams that an endpoint is permitted to open. Streams in any of
+ these three states count toward the limit advertised in the
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting. Streams in either of the
+ "reserved" states do not count toward the stream limit.
+ </t>
+ <t>
+ Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
+ <x:ref>HEADERS</x:ref> frame that causes their advertised concurrent stream limit to be
+ exceeded MUST treat this as a <xref target="StreamErrorHandler">stream error</xref>. An
+ endpoint that wishes to reduce the value of
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> to a value that is below the current
+ number of open streams can either close streams that exceed the new value or allow
+ streams to complete.
+ </t>
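+            <t>
+              The short Go sketch below (non-normative; all names are illustrative) counts the
+              streams that are relevant to this limit before opening a new one.
+            </t>
+            <figure title="Non-Normative Sketch of a Concurrency Check">
+              <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+type state int
+
+const (
+  open state = iota
+  halfClosedLocal
+  halfClosedRemote
+  reserved
+  closed
+)
+
+// canOpenStream reports whether one more stream stays within the
+// peer's advertised SETTINGS_MAX_CONCURRENT_STREAMS.  Only "open"
+// and "half closed" streams count; "reserved" and "closed" do not.
+func canOpenStream(streams map[uint32]state, limit uint32) bool {
+  var active uint32
+  for _, s := range streams {
+    switch s {
+    case open, halfClosedLocal, halfClosedRemote:
+      active++
+    }
+  }
+  return active < limit
+}
+
+func main() {
+  streams := map[uint32]state{1: open, 3: halfClosedRemote, 5: reserved}
+  fmt.Println(canOpenStream(streams, 2)) // false: already two active
+}
+]]></artwork>
+            </figure>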
+ </section>
+ </section>
+
+ <section anchor="FlowControl" title="Flow Control">
+ <t>
+ Using streams for multiplexing introduces contention over use of the TCP connection,
+ resulting in blocked streams. A flow control scheme ensures that streams on the same
+ connection do not destructively interfere with each other. Flow control is used for both
+ individual streams and for the connection as a whole.
+ </t>
+ <t>
+ HTTP/2 provides for flow control through use of the <xref
+ target="WINDOW_UPDATE">WINDOW_UPDATE frame</xref>.
+ </t>
+
+ <section anchor="fc-principles" title="Flow Control Principles">
+ <t>
+ HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
+ used without requiring protocol changes. Flow control in HTTP/2 has the following
+ characteristics:
+ <list style="numbers">
+ <t>
+ Flow control is specific to a connection; i.e., it is "hop-by-hop", not
+ "end-to-end".
+ </t>
+ <t>
+ Flow control is based on window update frames. Receivers advertise how many octets
+ they are prepared to receive on a stream and for the entire connection. This is a
+ credit-based scheme.
+ </t>
+ <t>
+ Flow control is directional with overall control provided by the receiver. A
+ receiver MAY choose to set any window size that it desires for each stream and for
+ the entire connection. A sender MUST respect flow control limits imposed by a
+ receiver. Clients, servers and intermediaries all independently advertise their
+ flow control window as a receiver and abide by the flow control limits set by
+ their peer when sending.
+ </t>
+ <t>
+ The initial value for the flow control window is 65,535 octets for both new streams
+ and the overall connection.
+ </t>
+ <t>
+ The frame type determines whether flow control applies to a frame. Of the frames
+ specified in this document, only <x:ref>DATA</x:ref> frames are subject to flow
+ control; all other frame types do not consume space in the advertised flow control
+ window. This ensures that important control frames are not blocked by flow control.
+ </t>
+ <t>
+ Flow control cannot be disabled.
+ </t>
+ <t>
+ HTTP/2 defines only the format and semantics of the <x:ref>WINDOW_UPDATE</x:ref>
+ frame (<xref target="WINDOW_UPDATE"/>). This document does not stipulate how a
+ receiver decides when to send this frame or the value that it sends, nor does it
+ specify how a sender chooses to send packets. Implementations are able to select
+ any algorithm that suits their needs.
+ </t>
+ </list>
+ </t>
+ <t>
+ Implementations are also responsible for managing how requests and responses are sent
+ based on priority; choosing how to avoid head of line blocking for requests; and
+ managing the creation of new streams. Algorithm choices for these could interact with
+ any flow control algorithm.
+ </t>
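+          <t>
+            A minimal, non-normative Go sketch of the credit-based scheme described above
+            follows; the types are illustrative only. Sending DATA consumes credit on both
+            the stream and the connection, and WINDOW_UPDATE replenishes one of them.
+          </t>
+          <figure title="Non-Normative Sketch of Window Accounting">
+            <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "errors"
+  "fmt"
+)
+
+const initialWindow = 65535 // initial credit for streams and connection
+
+// windows tracks the send-side credit for one stream and for the
+// connection as a whole.
+type windows struct {
+  stream, conn int32
+}
+
+// sendData succeeds only if both windows have room for n octets.
+func (w *windows) sendData(n int32) error {
+  if n > w.stream || n > w.conn {
+    return errors.New("blocked: insufficient flow control credit")
+  }
+  w.stream -= n
+  w.conn -= n
+  return nil
+}
+
+// windowUpdate applies a WINDOW_UPDATE increment from the peer.
+func (w *windows) windowUpdate(forStream bool, increment int32) {
+  if forStream {
+    w.stream += increment
+  } else {
+    w.conn += increment
+  }
+}
+
+func main() {
+  w := &windows{stream: initialWindow, conn: initialWindow}
+  fmt.Println(w.sendData(16384), w.stream, w.conn) // <nil> 49151 49151
+  w.windowUpdate(true, 16384)
+  fmt.Println(w.stream) // 65535
+}
+]]></artwork>
+          </figure>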
+ </section>
+
+ <section anchor="DisableFlowControl" title="Appropriate Use of Flow Control">
+ <t>
+ Flow control is defined to protect endpoints that are operating under resource
+ constraints. For example, a proxy needs to share memory between many connections, and
+ also might have a slow upstream connection and a fast downstream one. Flow control
+          addresses cases where the receiver is unable to process data on one stream, yet wants to
+ continue to process other streams in the same connection.
+ </t>
+ <t>
+ Deployments that do not require this capability can advertise a flow control window of
+ the maximum size, incrementing the available space when new data is received. This
+ effectively disables flow control for that receiver. Conversely, a sender is always
+ subject to the flow control window advertised by the receiver.
+ </t>
+ <t>
+ Deployments with constrained resources (for example, memory) can employ flow control to
+ limit the amount of memory a peer can consume. Note, however, that this can lead to
+ suboptimal use of available network resources if flow control is enabled without
+ knowledge of the bandwidth-delay product (see <xref target="RFC1323"/>).
+ </t>
+ <t>
+ Even with full awareness of the current bandwidth-delay product, implementation of flow
+ control can be difficult. When using flow control, the receiver MUST read from the TCP
+ receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
+ critical frames, such as <x:ref>WINDOW_UPDATE</x:ref>, are not read and acted upon.
+ </t>
+ </section>
+ </section>
+
+      <section anchor="StreamPriority" title="Stream Priority">
+ <t>
+ A client can assign a priority for a new stream by including prioritization information in
+ the <xref target="HEADERS">HEADERS frame</xref> that opens the stream. For an existing
+ stream, the <xref target="PRIORITY">PRIORITY frame</xref> can be used to change the
+ priority.
+ </t>
+ <t>
+ The purpose of prioritization is to allow an endpoint to express how it would prefer its
+ peer allocate resources when managing concurrent streams. Most importantly, priority can
+ be used to select streams for transmitting frames when there is limited capacity for
+ sending.
+ </t>
+ <t>
+ Streams can be prioritized by marking them as dependent on the completion of other streams
+ (<xref target="pri-depend"/>). Each dependency is assigned a relative weight, a number
+ that is used to determine the relative proportion of available resources that are assigned
+ to streams dependent on the same stream.
+ </t>
+ <!--
+ Note that stream dependencies have not yet been validated in practice. The theory
+ might be fairly sound, but there are no implementations currently sending these. If it
+ turns out that they are not useful, or actively harmful, implementations will be requested
+ to avoid creating stream dependencies.
+ -->
+ <t>
+ Explicitly setting the priority for a stream is input to a prioritization process. It
+ does not guarantee any particular processing or transmission order for the stream relative
+ to any other stream. An endpoint cannot force a peer to process concurrent streams in a
+ particular order using priority. Expressing priority is therefore only ever a suggestion.
+ </t>
+ <t>
+ Providing prioritization information is optional, so default values are used if no
+ explicit indicator is provided (<xref target="pri-default"/>).
+ </t>
+
+ <section title="Stream Dependencies" anchor="pri-depend">
+ <t>
+ Each stream can be given an explicit dependency on another stream. Including a
+ dependency expresses a preference to allocate resources to the identified stream rather
+ than to the dependent stream.
+ </t>
+ <t>
+ A stream that is not dependent on any other stream is given a stream dependency of 0x0.
+ In other words, the non-existent stream 0 forms the root of the tree.
+ </t>
+ <t>
+ A stream that depends on another stream is a dependent stream. The stream upon which a
+ stream is dependent is a parent stream. A dependency on a stream that is not currently
+ in the tree - such as a stream in the "idle" state - results in that stream being given
+ a <xref target="pri-default">default priority</xref>.
+ </t>
+ <t>
+ When assigning a dependency on another stream, the stream is added as a new dependency
+ of the parent stream. Dependent streams that share the same parent are not ordered with
+ respect to each other. For example, if streams B and C are dependent on stream A, and
+ if stream D is created with a dependency on stream A, this results in a dependency order
+ of A followed by B, C, and D in any order.
+ </t>
+ <figure title="Example of Default Dependency Creation">
+ <artwork type="inline"><![CDATA[
+ A A
+ / \ ==> /|\
+ B C B D C
+]]></artwork>
+ </figure>
+ <t>
+ An exclusive flag allows for the insertion of a new level of dependencies. The
+ exclusive flag causes the stream to become the sole dependency of its parent stream,
+ causing other dependencies to become dependent on the exclusive stream. In the
+ previous example, if stream D is created with an exclusive dependency on stream A, this
+ results in D becoming the dependency parent of B and C.
+ </t>
+ <figure title="Example of Exclusive Dependency Creation">
+ <artwork type="inline"><![CDATA[
+ A
+ A |
+ / \ ==> D
+ B C / \
+ B C
+]]></artwork>
+ </figure>
+ <t>
+ Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
+ of the streams that it depends on (the chain of parent streams up to 0x0) are either
+ closed, or it is not possible to make progress on them.
+ </t>
+ <t>
+ A stream cannot depend on itself. An endpoint MUST treat this as a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
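+          <t>
+            The Go sketch below is a non-normative illustration of these insertion rules,
+            using hypothetical types; it reproduces the exclusive example above, where a new
+            stream D inserted exclusively below A becomes the parent of B and C.
+          </t>
+          <figure title="Non-Normative Sketch of Dependency Insertion">
+            <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// node is one dependency-tree entry, keyed by stream identifier;
+// stream 0x0 is the root of the tree.
+type node struct {
+  parent   uint32
+  children []uint32
+}
+
+// addDependency inserts stream id below parent.  With exclusive set,
+// the parent's existing dependencies move below the new stream.
+func addDependency(t map[uint32]*node, id, parent uint32, exclusive bool) {
+  if _, ok := t[parent]; !ok {
+    t[parent] = &node{}
+  }
+  n := &node{parent: parent}
+  if exclusive {
+    n.children = t[parent].children
+    for _, c := range n.children {
+      t[c].parent = id
+    }
+    t[parent].children = nil
+  }
+  t[id] = n
+  t[parent].children = append(t[parent].children, id)
+}
+
+func main() {
+  tree := map[uint32]*node{0: {}}
+  addDependency(tree, 1, 0, false) // A
+  addDependency(tree, 3, 1, false) // B depends on A
+  addDependency(tree, 5, 1, false) // C depends on A
+  addDependency(tree, 7, 1, true)  // D, exclusive on A
+  fmt.Println(tree[1].children, tree[7].children) // [7] [3 5]
+}
+]]></artwork>
+          </figure>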
+ </section>
+
+ <section title="Dependency Weighting">
+ <t>
+ All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
+ </t>
+ <t>
+ Streams with the same parent SHOULD be allocated resources proportionally based on their
+ weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A
+ with weight 12, and if no progress can be made on A, stream B ideally receives one third
+ of the resources allocated to stream C.
+ </t>
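+          <t>
+            For instance, with the weights above, B receives 4/16 and C receives 12/16 of
+            the parent's resources. The non-normative Go helper below (illustrative names
+            only) computes such proportions.
+          </t>
+          <figure title="Non-Normative Sketch of Weighted Shares">
+            <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// share returns the ideal proportion of the parent's resources for
+// each sibling, based on its weight relative to the total weight.
+func share(weights map[string]int) map[string]float64 {
+  total := 0
+  for _, w := range weights {
+    total += w
+  }
+  out := make(map[string]float64, len(weights))
+  for name, w := range weights {
+    out[name] = float64(w) / float64(total)
+  }
+  return out
+}
+
+func main() {
+  // B ideally receives one third of what C receives: 0.25 vs 0.75.
+  fmt.Println(share(map[string]int{"B": 4, "C": 12}))
+}
+]]></artwork>
+          </figure>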
+ </section>
+
+ <section anchor="reprioritize" title="Reprioritization">
+ <t>
+ Stream priorities are changed using the <x:ref>PRIORITY</x:ref> frame. Setting a
+ dependency causes a stream to become dependent on the identified parent stream.
+ </t>
+ <t>
+ Dependent streams move with their parent stream if the parent is reprioritized. Setting
+ a dependency with the exclusive flag for a reprioritized stream moves all the
+ dependencies of the new parent stream to become dependent on the reprioritized stream.
+ </t>
+ <t>
+ If a stream is made dependent on one of its own dependencies, the formerly dependent
+ stream is first moved to be dependent on the reprioritized stream's previous parent.
+ The moved dependency retains its weight.
+ </t>
+ <figure title="Example of Dependency Reordering">
+ <preamble>
+ For example, consider an original dependency tree where B and C depend on A, D and E
+ depend on C, and F depends on D. If A is made dependent on D, then D takes the place
+ of A. All other dependency relationships stay the same, except for F, which becomes
+ dependent on A if the reprioritization is exclusive.
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ ? ? ? ?
+ | / \ | |
+ A D A D D
+ / \ / / \ / \ |
+ B C ==> F B C ==> F A OR A
+ / \ | / \ /|\
+ D E E B C B C F
+ | | |
+ F E E
+ (intermediate) (non-exclusive) (exclusive)
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="priority-gc" title="Prioritization State Management">
+ <t>
+ When a stream is removed from the dependency tree, its dependencies can be moved to
+ become dependent on the parent of the closed stream. The weights of new dependencies
+ are recalculated by distributing the weight of the dependency of the closed stream
+ proportionally based on the weights of its dependencies.
+ </t>
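+          <t>
+            A non-normative Go sketch of this recalculation follows; the helper is
+            illustrative and not defined by this specification.
+          </t>
+          <figure title="Non-Normative Sketch of Weight Redistribution">
+            <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// redistribute shares the weight of a removed stream among its
+// dependencies in proportion to their own weights.
+func redistribute(closedWeight int, childWeights []int) []float64 {
+  total := 0
+  for _, w := range childWeights {
+    total += w
+  }
+  out := make([]float64, len(childWeights))
+  for i, w := range childWeights {
+    out[i] = float64(closedWeight) * float64(w) / float64(total)
+  }
+  return out
+}
+
+func main() {
+  // A removed stream of weight 16 with two equally weighted
+  // dependencies leaves each with an effective weight of 8.
+  fmt.Println(redistribute(16, []int{16, 16})) // [8 8]
+}
+]]></artwork>
+          </figure>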
+ <t>
+ Streams that are removed from the dependency tree cause some prioritization information
+ to be lost. Resources are shared between streams with the same parent stream, which
+ means that if a stream in that set closes or becomes blocked, any spare capacity
+ allocated to a stream is distributed to the immediate neighbors of the stream. However,
+ if the common dependency is removed from the tree, those streams share resources with
+ streams at the next highest level.
+ </t>
+ <t>
+ For example, assume streams A and B share a parent, and streams C and D both depend on
+ stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
+ then stream C receives all the resources dedicated to stream A. If stream A is removed
+ from the tree, the weight of stream A is divided between streams C and D. If stream D
+ is still unable to proceed, this results in stream C receiving a reduced proportion of
+ resources. For equal starting weights, C receives one third, rather than one half, of
+ available resources.
+ </t>
+ <t>
+ It is possible for a stream to become closed while prioritization information that
+ creates a dependency on that stream is in transit. If a stream identified in a
+ dependency has no associated priority information, then the dependent stream is instead
+ assigned a <xref target="pri-default">default priority</xref>. This potentially creates
+ suboptimal prioritization, since the stream could be given a priority that is different
+ to what is intended.
+ </t>
+ <t>
+ To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
+ period after streams become closed. The longer state is retained, the lower the chance
+ that streams are assigned incorrect or default priority values.
+ </t>
+ <t>
+ This could create a large state burden for an endpoint, so this state MAY be limited.
+ An endpoint MAY apply a fixed upper limit on the number of closed streams for which
+ prioritization state is tracked to limit state exposure. The amount of additional state
+ an endpoint maintains could be dependent on load; under high load, prioritization state
+ can be discarded to limit resource commitments. In extreme cases, an endpoint could
+ even discard prioritization state for active or reserved streams. If a fixed limit is
+ applied, endpoints SHOULD maintain state for at least as many streams as allowed by
+ their setting for <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref>.
+ </t>
+ <t>
+ An endpoint receiving a <x:ref>PRIORITY</x:ref> frame that changes the priority of a
+ closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
+ retained enough state to do so.
+ </t>
+ </section>
+
+ <section title="Default Priorities" anchor="pri-default">
+ <t>
+ Providing priority information is optional. Streams are assigned a non-exclusive
+ dependency on stream 0x0 by default. <xref target="PushResources">Pushed streams</xref>
+ initially depend on their associated stream. In both cases, streams are assigned a
+ default weight of 16.
+ </t>
+ </section>
+ </section>
+
+ <section title="Error Handling">
+ <t>
+ HTTP/2 framing permits two classes of error:
+ <list style="symbols">
+ <t>
+ An error condition that renders the entire connection unusable is a connection error.
+ </t>
+ <t>
+ An error in an individual stream is a stream error.
+ </t>
+ </list>
+ </t>
+ <t>
+ A list of error codes is included in <xref target="ErrorCodes"/>.
+ </t>
+
+ <section anchor="ConnectionErrorHandler" title="Connection Error Handling">
+ <t>
+ A connection error is any error which prevents further processing of the framing layer,
+ or which corrupts any connection state.
+ </t>
+ <t>
+ An endpoint that encounters a connection error SHOULD first send a <x:ref>GOAWAY</x:ref>
+ frame (<xref target="GOAWAY"/>) with the stream identifier of the last stream that it
+ successfully received from its peer. The <x:ref>GOAWAY</x:ref> frame includes an error
+ code that indicates why the connection is terminating. After sending the
+ <x:ref>GOAWAY</x:ref> frame, the endpoint MUST close the TCP connection.
+ </t>
+ <t>
+ It is possible that the <x:ref>GOAWAY</x:ref> will not be reliably received by the
+ receiving endpoint (see <xref target="RFC7230" x:fmt=","
+ x:rel="#persistent.tear-down"/>). In the event of a connection error,
+ <x:ref>GOAWAY</x:ref> only provides a best effort attempt to communicate with the peer
+ about why the connection is being terminated.
+ </t>
+ <t>
+ An endpoint can end a connection at any time. In particular, an endpoint MAY choose to
+ treat a stream error as a connection error. Endpoints SHOULD send a
+ <x:ref>GOAWAY</x:ref> frame when ending a connection, providing that circumstances
+ permit it.
+ </t>
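+          <t>
+            As a non-normative illustration, the Go fragment below follows this sequence
+            using hypothetical frame-writer and connection types; neither is defined by
+            this specification.
+          </t>
+          <figure title="Non-Normative Sketch of Connection Error Handling">
+            <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "fmt"
+  "io"
+)
+
+// frameWriter is a hypothetical stand-in for whatever writes frames.
+type frameWriter interface {
+  WriteGoAway(lastStreamID, errorCode uint32) error
+}
+
+// onConnectionError sends a best-effort GOAWAY carrying the last
+// successfully received stream and an error code, then closes the
+// TCP connection.
+func onConnectionError(fw frameWriter, tcp io.Closer, last, code uint32) {
+  _ = fw.WriteGoAway(last, code) // best effort; closing regardless
+  tcp.Close()
+}
+
+// The stubs below exist only to make the sketch self-contained.
+type stubWriter struct{}
+
+func (stubWriter) WriteGoAway(last, code uint32) error {
+  fmt.Printf("GOAWAY last-stream=%d error=0x%x\n", last, code)
+  return nil
+}
+
+type stubConn struct{}
+
+func (stubConn) Close() error { fmt.Println("closed"); return nil }
+
+func main() {
+  onConnectionError(stubWriter{}, stubConn{}, 5, 0x1) // PROTOCOL_ERROR
+}
+]]></artwork>
+          </figure>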
+ </section>
+
+ <section anchor="StreamErrorHandler" title="Stream Error Handling">
+ <t>
+ A stream error is an error related to a specific stream that does not affect processing
+ of other streams.
+ </t>
+ <t>
+ An endpoint that detects a stream error sends a <x:ref>RST_STREAM</x:ref> frame (<xref
+ target="RST_STREAM"/>) that contains the stream identifier of the stream where the error
+ occurred. The <x:ref>RST_STREAM</x:ref> frame includes an error code that indicates the
+ type of error.
+ </t>
+ <t>
+ A <x:ref>RST_STREAM</x:ref> is the last frame that an endpoint can send on a stream.
+ The peer that sends the <x:ref>RST_STREAM</x:ref> frame MUST be prepared to receive any
+ frames that were sent or enqueued for sending by the remote peer. These frames can be
+ ignored, except where they modify connection state (such as the state maintained for
+ <xref target="HeaderBlock">header compression</xref>, or flow control).
+ </t>
+ <t>
+ Normally, an endpoint SHOULD NOT send more than one <x:ref>RST_STREAM</x:ref> frame for
+ any stream. However, an endpoint MAY send additional <x:ref>RST_STREAM</x:ref> frames if
+ it receives frames on a closed stream after more than a round-trip time. This behavior
+ is permitted to deal with misbehaving implementations.
+ </t>
+ <t>
+ An endpoint MUST NOT send a <x:ref>RST_STREAM</x:ref> in response to an
+ <x:ref>RST_STREAM</x:ref> frame, to avoid looping.
+ </t>
+ </section>
+
+ <section title="Connection Termination">
+ <t>
+ If the TCP connection is closed or reset while streams remain in open or half closed
+ states, then the endpoint MUST assume that those streams were abnormally interrupted and
+ could be incomplete.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="extensibility" title="Extending HTTP/2">
+ <t>
+ HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide
+ additional services or alter any aspect of the protocol, within the limitations described
+ in this section. Extensions are effective only within the scope of a single HTTP/2
+ connection.
+ </t>
+ <t>
+ Extensions are permitted to use new <xref target="FrameHeader">frame types</xref>, new
+ <xref target="SettingValues">settings</xref>, or new <xref target="ErrorCodes">error
+ codes</xref>. Registries are established for managing these extension points: <xref
+ target="iana-frames">frame types</xref>, <xref target="iana-settings">settings</xref> and
+ <xref target="iana-errors">error codes</xref>.
+ </t>
+ <t>
+ Implementations MUST ignore unknown or unsupported values in all extensible protocol
+ elements. Implementations MUST discard frames that have unknown or unsupported types.
+ This means that any of these extension points can be safely used by extensions without
+ prior arrangement or negotiation. However, extension frames that appear in the middle of
+ a <xref target="HeaderBlock">header block</xref> are not permitted; these MUST be treated
+ as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ However, extensions that could change the semantics of existing protocol components MUST
+ be negotiated before being used. For example, an extension that changes the layout of the
+ <x:ref>HEADERS</x:ref> frame cannot be used until the peer has given a positive signal
+ that this is acceptable. In this case, it could also be necessary to coordinate when the
+ revised layout comes into effect. Note that treating any frame other than
+ <x:ref>DATA</x:ref> frames as flow controlled is such a change in semantics, and can only
+ be done through negotiation.
+ </t>
+ <t>
+ This document doesn't mandate a specific method for negotiating the use of an extension,
+ but notes that a <xref target="SettingValues">setting</xref> could be used for that
+ purpose. If both peers set a value that indicates willingness to use the extension, then
+ the extension can be used. If a setting is used for extension negotiation, the initial
+ value MUST be defined so that the extension is initially disabled.
+ </t>
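+        <t>
+          As a non-normative example of this pattern, the Go fragment below uses a
+          hypothetical extension-defined setting whose initial value of 0 leaves the
+          extension disabled; the identifier and names are purely illustrative.
+        </t>
+        <figure title="Non-Normative Sketch of Setting-Based Negotiation">
+          <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// settingExtensionFoo is a hypothetical, extension-defined setting
+// identifier; its initial value (0) means "disabled".
+const settingExtensionFoo = 0xf000
+
+// extensionEnabled reports whether the extension may be used: both
+// this endpoint and its peer must have advertised a value of 1.
+func extensionEnabled(local, peer map[uint16]uint32) bool {
+  return local[settingExtensionFoo] == 1 && peer[settingExtensionFoo] == 1
+}
+
+func main() {
+  local := map[uint16]uint32{settingExtensionFoo: 1}
+  peer := map[uint16]uint32{} // peer did not advertise the setting
+  fmt.Println(extensionEnabled(local, peer)) // false
+}
+]]></artwork>
+        </figure>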
+ </section>
+ </section>
+
+ <section anchor="FrameTypes" title="Frame Definitions">
+ <t>
+ This specification defines a number of frame types, each identified by a unique 8-bit type
+ code. Each frame type serves a distinct purpose either in the establishment and management
+ of the connection as a whole, or of individual streams.
+ </t>
+ <t>
+ The transmission of specific frame types can alter the state of a connection. If endpoints
+ fail to maintain a synchronized view of the connection state, successful communication
+ within the connection will no longer be possible. Therefore, it is important that endpoints
+      have a shared comprehension of how the state is affected by the use of any given frame.
+ </t>
+
+ <section anchor="DATA" title="DATA">
+ <t>
+ DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
+ with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
+ response payloads.
+ </t>
+ <t>
+ DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to
+ obscure the size of messages.
+ </t>
+ <figure title="DATA Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +---------------+-----------------------------------------------+
+ | Data (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The DATA frame contains the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is optional and is only present if the PADDED flag is set.
+ </t>
+ <t hangText="Data:">
+ Application data. The amount of data is the remainder of the frame payload after
+ subtracting the length of the other fields that are present.
+ </t>
+ <t hangText="Padding:">
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The DATA frame defines the following flags:
+ <list style="hanging">
+ <t hangText="END_STREAM (0x1):">
+ Bit 1 being set indicates that this frame is the last that the endpoint will send for
+ the identified stream. Setting this flag causes the stream to enter one of <xref
+ target="StreamStates">the "half closed" states or the "closed" state</xref>.
+ </t>
+ <t hangText="PADDED (0x8):">
+ Bit 4 being set indicates that the Pad Length field and any padding that it describes
+ is present.
+ </t>
+ </list>
+ </t>
+ <t>
+ DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
+ identifier field is 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ DATA frames are subject to flow control and can only be sent when a stream is in the
+ "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
+ control, including Pad Length and Padding fields if present. If a DATA frame is received
+ whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
+ with a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>STREAM_CLOSED</x:ref>.
+ </t>
+ <t>
+ The total number of padding octets is determined by the value of the Pad Length field. If
+ the length of the padding is greater than the length of the frame payload, the recipient
+ MUST treat this as a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>PROTOCOL_ERROR</x:ref>.
+ <list style="hanging">
+ <t hangText="Note:">
+ A frame can be increased in size by one octet by including a Pad Length field with a
+ value of zero.
+ </t>
+ </list>
+ </t>
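+        <t>
+          The following non-normative Go sketch separates application data from padding
+          according to the layout above; the function name is illustrative only.
+        </t>
+        <figure title="Non-Normative Sketch of DATA Padding Handling">
+          <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "errors"
+  "fmt"
+)
+
+const flagPadded = 0x8
+
+// splitDataPayload returns the application data from a DATA frame
+// payload.  With PADDED set, the first octet is Pad Length and the
+// padding occupies the end of the payload.
+func splitDataPayload(flags byte, payload []byte) ([]byte, error) {
+  if flags&flagPadded == 0 {
+    return payload, nil
+  }
+  if len(payload) == 0 {
+    return nil, errors.New("PROTOCOL_ERROR: missing Pad Length")
+  }
+  padLen, rest := int(payload[0]), payload[1:]
+  if padLen > len(rest) {
+    return nil, errors.New("PROTOCOL_ERROR: padding exceeds payload")
+  }
+  return rest[:len(rest)-padLen], nil
+}
+
+func main() {
+  payload := append([]byte{3}, "hello"...) // Pad Length = 3
+  payload = append(payload, 0, 0, 0)       // padding octets are zero
+  data, err := splitDataPayload(flagPadded, payload)
+  fmt.Printf("%q %v\n", data, err) // "hello" <nil>
+}
+]]></artwork>
+        </figure>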
+ <t>
+ Padding is a security feature; see <xref target="padding"/>.
+ </t>
+ </section>
+
+ <section anchor="HEADERS" title="HEADERS">
+ <t>
+ The HEADERS frame (type=0x1) is used to <xref target="StreamStates">open a stream</xref>,
+ and additionally carries a header block fragment. HEADERS frames can be sent on a stream
+ in the "open" or "half closed (remote)" states.
+ </t>
+ <figure title="HEADERS Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |E| Stream Dependency? (31) |
+ +-+-------------+-----------------------------------------------+
+ | Weight? (8) |
+ +-+-------------+-----------------------------------------------+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The HEADERS frame payload has the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+ </t>
+ <t hangText="E:">
+ A single bit flag indicates that the stream dependency is exclusive, see <xref
+ target="StreamPriority"/>. This field is only present if the PRIORITY flag is set.
+ </t>
+ <t hangText="Stream Dependency:">
+ A 31-bit stream identifier for the stream that this stream depends on, see <xref
+ target="StreamPriority"/>. This field is only present if the PRIORITY flag is set.
+ </t>
+ <t hangText="Weight:">
+ An 8-bit weight for the stream, see <xref target="StreamPriority"/>. Add one to the
+ value to obtain a weight between 1 and 256. This field is only present if the
+ PRIORITY flag is set.
+ </t>
+ <t hangText="Header Block Fragment:">
+ A <xref target="HeaderBlock">header block fragment</xref>.
+ </t>
+ <t hangText="Padding:">
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The HEADERS frame defines the following flags:
+ <list style="hanging">
+ <x:lt hangText="END_STREAM (0x1):">
+ <t>
+ Bit 1 being set indicates that the <xref target="HeaderBlock">header block</xref> is
+ the last that the endpoint will send for the identified stream. Setting this flag
+ causes the stream to enter one of <xref target="StreamStates">"half closed"
+ states</xref>.
+ </t>
+ <t>
+ A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+ However, a HEADERS frame with the END_STREAM flag set can be followed by
+ <x:ref>CONTINUATION</x:ref> frames on the same stream. Logically, the
+ <x:ref>CONTINUATION</x:ref> frames are part of the HEADERS frame.
+ </t>
+ </x:lt>
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame contains an entire <xref
+ target="HeaderBlock">header block</xref> and is not followed by any
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ A HEADERS frame without the END_HEADERS flag set MUST be followed by a
+ <x:ref>CONTINUATION</x:ref> frame for the same stream. A receiver MUST treat the
+ receipt of any other type of frame or a frame on a different stream as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="PADDED (0x8):">
+ <t>
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+ </t>
+ </x:lt>
+ <x:lt hangText="PRIORITY (0x20):">
+ <t>
+ Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+ fields are present; see <xref target="StreamPriority"/>.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ The payload of a HEADERS frame contains a <xref target="HeaderBlock">header block
+ fragment</xref>. A header block that does not fit within a HEADERS frame is continued in
+ a <xref target="CONTINUATION">CONTINUATION frame</xref>.
+ </t>
+
+ <t>
+ HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+ stream identifier field is 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ The HEADERS frame changes the connection state as described in <xref
+ target="HeaderBlock"/>.
+ </t>
+
+ <t>
+ The HEADERS frame includes optional padding. Padding fields and flags are identical to
+ those defined for <xref target="DATA">DATA frames</xref>.
+ </t>
+ <t>
+ Prioritization information in a HEADERS frame is logically equivalent to a separate
+ <x:ref>PRIORITY</x:ref> frame, but inclusion in HEADERS avoids the potential for churn in
+        stream prioritization when new streams are created. Prioritization fields in HEADERS frames
+ subsequent to the first on a stream <xref target="reprioritize">reprioritize the
+ stream</xref>.
+ </t>
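+        <t>
+          The non-normative Go sketch below reads the optional fields of a HEADERS payload
+          according to the PADDED and PRIORITY flags; all names are illustrative. Note that
+          the wire value of Weight is one less than the effective weight.
+        </t>
+        <figure title="Non-Normative Sketch of HEADERS Payload Parsing">
+          <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "encoding/binary"
+  "errors"
+  "fmt"
+)
+
+const (
+  flagPadded   = 0x08
+  flagPriority = 0x20
+)
+
+// parseHeaders returns the header block fragment and, when the
+// PRIORITY flag is set, the exclusive bit, stream dependency and
+// effective weight (wire value plus one).
+func parseHeaders(flags byte, p []byte) ([]byte, bool, uint32, int, error) {
+  padLen := 0
+  if flags&flagPadded != 0 {
+    if len(p) < 1 {
+      return nil, false, 0, 0, errors.New("PROTOCOL_ERROR")
+    }
+    padLen, p = int(p[0]), p[1:]
+  }
+  var excl bool
+  var dep uint32
+  weight := 0
+  if flags&flagPriority != 0 {
+    if len(p) < 5 {
+      return nil, false, 0, 0, errors.New("PROTOCOL_ERROR")
+    }
+    v := binary.BigEndian.Uint32(p[:4])
+    excl, dep = v&0x80000000 != 0, v&0x7fffffff
+    weight = int(p[4]) + 1
+    p = p[5:]
+  }
+  if padLen > len(p) {
+    return nil, false, 0, 0, errors.New("PROTOCOL_ERROR")
+  }
+  return p[:len(p)-padLen], excl, dep, weight, nil
+}
+
+func main() {
+  // Exclusive dependency on stream 3, wire weight 15 (effective 16).
+  payload := []byte{0x80, 0, 0, 3, 15, 'h', 'p', 'a', 'c', 'k'}
+  frag, excl, dep, w, _ := parseHeaders(flagPriority, payload)
+  fmt.Printf("%q %v %d %d\n", frag, excl, dep, w) // "hpack" true 3 16
+}
+]]></artwork>
+        </figure>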
+ </section>
+
+ <section anchor="PRIORITY" title="PRIORITY">
+ <t>
+ The PRIORITY frame (type=0x2) specifies the <xref target="StreamPriority">sender-advised
+ priority of a stream</xref>. It can be sent at any time for an existing stream, including
+ closed streams. This enables reprioritization of existing streams.
+ </t>
+ <figure title="PRIORITY Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |E| Stream Dependency (31) |
+ +-+-------------+-----------------------------------------------+
+ | Weight (8) |
+ +-+-------------+
+]]></artwork>
+ </figure>
+ <t>
+ The payload of a PRIORITY frame contains the following fields:
+ <list style="hanging">
+ <t hangText="E:">
+ A single bit flag indicates that the stream dependency is exclusive, see <xref
+ target="StreamPriority"/>.
+ </t>
+ <t hangText="Stream Dependency:">
+ A 31-bit stream identifier for the stream that this stream depends on, see <xref
+ target="StreamPriority"/>.
+ </t>
+ <t hangText="Weight:">
+ An 8-bit weight for the identified stream dependency, see <xref
+ target="StreamPriority"/>. Add one to the value to obtain a weight between 1 and 256.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The PRIORITY frame does not define any flags.
+ </t>
+
+ <t>
+ The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+ with a stream identifier of 0x0, the recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+ "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+ sent between consecutive frames that comprise a single <xref target="HeaderBlock">header
+ block</xref>. Note that this frame could arrive after processing or frame sending has
+ completed, which would cause it to have no effect on the current stream. For a stream
+        that is in the "half closed (remote)" or "closed" state, this frame can only affect
+ processing of the current stream and not frame transmission.
+ </t>
+ <t>
+ The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+ This allows for the reprioritization of a group of dependent streams by altering the
+ priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
+ closed stream risks being ignored due to the peer having discarded priority state
+ information for that stream.
+ </t>
+ </section>
+
+ <section anchor="RST_STREAM" title="RST_STREAM">
+ <t>
+ The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by
+ the initiator of a stream, it indicates that they wish to cancel the stream or that an
+ error condition has occurred. When sent by the receiver of a stream, it indicates that
+ either the receiver is rejecting the stream, requesting that the stream be cancelled, or
+ that an error condition has occurred.
+ </t>
+ <figure title="RST_STREAM Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Error Code (32) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+
+ <t>
+ The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the <xref
+ target="ErrorCodes">error code</xref>. The error code indicates why the stream is being
+ terminated.
+ </t>
+
+ <t>
+ The RST_STREAM frame does not define any flags.
+ </t>
+
+ <t>
+ The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
+ closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
+ additional frames for that stream, with the exception of <x:ref>PRIORITY</x:ref>. However,
+ after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
+ additional frames sent on the stream that might have been sent by the peer prior to the
+ arrival of the RST_STREAM.
+ </t>
+
+ <t>
+ RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received
+ with a stream identifier of 0x0, the recipient MUST treat this as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM
+ frame identifying an idle stream is received, the recipient MUST treat this as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ </section>
+
+ <section anchor="SETTINGS" title="SETTINGS">
+ <t>
+ The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+ communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
+ also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
+ parameter can also be referred to as a "setting".
+ </t>
+ <t>
+ SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+ which are used by the receiving peer. Different values for the same parameter can be
+ advertised by each peer. For example, a client might set a high initial flow control
+ window, whereas a server might set a lower value to conserve resources.
+ </t>
+
+ <t>
+ A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+ sent at any other time by either endpoint over the lifetime of the connection.
+ Implementations MUST support all of the parameters defined by this specification.
+ </t>
+
+ <t>
+ Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+ Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+ frame does not need to maintain any state other than the current value of its
+ parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+ a receiver.
+ </t>
+ <t>
+ SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+ frame defines the following flag:
+ <list style="hanging">
+ <t hangText="ACK (0x1):">
+ Bit 1 being set indicates that this frame acknowledges receipt and application of the
+ peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
+ be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
+ other than 0 MUST be treated as a <xref target="ConnectionErrorHandler">connection
+              error</xref> of type <x:ref>FRAME_SIZE_ERROR</x:ref>. For more information, see <xref
+ target="SettingsSync">Settings Synchronization</xref>.
+ </t>
+ </list>
+ </t>
+ <t>
+ SETTINGS frames always apply to a connection, never a single stream. The stream
+ identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+ frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+ with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
+ MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <section title="SETTINGS Format" anchor="SettingFormat">
+ <t>
+ The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
+ an unsigned 16-bit setting identifier and an unsigned 32-bit value.
+ </t>
+
+ <figure title="Setting Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Identifier (16) |
+ +-------------------------------+-------------------------------+
+ | Value (32) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
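+          <t>
+            A non-normative Go sketch of this layout follows; the parser name is
+            illustrative, and a payload whose length is not a multiple of six octets is
+            treated as badly formed.
+          </t>
+          <figure title="Non-Normative Sketch of SETTINGS Parsing">
+            <artwork type="inline"><![CDATA[
+package main
+
+import (
+  "encoding/binary"
+  "errors"
+  "fmt"
+)
+
+// parseSettings reads a sequence of 6-octet entries, each a 16-bit
+// identifier followed by a 32-bit value; later values replace
+// earlier ones for the same identifier.
+func parseSettings(payload []byte) (map[uint16]uint32, error) {
+  if len(payload)%6 != 0 {
+    return nil, errors.New("PROTOCOL_ERROR: badly formed SETTINGS")
+  }
+  settings := make(map[uint16]uint32)
+  for ; len(payload) > 0; payload = payload[6:] {
+    id := binary.BigEndian.Uint16(payload[:2])
+    settings[id] = binary.BigEndian.Uint32(payload[2:6])
+  }
+  return settings, nil
+}
+
+func main() {
+  // SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100,
+  // SETTINGS_INITIAL_WINDOW_SIZE (0x4) = 65535.
+  payload := []byte{0, 3, 0, 0, 0, 100, 0, 4, 0, 0, 0xff, 0xff}
+  fmt.Println(parseSettings(payload))
+}
+]]></artwork>
+          </figure>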
+ </section>
+
+ <section anchor="SettingValues" title="Defined SETTINGS Parameters">
+ <t>
+ The following parameters are defined:
+ <list style="hanging">
+ <x:lt hangText="SETTINGS_HEADER_TABLE_SIZE (0x1):"
+ anchor="SETTINGS_HEADER_TABLE_SIZE">
+ <t>
+ Allows the sender to inform the remote endpoint of the maximum size of the header
+ compression table used to decode header blocks, in octets. The encoder can select
+ any size equal to or less than this value by using signaling specific to the
+ header compression format inside a header block. The initial value is 4,096
+ octets.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_ENABLE_PUSH (0x2):"
+ anchor="SETTINGS_ENABLE_PUSH">
+ <t>
+                This setting can be used to disable <xref target="PushResources">server
+ push</xref>. An endpoint MUST NOT send a <x:ref>PUSH_PROMISE</x:ref> frame if it
+ receives this parameter set to a value of 0. An endpoint that has both set this
+ parameter to 0 and had it acknowledged MUST treat the receipt of a
+ <x:ref>PUSH_PROMISE</x:ref> frame as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The initial value is 1, which indicates that server push is permitted. Any value
+ other than 0 or 1 MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_CONCURRENT_STREAMS (0x3):"
+ anchor="SETTINGS_MAX_CONCURRENT_STREAMS">
+ <t>
+ Indicates the maximum number of concurrent streams that the sender will allow.
+ This limit is directional: it applies to the number of streams that the sender
+ permits the receiver to create. Initially there is no limit to this value. It is
+ recommended that this value be no smaller than 100, so as to not unnecessarily
+ limit parallelism.
+ </t>
+ <t>
+ A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+                by endpoints. A zero value does prevent the creation of new streams; however, this
+ can also happen for any limit that is exhausted with active streams. Servers
+ SHOULD only set a zero value for short durations; if a server does not wish to
+ accept requests, closing the connection could be preferable.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_INITIAL_WINDOW_SIZE (0x4):"
+ anchor="SETTINGS_INITIAL_WINDOW_SIZE">
+ <t>
+ Indicates the sender's initial window size (in octets) for stream level flow
+ control. The initial value is 2<x:sup>16</x:sup>-1 (65,535) octets.
+ </t>
+ <t>
+ This setting affects the window size of all streams, including existing streams,
+ see <xref target="InitialWindowSize"/>.
+ </t>
+ <t>
+ Values above the maximum flow control window size of 2<x:sup>31</x:sup>-1 MUST
+ be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of
+ type <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_FRAME_SIZE (0x5):"
+ anchor="SETTINGS_MAX_FRAME_SIZE">
+ <t>
+ Indicates the size of the largest frame payload that the sender is willing to
+ receive, in octets.
+ </t>
+ <t>
+ The initial value is 2<x:sup>14</x:sup> (16,384) octets. The value advertised by
+ an endpoint MUST be between this initial value and the maximum allowed frame size
+ (2<x:sup>24</x:sup>-1 or 16,777,215 octets), inclusive. Values outside this range
+ MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref>
+ of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="SETTINGS_MAX_HEADER_LIST_SIZE (0x6):"
+ anchor="SETTINGS_MAX_HEADER_LIST_SIZE">
+ <t>
+ This advisory setting informs a peer of the maximum size of header list that the
+ sender is prepared to accept, in octets. The value is based on the uncompressed
+ size of header fields, including the length of the name and value in octets plus
+ an overhead of 32 octets for each header field.
+ </t>
+ <t>
+ For any given request, a lower limit than what is advertised MAY be enforced. The
+ initial value of this setting is unlimited.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+ MUST ignore that setting.
+ </t>
+ </section>
+
+ <section anchor="SettingsSync" title="Settings Synchronization">
+ <t>
+ Most values in SETTINGS benefit from or require an understanding of when the peer has
+ received and applied the changed parameter values. In order to provide
+ such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
+ is not set MUST apply the updated parameters as soon as possible upon receipt.
+ </t>
+ <t>
+ The values in the SETTINGS frame MUST be processed in the order they appear, with no
+ other frame processing between values. Unsupported parameters MUST be ignored. Once
+ all values have been processed, the recipient MUST immediately emit a SETTINGS frame
+ with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
+ of the altered parameters can rely on the setting having been applied.
+ </t>
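+          <t>
+            The Go sketch below is a non-normative illustration of this processing; the
+            sendAck hook and the other names are hypothetical.
+          </t>
+          <figure title="Non-Normative Sketch of Settings Synchronization">
+            <artwork type="inline"><![CDATA[
+package main
+
+import "fmt"
+
+// setting is one identifier/value pair from a SETTINGS frame.
+type setting struct {
+  ID    uint16
+  Value uint32
+}
+
+// applySettings applies parameters in the order received, ignores
+// unsupported identifiers, and emits an acknowledgement once every
+// value has been processed.
+func applySettings(cur map[uint16]uint32, recv []setting,
+  known map[uint16]bool, sendAck func() error) error {
+  for _, s := range recv {
+    if !known[s.ID] {
+      continue // unsupported parameters are ignored
+    }
+    cur[s.ID] = s.Value // the last value seen wins
+  }
+  return sendAck() // SETTINGS frame with the ACK flag set
+}
+
+func main() {
+  cur := map[uint16]uint32{0x4: 65535}
+  known := map[uint16]bool{0x3: true, 0x4: true}
+  recv := []setting{{0x4, 1 << 20}, {0xff00, 1}} // second is unknown
+  _ = applySettings(cur, recv, known, func() error {
+    fmt.Println("SETTINGS ACK sent")
+    return nil
+  })
+  fmt.Println(cur) // map[4:1048576]
+}
+]]></artwork>
+          </figure>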
+ <t>
+ If the sender of a SETTINGS frame does not receive an acknowledgement within a
+ reasonable amount of time, it MAY issue a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>SETTINGS_TIMEOUT</x:ref>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="PUSH_PROMISE" title="PUSH_PROMISE">
+ <t>
+ The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
+ streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
+ 31-bit identifier of the stream the endpoint plans to create along with a set of headers
+ that provide additional context for the stream. <xref target="PushResources"/> contains a
+ thorough description of the use of PUSH_PROMISE frames.
+ </t>
+
+ <figure title="PUSH_PROMISE Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |R| Promised Stream ID (31) |
+ +-+-----------------------------+-------------------------------+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+ | Padding (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The PUSH_PROMISE frame payload has the following fields:
+ <list style="hanging">
+ <t hangText="Pad Length:">
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+ </t>
+ <t hangText="R:">
+ A single reserved bit.
+ </t>
+ <t hangText="Promised Stream ID:">
+ An unsigned 31-bit integer that identifies the stream that is reserved by the
+ PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next
+ stream sent by the sender (see <xref target="StreamIdentifiers">new stream
+ identifier</xref>).
+ </t>
+ <t hangText="Header Block Fragment:">
+ A <xref target="HeaderBlock">header block fragment</xref> containing request header
+ fields.
+ </t>
+ <t hangText="Padding:">
+ Padding octets.
+ </t>
+ </list>
+ </t>
+
+ <t>
+ The PUSH_PROMISE frame defines the following flags:
+ <list style="hanging">
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame contains an entire <xref
+ target="HeaderBlock">header block</xref> and is not followed by any
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
+ CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any
+ other type of frame or a frame on a different stream as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ <x:lt hangText="PADDED (0x8):">
+ <t>
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
+ identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the
+ stream identifier field specifies the value 0x0, a recipient MUST respond with a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ Promised streams are not required to be used in the order they are promised. The
+ PUSH_PROMISE only reserves stream identifiers for later use.
+ </t>
+
+ <t>
+ PUSH_PROMISE MUST NOT be sent if the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting of the
+ peer endpoint is set to 0. An endpoint that has set this setting and has received
+ acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
+ <x:ref>RST_STREAM</x:ref> referencing the promised stream identifier back to the sender of
+ the PUSH_PROMISE.
+ </t>
+
+ <t>
+ A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a <xref
+ target="HeaderBlock">header block</xref> potentially modifies the state maintained for
+ header compression. PUSH_PROMISE also reserves a stream for later use, causing the
+ promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a
+ stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
+ ensure that the promised stream is a valid choice for a <xref
+ target="StreamIdentifiers">new stream identifier</xref> (that is, the promised stream MUST
+ be in the "idle" state).
+ </t>
+ <t>
+ Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
+ state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a
+ stream that is neither "open" nor "half closed (local)" as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. However, an endpoint that has sent
+ <x:ref>RST_STREAM</x:ref> on the associated stream MUST handle PUSH_PROMISE frames that
+ might have been created before the <x:ref>RST_STREAM</x:ref> frame is received and
+ processed.
+ </t>
+ <t>
+ A receiver MUST treat the receipt of a PUSH_PROMISE that promises an <xref
+ target="StreamIdentifiers">illegal stream identifier</xref> (that is, an identifier for a
+ stream that is not currently in the "idle" state) as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical
+ to those defined for <xref target="DATA">DATA frames</xref>.
+ </t>
+ </section>
+
+ <section anchor="PING" title="PING">
+ <t>
+ The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
+ sender, as well as determining whether an idle connection is still functional. PING
+ frames can be sent from any endpoint.
+ </t>
+ <figure title="PING Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ | Opaque Data (64) |
+ | |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+
+ <t>
+ In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
+ A sender can include any value it chooses and use those bytes in any fashion.
+ </t>
+ <t>
+ Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
+ the ACK flag set in response, with an identical payload. PING responses SHOULD be given
+ higher priority than any other frame.
+ </t>
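+        <figure>
+          <preamble>
+            A minimal, non-normative Go sketch of these rules follows; the pingFrame
+            type and the writePingAck callback are hypothetical and serve only to
+            illustrate the checks described in this section.
+          </preamble>
+          <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "errors"
+
+// pingFrame is a hypothetical in-memory view of a received PING frame.
+type pingFrame struct {
+  StreamID uint32
+  Ack      bool
+  Data     []byte // opaque payload
+}
+
+var (
+  errProtocol  = errors.New("PROTOCOL_ERROR")
+  errFrameSize = errors.New("FRAME_SIZE_ERROR")
+)
+
+// handlePing echoes the opaque data with the ACK flag set when a
+// response is required, and rejects malformed PING frames.
+func handlePing(f pingFrame, writePingAck func(data [8]byte) error) error {
+  if f.StreamID != 0 {
+    return errProtocol // PING frames are not associated with a stream
+  }
+  if len(f.Data) != 8 {
+    return errFrameSize // the payload is exactly 8 octets
+  }
+  if f.Ack {
+    return nil // this is a PING response; it MUST NOT be answered
+  }
+  var data [8]byte
+  copy(data[:], f.Data)
+  return writePingAck(data) // respond with an identical payload
+}
+]]></artwork>
+        </figure>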
+
+ <t>
+ The PING frame defines the following flags:
+ <list style="hanging">
+ <t hangText="ACK (0x1):">
+ Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST
+ set this flag in PING responses. An endpoint MUST NOT respond to PING frames
+ containing this flag.
+ </t>
+ </list>
+ </t>
+ <t>
+ PING frames are not associated with any individual stream. If a PING frame is received
+ with a stream identifier field value other than 0x0, the recipient MUST respond with a
+ <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Receipt of a PING frame with a length field value other than 8 MUST be treated as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FRAME_SIZE_ERROR</x:ref>.
+ </t>
+
+ </section>
+
+ <section anchor="GOAWAY" title="GOAWAY">
+ <t>
+ The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+ connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+ will ignore frames sent on any new streams with identifiers higher than the included last
+ stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+ connection, although a new connection can be established for new streams.
+ </t>
+ <t>
+ The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+ streams, while still finishing processing of previously established streams. This enables
+          administrative actions, like server maintenance.
+ </t>
+ <t>
+ There is an inherent race condition between an endpoint starting new streams and the
+ remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+ identifier of the last peer-initiated stream which was or might be processed on the
+ sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+ the identified stream is the highest numbered stream initiated by the client.
+ </t>
+ <t>
+ If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+ than what is indicated in the GOAWAY frame, those streams are not or will not be
+ processed. The receiver of the GOAWAY frame can treat the streams as though they had
+ never been created at all, thereby allowing those streams to be retried later on a new
+ connection.
+ </t>
+ <t>
+ Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+ can know whether a stream has been partially processed or not. For example, if an HTTP
+ client sends a POST at the same time that a server closes a connection, the client cannot
+ know if the server started to process that POST request if the server does not send a
+ GOAWAY frame to indicate what streams it might have acted on.
+ </t>
+ <t>
+ An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+ peers.
+ </t>
+
+ <figure title="GOAWAY Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R| Last-Stream-ID (31) |
+ +-+-------------------------------------------------------------+
+ | Error Code (32) |
+ +---------------------------------------------------------------+
+ | Additional Debug Data (*) |
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The GOAWAY frame does not define any flags.
+ </t>
+ <t>
+ The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
+ a <x:ref>GOAWAY</x:ref> frame with a stream identifier other than 0x0 as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ The last stream identifier in the GOAWAY frame contains the highest numbered stream
+ identifier for which the sender of the GOAWAY frame might have taken some action on, or
+ might yet take action on. All streams up to and including the identified stream might
+ have been processed in some way. The last stream identifier can be set to 0 if no streams
+ were processed.
+ <list style="hanging">
+ <t hangText="Note:">
+ In this context, "processed" means that some data from the stream was passed to some
+ higher layer of software that might have taken some action as a result.
+ </t>
+ </list>
+ If a connection terminates without a GOAWAY frame, the last stream identifier is
+ effectively the highest possible stream identifier.
+ </t>
+ <t>
+ On streams with lower or equal numbered identifiers that were not closed completely prior
+ to the connection being closed, re-attempting requests, transactions, or any protocol
+ activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+ DELETE. Any protocol activity that uses higher numbered streams can be safely retried
+ using a new connection.
+ </t>
+ <t>
+ Activity on streams numbered lower or equal to the last stream identifier might still
+ complete successfully. The sender of a GOAWAY frame might gracefully shut down a
+ connection by sending a GOAWAY frame, maintaining the connection in an open state until
+ all in-progress streams complete.
+ </t>
+ <t>
+ An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
+ endpoint that sends GOAWAY with <x:ref>NO_ERROR</x:ref> during graceful shutdown could
+          subsequently encounter a condition that requires immediate termination of the connection.
+ The last stream identifier from the last GOAWAY frame received indicates which streams
+ could have been acted upon. Endpoints MUST NOT increase the value they send in the last
+ stream identifier, since the peers might already have retried unprocessed requests on
+ another connection.
+ </t>
+ <t>
+ A client that is unable to retry requests loses all requests that are in flight when the
+ server closes the connection. This is especially true for intermediaries that might
+ not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
+ a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+ 2<x:sup>31</x:sup>-1 and a <x:ref>NO_ERROR</x:ref> code. This signals to the client that
+ a shutdown is imminent and that no further requests can be initiated. After waiting at
+ least one round trip time, the server can send another GOAWAY frame with an updated last
+ stream identifier. This ensures that a connection can be cleanly shut down without losing
+ requests.
+ </t>
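+        <figure>
+          <preamble>
+            As a non-normative illustration, a server-side graceful shutdown might be
+            sketched in Go as follows; writeGoAway and highestProcessedStream are
+            hypothetical hooks into a server's connection state.
+          </preamble>
+          <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "time"
+
+// gracefulShutdown performs the two-step GOAWAY sequence described above.
+func gracefulShutdown(
+  writeGoAway func(lastStreamID, errorCode uint32) error,
+  highestProcessedStream func() uint32,
+  roundTripTime time.Duration,
+) error {
+  const noError = 0x0
+  // The first GOAWAY uses 2^31-1 to signal that shutdown is imminent
+  // without committing to a last stream identifier yet.
+  if err := writeGoAway(1<<31-1, noError); err != nil {
+    return err
+  }
+  // Wait at least one round trip so that in-flight requests arrive.
+  time.Sleep(roundTripTime)
+  // The second GOAWAY carries the real last stream identifier; it MUST
+  // NOT be higher than any value sent previously.
+  return writeGoAway(highestProcessedStream(), noError)
+}
+]]></artwork>
+        </figure>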
+
+ <t>
+ After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+ higher than the identified last stream. However, any frames that alter connection state
+ cannot be completely ignored. For instance, <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> and <x:ref>CONTINUATION</x:ref> frames MUST be minimally
+ processed to ensure the state maintained for header compression is consistent (see <xref
+ target="HeaderBlock"/>); similarly DATA frames MUST be counted toward the connection flow
+ control window. Failure to process these frames can cause flow control or header
+ compression state to become unsynchronized.
+ </t>
+
+ <t>
+ The GOAWAY frame also contains a 32-bit <xref target="ErrorCodes">error code</xref> that
+ contains the reason for closing the connection.
+ </t>
+ <t>
+ Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug
+ data is intended for diagnostic purposes only and carries no semantic value. Debug
+ information could contain security- or privacy-sensitive data. Logged or otherwise
+ persistently stored debug data MUST have adequate safeguards to prevent unauthorized
+ access.
+ </t>
+ </section>
+
+ <section anchor="WINDOW_UPDATE" title="WINDOW_UPDATE">
+ <t>
+ The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see <xref
+ target="FlowControl"/> for an overview.
+ </t>
+ <t>
+ Flow control operates at two levels: on each individual stream and on the entire
+ connection.
+ </t>
+ <t>
+ Both types of flow control are hop-by-hop; that is, only between the two endpoints.
+ Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
+ However, throttling of data transfer by any receiver can indirectly cause the propagation
+ of flow control information toward the original sender.
+ </t>
+ <t>
+ Flow control only applies to frames that are identified as being subject to flow control.
+ Of the frame types defined in this document, this includes only <x:ref>DATA</x:ref> frames.
+ Frames that are exempt from flow control MUST be accepted and processed, unless the
+ receiver is unable to assign resources to handling the frame. A receiver MAY respond with
+ a <xref target="StreamErrorHandler">stream error</xref> or <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FLOW_CONTROL_ERROR</x:ref> if it is unable to accept a frame.
+ </t>
+ <figure title="WINDOW_UPDATE Payload Format">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R| Window Size Increment (31) |
+ +-+-------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+ indicating the number of octets that the sender can transmit in addition to the existing
+ flow control window. The legal range for the increment to the flow control window is 1 to
+ 2<x:sup>31</x:sup>-1 (0x7fffffff) octets.
+ </t>
+ <t>
+ The WINDOW_UPDATE frame does not define any flags.
+ </t>
+ <t>
+ The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+ former case, the frame's stream identifier indicates the affected stream; in the latter,
+ the value "0" indicates that the entire connection is the subject of the frame.
+ </t>
+ <t>
+          A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+ increment of 0 as a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>; errors on the connection flow control window MUST be
+ treated as a <xref target="ConnectionErrorHandler">connection error</xref>.
+ </t>
+ <t>
+ WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+          or "closed" stream.  A receiver MUST NOT treat this as an error (see <xref
+          target="StreamStates"/>).
+ </t>
+ <t>
+ A receiver that receives a flow controlled frame MUST always account for its contribution
+ against the connection flow control window, unless the receiver treats this as a <xref
+ target="ConnectionErrorHandler">connection error</xref>. This is necessary even if the
+ frame is in error. Since the sender counts the frame toward the flow control window, if
+ the receiver does not, the flow control window at sender and receiver can become
+ different.
+ </t>
+
+ <section title="The Flow Control Window">
+ <t>
+ Flow control in HTTP/2 is implemented using a window kept by each sender on every
+ stream. The flow control window is a simple integer value that indicates how many octets
+ of data the sender is permitted to transmit; as such, its size is a measure of the
+ buffering capacity of the receiver.
+ </t>
+ <t>
+ Two flow control windows are applicable: the stream flow control window and the
+ connection flow control window. The sender MUST NOT send a flow controlled frame with a
+ length that exceeds the space available in either of the flow control windows advertised
+ by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+ empty <x:ref>DATA</x:ref> frame) MAY be sent if there is no available space in either
+ flow control window.
+ </t>
+ <t>
+ For flow control calculations, the 9 octet frame header is not counted.
+ </t>
+ <t>
+ After sending a flow controlled frame, the sender reduces the space available in both
+ windows by the length of the transmitted frame.
+ </t>
+ <t>
+ The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+ space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+ and connection level flow control windows.
+ </t>
+ <t>
+ A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+ amount specified in the frame.
+ </t>
+ <t>
+ A sender MUST NOT allow a flow control window to exceed 2<x:sup>31</x:sup>-1 octets.
+ If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+            maximum, it MUST terminate either the stream or the connection, as appropriate.  For
+            streams, the sender sends a <x:ref>RST_STREAM</x:ref> with an error code of
+            <x:ref>FLOW_CONTROL_ERROR</x:ref>; for the connection, a <x:ref>GOAWAY</x:ref>
+            frame with an error code of <x:ref>FLOW_CONTROL_ERROR</x:ref> is sent.
+ </t>
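+          <figure>
+            <preamble>
+              The following non-normative Go sketch shows sender-side accounting for
+              the two windows and the 2^31-1 ceiling; the sendWindows type and
+              function names are hypothetical.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "errors"
+
+var errFlowControl = errors.New("FLOW_CONTROL_ERROR")
+
+// sendWindows is a hypothetical sender-side view of the two flow control
+// windows that govern one stream.
+type sendWindows struct {
+  stream     int64
+  connection int64
+}
+
+// consume is called after sending a flow controlled frame with a payload
+// of n octets (the 9 octet frame header is not counted).
+func (w *sendWindows) consume(n int64) {
+  w.stream -= n
+  w.connection -= n
+}
+
+// applyWindowUpdate credits one window with a WINDOW_UPDATE increment and
+// enforces the 2^31-1 maximum; the caller decides whether a violation is
+// a stream error or a connection error.
+func applyWindowUpdate(window *int64, increment uint32) error {
+  const maxWindow = 1<<31 - 1
+  if *window+int64(increment) > maxWindow {
+    return errFlowControl
+  }
+  *window += int64(increment)
+  return nil
+}
+]]></artwork>
+          </figure>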
+ <t>
+ Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+ completely asynchronous with respect to each other. This property allows a receiver to
+ aggressively update the window size kept by the sender to prevent streams from stalling.
+ </t>
+ </section>
+
+ <section anchor="InitialWindowSize" title="Initial Flow Control Window Size">
+ <t>
+ When an HTTP/2 connection is first established, new streams are created with an initial
+ flow control window size of 65,535 octets. The connection flow control window is 65,535
+ octets. Both endpoints can adjust the initial window size for new streams by including
+ a value for <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> in the <x:ref>SETTINGS</x:ref>
+ frame that forms part of the connection preface. The connection flow control window can
+ only be changed using WINDOW_UPDATE frames.
+ </t>
+ <t>
+ Prior to receiving a <x:ref>SETTINGS</x:ref> frame that sets a value for
+ <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref>, an endpoint can only use the default
+ initial window size when sending flow controlled frames. Similarly, the connection flow
+ control window is set to the default initial window size until a WINDOW_UPDATE frame is
+ received.
+ </t>
+ <t>
+ A <x:ref>SETTINGS</x:ref> frame can alter the initial flow control window size for all
+ current streams. When the value of <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> changes,
+ a receiver MUST adjust the size of all stream flow control windows that it maintains by
+ the difference between the new value and the old value.
+ </t>
+ <t>
+ A change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> can cause the available space in
+ a flow control window to become negative. A sender MUST track the negative flow control
+ window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
+ frames that cause the flow control window to become positive.
+ </t>
+ <t>
+ For example, if the client sends 60KB immediately on connection establishment, and the
+ server sets the initial window size to be 16KB, the client will recalculate the
+ available flow control window to be -44KB on receipt of the <x:ref>SETTINGS</x:ref>
+ frame. The client retains a negative flow control window until WINDOW_UPDATE frames
+ restore the window to being positive, after which the client can resume sending.
+ </t>
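+          <figure>
+            <preamble>
+              The arithmetic of this example can be sketched (non-normatively) in Go;
+              adjustStreamWindows and the map of per-stream windows are illustrative
+              only.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+// adjustStreamWindows applies a SETTINGS_INITIAL_WINDOW_SIZE change to
+// every stream flow control window by the difference between the new
+// value and the old value.  Windows are allowed to become negative.
+func adjustStreamWindows(windows map[uint32]int64, oldInitial, newInitial uint32) {
+  delta := int64(newInitial) - int64(oldInitial)
+  for id := range windows {
+    windows[id] += delta
+  }
+}
+
+func exampleNegativeWindow() int64 {
+  // The client sent 60KB against the default 65,535 octet window...
+  windows := map[uint32]int64{1: 65535 - 60*1024}
+  // ...and then receives SETTINGS_INITIAL_WINDOW_SIZE = 16KB.
+  adjustStreamWindows(windows, 65535, 16*1024)
+  return windows[1] // -45056 octets; the sender pauses until positive
+}
+]]></artwork>
+          </figure>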
+ <t>
+ A <x:ref>SETTINGS</x:ref> frame cannot alter the connection flow control window.
+ </t>
+ <t>
+ An endpoint MUST treat a change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> that
+ causes any flow control window to exceed the maximum size as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+ </t>
+ </section>
+
+ <section title="Reducing the Stream Window Size">
+ <t>
+ A receiver that wishes to use a smaller flow control window than the current size can
+ send a new <x:ref>SETTINGS</x:ref> frame. However, the receiver MUST be prepared to
+ receive data that exceeds this window size, since the sender might send data that
+ exceeds the lower limit prior to processing the <x:ref>SETTINGS</x:ref> frame.
+ </t>
+ <t>
+ After sending a SETTINGS frame that reduces the initial flow control window size, a
+ receiver has two options for handling streams that exceed flow control limits:
+ <list style="numbers">
+ <t>
+ The receiver can immediately send <x:ref>RST_STREAM</x:ref> with
+ <x:ref>FLOW_CONTROL_ERROR</x:ref> error code for the affected streams.
+ </t>
+ <t>
+              The receiver can accept the streams and tolerate the resulting head-of-line
+              blocking, sending WINDOW_UPDATE frames as it consumes data.
+ </t>
+ </list>
+ </t>
+ </section>
+ </section>
+
+ <section anchor="CONTINUATION" title="CONTINUATION">
+ <t>
+ The CONTINUATION frame (type=0x9) is used to continue a sequence of <xref
+ target="HeaderBlock">header block fragments</xref>. Any number of CONTINUATION frames can
+ be sent on an existing stream, as long as the preceding frame is on the same stream and is
+ a <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the
+ END_HEADERS flag set.
+ </t>
+
+ <figure title="CONTINUATION Frame Payload">
+ <artwork type="inline"><![CDATA[
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Header Block Fragment (*) ...
+ +---------------------------------------------------------------+
+]]></artwork>
+ </figure>
+ <t>
+ The CONTINUATION frame payload contains a <xref target="HeaderBlock">header block
+ fragment</xref>.
+ </t>
+
+ <t>
+ The CONTINUATION frame defines the following flag:
+ <list style="hanging">
+ <x:lt hangText="END_HEADERS (0x4):">
+ <t>
+ Bit 3 being set indicates that this frame ends a <xref target="HeaderBlock">header
+ block</xref>.
+ </t>
+ <t>
+ If the END_HEADERS bit is not set, this frame MUST be followed by another
+ CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or
+ a frame on a different stream as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ </x:lt>
+ </list>
+ </t>
+
+ <t>
+ The CONTINUATION frame changes the connection state as defined in <xref
+ target="HeaderBlock" />.
+ </t>
+
+ <t>
+ CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
+ whose stream identifier field is 0x0, the recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <t>
+ A CONTINUATION frame MUST be preceded by a <x:ref>HEADERS</x:ref>,
+ <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the END_HEADERS flag set. A
+ recipient that observes violation of this rule MUST respond with a <xref
+ target="ConnectionErrorHandler"> connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
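+        <figure>
+          <preamble>
+            These rules amount to a small connection-level state machine; the following
+            Go fragment is a non-normative sketch of it, with hypothetical names.
+          </preamble>
+          <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "errors"
+
+var errProtocol = errors.New("PROTOCOL_ERROR")
+
+// headerBlockState records whether a header block is open, i.e. a HEADERS
+// or PUSH_PROMISE frame without END_HEADERS has been received.
+type headerBlockState struct {
+  open     bool
+  streamID uint32
+}
+
+// checkFrame enforces the CONTINUATION rules: while a header block is
+// open, only CONTINUATION frames on the same stream are permitted, and a
+// CONTINUATION without a preceding header block is an error.
+func (s *headerBlockState) checkFrame(
+  frameType uint8, streamID uint32, endHeaders bool) error {
+  const (
+    typeHeaders      = 0x1
+    typePushPromise  = 0x5
+    typeContinuation = 0x9
+  )
+  if s.open {
+    if frameType != typeContinuation || streamID != s.streamID {
+      return errProtocol // connection error of type PROTOCOL_ERROR
+    }
+    s.open = !endHeaders // the block stays open until END_HEADERS
+    return nil
+  }
+  switch frameType {
+  case typeContinuation:
+    return errProtocol // no preceding HEADERS or PUSH_PROMISE
+  case typeHeaders, typePushPromise:
+    if !endHeaders {
+      s.open, s.streamID = true, streamID
+    }
+  }
+  return nil
+}
+]]></artwork>
+        </figure>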
+ </section>
+ </section>
+
+ <section anchor="ErrorCodes" title="Error Codes">
+ <t>
+ Error codes are 32-bit fields that are used in <x:ref>RST_STREAM</x:ref> and
+ <x:ref>GOAWAY</x:ref> frames to convey the reasons for the stream or connection error.
+ </t>
+
+ <t>
+ Error codes share a common code space. Some error codes apply only to either streams or the
+ entire connection and have no defined semantics in the other context.
+ </t>
+
+ <t>
+ The following error codes are defined:
+ <list style="hanging">
+ <t hangText="NO_ERROR (0x0):" anchor="NO_ERROR">
+ The associated condition is not as a result of an error. For example, a
+ <x:ref>GOAWAY</x:ref> might include this code to indicate graceful shutdown of a
+ connection.
+ </t>
+ <t hangText="PROTOCOL_ERROR (0x1):" anchor="PROTOCOL_ERROR">
+ The endpoint detected an unspecific protocol error. This error is for use when a more
+ specific error code is not available.
+ </t>
+ <t hangText="INTERNAL_ERROR (0x2):" anchor="INTERNAL_ERROR">
+ The endpoint encountered an unexpected internal error.
+ </t>
+ <t hangText="FLOW_CONTROL_ERROR (0x3):" anchor="FLOW_CONTROL_ERROR">
+ The endpoint detected that its peer violated the flow control protocol.
+ </t>
+ <t hangText="SETTINGS_TIMEOUT (0x4):" anchor="SETTINGS_TIMEOUT">
+ The endpoint sent a <x:ref>SETTINGS</x:ref> frame, but did not receive a response in a
+ timely manner. See <xref target="SettingsSync">Settings Synchronization</xref>.
+ </t>
+ <t hangText="STREAM_CLOSED (0x5):" anchor="STREAM_CLOSED">
+ The endpoint received a frame after a stream was half closed.
+ </t>
+ <t hangText="FRAME_SIZE_ERROR (0x6):" anchor="FRAME_SIZE_ERROR">
+ The endpoint received a frame with an invalid size.
+ </t>
+ <t hangText="REFUSED_STREAM (0x7):" anchor="REFUSED_STREAM">
+ The endpoint refuses the stream prior to performing any application processing, see
+ <xref target="Reliability"/> for details.
+ </t>
+ <t hangText="CANCEL (0x8):" anchor="CANCEL">
+ Used by the endpoint to indicate that the stream is no longer needed.
+ </t>
+ <t hangText="COMPRESSION_ERROR (0x9):" anchor="COMPRESSION_ERROR">
+ The endpoint is unable to maintain the header compression context for the connection.
+ </t>
+ <t hangText="CONNECT_ERROR (0xa):" anchor="CONNECT_ERROR">
+ The connection established in response to a <xref target="CONNECT">CONNECT
+ request</xref> was reset or abnormally closed.
+ </t>
+ <t hangText="ENHANCE_YOUR_CALM (0xb):" anchor="ENHANCE_YOUR_CALM">
+ The endpoint detected that its peer is exhibiting a behavior that might be generating
+ excessive load.
+ </t>
+ <t hangText="INADEQUATE_SECURITY (0xc):" anchor="INADEQUATE_SECURITY">
+ The underlying transport has properties that do not meet minimum security
+ requirements (see <xref target="TLSUsage"/>).
+ </t>
+ </list>
+ </t>
+ <t>
+ Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+ treated by an implementation as being equivalent to <x:ref>INTERNAL_ERROR</x:ref>.
+ </t>
+ </section>
+
+ <section anchor="HTTPLayer" title="HTTP Message Exchanges">
+ <t>
+ HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+ that, from the application perspective, the features of the protocol are largely
+ unchanged. To achieve this, all request and response semantics are preserved, although the
+ syntax of conveying those semantics has changed.
+ </t>
+ <t>
+ Thus, the specification and requirements of HTTP/1.1 Semantics and Content <xref
+ target="RFC7231"/>, Conditional Requests <xref target="RFC7232"/>, Range Requests <xref
+ target="RFC7233"/>, Caching <xref target="RFC7234"/> and Authentication <xref
+ target="RFC7235"/> are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
+ and Routing <xref target="RFC7230"/>, such as the HTTP and HTTPS URI schemes, are also
+        applicable in HTTP/2, but the expression of those semantics for this protocol is defined
+ in the sections below.
+ </t>
+
+ <section anchor="HttpSequence" title="HTTP Request/Response Exchange">
+ <t>
+ A client sends an HTTP request on a new stream, using a previously unused <xref
+ target="StreamIdentifiers">stream identifier</xref>. A server sends an HTTP response on
+ the same stream as the request.
+ </t>
+ <t>
+ An HTTP message (request or response) consists of:
+ <list style="numbers">
+ <t>
+ for a response only, zero or more <x:ref>HEADERS</x:ref> frames (each followed by zero
+ or more <x:ref>CONTINUATION</x:ref> frames) containing the message headers of
+ informational (1xx) HTTP responses (see <xref target="RFC7230" x:fmt=","
+ x:rel="#header.fields"/> and <xref target="RFC7231" x:fmt="," x:rel="#status.1xx"/>),
+ and
+ </t>
+ <t>
+ one <x:ref>HEADERS</x:ref> frame (followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames) containing the message headers (see <xref target="RFC7230" x:fmt=","
+ x:rel="#header.fields"/>), and
+ </t>
+ <t>
+ zero or more <x:ref>DATA</x:ref> frames containing the message payload (see <xref
+ target="RFC7230" x:fmt="," x:rel="#message.body"/>), and
+ </t>
+ <t>
+ optionally, one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the trailer-part, if present (see <xref
+ target="RFC7230" x:fmt="," x:rel="#chunked.trailer.part"/>).
+ </t>
+ </list>
+ The last frame in the sequence bears an END_STREAM flag, noting that a
+ <x:ref>HEADERS</x:ref> frame bearing the END_STREAM flag can be followed by
+ <x:ref>CONTINUATION</x:ref> frames that carry any remaining portions of the header block.
+ </t>
+ <t>
+ Other frames (from any stream) MUST NOT occur between either <x:ref>HEADERS</x:ref> frame
+ and any <x:ref>CONTINUATION</x:ref> frames that might follow.
+ </t>
+
+ <t>
+ Trailing header fields are carried in a header block that also terminates the stream.
+ That is, a sequence starting with a <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames, where the <x:ref>HEADERS</x:ref> frame bears an
+ END_STREAM flag. Header blocks after the first that do not terminate the stream are not
+ part of an HTTP request or response.
+ </t>
+ <t>
+ A <x:ref>HEADERS</x:ref> frame (and associated <x:ref>CONTINUATION</x:ref> frames) can
+ only appear at the start or end of a stream. An endpoint that receives a
+ <x:ref>HEADERS</x:ref> frame without the END_STREAM flag set after receiving a final
+ (non-informational) status code MUST treat the corresponding request or response as <xref
+ target="malformed">malformed</xref>.
+ </t>
+
+ <t>
+ An HTTP request/response exchange fully consumes a single stream. A request starts with
+ the <x:ref>HEADERS</x:ref> frame that puts the stream into an "open" state. The request
+ ends with a frame bearing END_STREAM, which causes the stream to become "half closed
+ (local)" for the client and "half closed (remote)" for the server. A response starts with
+ a <x:ref>HEADERS</x:ref> frame and ends with a frame bearing END_STREAM, which places the
+ stream in the "closed" state.
+ <!-- Yes, the response might be completed before the request does, but that's not a detail
+ we need to expand upon. It's complicated enough explaining this as it is. -->
+ </t>
+
+ <section anchor="informational-responses" title="Upgrading From HTTP/2">
+ <t>
+ HTTP/2 removes support for the 101 (Switching Protocols) informational status code
+ (<xref target="RFC7231" x:fmt="," x:rel="#status.101"/>).
+ </t>
+ <t>
+ The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
+ Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
+ their use (see <xref target="starting"/>).
+ </t>
+ </section>
+
+ <section anchor="HttpHeaders" title="HTTP Header Fields">
+ <t>
+ HTTP header fields carry information as a series of key-value pairs. For a listing of
+ registered HTTP headers, see the Message Header Field Registry maintained at <eref
+ target="https://www.iana.org/assignments/message-headers"/>.
+ </t>
+
+ <section anchor="PseudoHeaderFields" title="Pseudo-Header Fields">
+ <t>
+ While HTTP/1.x used the message start-line (see <xref target="RFC7230" x:fmt=","
+ x:rel="#start.line"/>) to convey the target URI and method of the request, and the
+ status code for the response, HTTP/2 uses special pseudo-header fields beginning with
+ ':' character (ASCII 0x3a) for this purpose.
+ </t>
+ <t>
+ Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
+ pseudo-header fields other than those defined in this document.
+ </t>
+ <t>
+ Pseudo-header fields are only valid in the context in which they are defined.
+ Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
+ fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
+ NOT appear in trailers. Endpoints MUST treat a request or response that contains
+ undefined or invalid pseudo-header fields as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ Just as in HTTP/1.x, header field names are strings of ASCII characters that are
+ compared in a case-insensitive fashion. However, header field names MUST be converted
+ to lowercase prior to their encoding in HTTP/2. A request or response containing
+ uppercase header field names MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ All pseudo-header fields MUST appear in the header block before regular header fields.
+ Any request or response that contains a pseudo-header field that appears in a header
+ block after a regular header field MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
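+          <figure>
+            <preamble>
+              A receiver might check these rules as sketched below (non-normative Go;
+              the headerField type and function names are illustrative).  Presence of
+              the mandatory pseudo-header fields is checked elsewhere.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "strings"
+
+// requestPseudo lists the pseudo-header fields defined for requests.
+var requestPseudo = map[string]bool{
+  ":method": true, ":scheme": true, ":authority": true, ":path": true,
+}
+
+// headerField is a decoded (name, value) pair from a header block.
+type headerField struct{ Name, Value string }
+
+// validRequestHeaderBlock reports whether a decoded request header block
+// uses only lowercase names, only defined pseudo-header fields, and places
+// every pseudo-header field before the regular header fields.
+func validRequestHeaderBlock(fields []headerField) bool {
+  sawRegular := false
+  for _, f := range fields {
+    if f.Name != strings.ToLower(f.Name) {
+      return false // uppercase names make the message malformed
+    }
+    if strings.HasPrefix(f.Name, ":") {
+      if sawRegular || !requestPseudo[f.Name] {
+        return false // undefined pseudo-header, or one after a regular field
+      }
+      continue
+    }
+    sawRegular = true
+  }
+  return true
+}
+]]></artwork>
+          </figure>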
+ </section>
+
+ <section title="Connection-Specific Header Fields">
+ <t>
+ HTTP/2 does not use the <spanx style="verb">Connection</spanx> header field to
+ indicate connection-specific header fields; in this protocol, connection-specific
+            metadata is conveyed by other means.  An endpoint MUST NOT generate an HTTP/2 message
+ containing connection-specific header fields; any message containing
+ connection-specific header fields MUST be treated as <xref
+ target="malformed">malformed</xref>.
+ </t>
+ <t>
+ This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
+ to remove any header fields nominated by the Connection header field, along with the
+ Connection header field itself. Such intermediaries SHOULD also remove other
+ connection-specific header fields, such as Keep-Alive, Proxy-Connection,
+ Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
+ </t>
+ <t>
+ One exception to this is the TE header field, which MAY be present in an HTTP/2
+ request, but when it is MUST NOT contain any value other than "trailers".
+ </t>
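+          <figure>
+            <preamble>
+              An intermediary converting HTTP/1.x messages might strip these fields
+              roughly as follows; this non-normative Go sketch removes TE values other
+              than "trailers" outright, which is one permissible policy.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import (
+  "net/http"
+  "strings"
+)
+
+// connectionSpecific lists fields removed even when they are not nominated
+// by the Connection header field.
+var connectionSpecific = []string{
+  "Connection", "Keep-Alive", "Proxy-Connection", "Transfer-Encoding", "Upgrade",
+}
+
+// stripConnectionHeaders prepares an HTTP/1.x header set for HTTP/2.
+func stripConnectionHeaders(h http.Header) {
+  // Remove every field nominated by Connection, then the connection-
+  // specific fields themselves.
+  for _, v := range h.Values("Connection") {
+    for _, name := range strings.Split(v, ",") {
+      h.Del(strings.TrimSpace(name))
+    }
+  }
+  for _, name := range connectionSpecific {
+    h.Del(name)
+  }
+  // TE is the one exception: it may remain, but only as "trailers".
+  if te := h.Get("TE"); te != "" && !strings.EqualFold(te, "trailers") {
+    h.Del("TE")
+  }
+}
+]]></artwork>
+          </figure>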
+ <t>
+ <list style="hanging">
+ <t hangText="Note:">
+ HTTP/2 purposefully does not support upgrade to another protocol. The handshake
+ methods described in <xref target="starting"/> are believed sufficient to
+ negotiate the use of alternative protocols.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section anchor="HttpRequest" title="Request Pseudo-Header Fields">
+ <t>
+ The following pseudo-header fields are defined for HTTP/2 requests:
+ <list style="symbols">
+ <x:lt>
+ <t>
+ The <spanx style="verb">:method</spanx> pseudo-header field includes the HTTP
+ method (<xref target="RFC7231" x:fmt="," x:rel="#methods"/>).
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:scheme</spanx> pseudo-header field includes the scheme
+ portion of the target URI (<xref target="RFC3986" x:fmt="," x:sec="3.1"/>).
+ </t>
+ <t>
+ <spanx style="verb">:scheme</spanx> is not restricted to <spanx
+ style="verb">http</spanx> and <spanx style="verb">https</spanx> schemed URIs. A
+ proxy or gateway can translate requests for non-HTTP schemes, enabling the use
+ of HTTP to interact with non-HTTP services.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:authority</spanx> pseudo-header field includes the
+ authority portion of the target URI (<xref target="RFC3986" x:fmt=","
+ x:sec="3.2"/>). The authority MUST NOT include the deprecated <spanx
+ style="verb">userinfo</spanx> subcomponent for <spanx style="verb">http</spanx>
+ or <spanx style="verb">https</spanx> schemed URIs.
+ </t>
+ <t>
+ To ensure that the HTTP/1.1 request line can be reproduced accurately, this
+ pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
+ that has a request target in origin or asterisk form (see <xref
+ target="RFC7230" x:fmt="," x:rel="#request-target"/>). Clients that generate
+ HTTP/2 requests directly SHOULD use the <spanx>:authority</spanx> pseudo-header
+ field instead of the <spanx style="verb">Host</spanx> header field. An
+ intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a <spanx
+ style="verb">Host</spanx> header field if one is not present in a request by
+ copying the value of the <spanx style="verb">:authority</spanx> pseudo-header
+ field.
+ </t>
+ </x:lt>
+ <x:lt>
+ <t>
+ The <spanx style="verb">:path</spanx> pseudo-header field includes the path and
+ query parts of the target URI (the <spanx style="verb">path-absolute</spanx>
+ production from <xref target="RFC3986"/> and optionally a '?' character
+ followed by the <spanx style="verb">query</spanx> production, see <xref
+ target="RFC3986" x:fmt="," x:sec="3.3"/> and <xref target="RFC3986" x:fmt=","
+ x:sec="3.4"/>). A request in asterisk form includes the value '*' for the
+ <spanx style="verb">:path</spanx> pseudo-header field.
+ </t>
+ <t>
+ This pseudo-header field MUST NOT be empty for <spanx style="verb">http</spanx>
+ or <spanx style="verb">https</spanx> URIs; <spanx style="verb">http</spanx> or
+ <spanx style="verb">https</spanx> URIs that do not contain a path component
+ MUST include a value of '/'. The exception to this rule is an OPTIONS request
+ for an <spanx style="verb">http</spanx> or <spanx style="verb">https</spanx>
+ URI that does not include a path component; these MUST include a <spanx
+ style="verb">:path</spanx> pseudo-header field with a value of '*' (see <xref
+ target="RFC7230" x:fmt="," x:rel="#asterisk-form"/>).
+ </t>
+ </x:lt>
+ </list>
+ </t>
+ <t>
+ All HTTP/2 requests MUST include exactly one valid value for the <spanx
+ style="verb">:method</spanx>, <spanx style="verb">:scheme</spanx>, and <spanx
+ style="verb">:path</spanx> pseudo-header fields, unless it is a <xref
+ target="CONNECT">CONNECT request</xref>. An HTTP request that omits mandatory
+ pseudo-header fields is <xref target="malformed">malformed</xref>.
+ </t>
+ <t>
+ HTTP/2 does not define a way to carry the version identifier that is included in the
+ HTTP/1.1 request line.
+ </t>
+ </section>
+
+ <section anchor="HttpResponse" title="Response Pseudo-Header Fields">
+ <t>
+ For HTTP/2 responses, a single <spanx style="verb">:status</spanx> pseudo-header
+ field is defined that carries the HTTP status code field (see <xref target="RFC7231"
+ x:fmt="," x:rel="#status.codes"/>). This pseudo-header field MUST be included in all
+ responses, otherwise the response is <xref target="malformed">malformed</xref>.
+ </t>
+ <t>
+ HTTP/2 does not define a way to carry the version or reason phrase that is included in
+ an HTTP/1.1 status line.
+ </t>
+ </section>
+
+ <section anchor="CompressCookie" title="Compressing the Cookie Header Field">
+ <t>
+ The <xref target="COOKIE">Cookie header field</xref> can carry a significant amount of
+ redundant data.
+ </t>
+ <t>
+ The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
+ This header field doesn't follow the list construction rules in HTTP (see <xref
+ target="RFC7230" x:fmt="," x:rel="#field.order"/>), which prevents cookie-pairs from
+ being separated into different name-value pairs. This can significantly reduce
+ compression efficiency as individual cookie-pairs are updated.
+ </t>
+ <t>
+ To allow for better compression efficiency, the Cookie header field MAY be split into
+ separate header fields, each with one or more cookie-pairs. If there are multiple
+ Cookie header fields after decompression, these MUST be concatenated into a single
+ octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
+ before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
+ generic HTTP server application.
+ </t>
+ <figure>
+ <preamble>
+ Therefore, the following two lists of Cookie header fields are semantically
+ equivalent.
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ cookie: a=b; c=d; e=f
+
+ cookie: a=b
+ cookie: c=d
+ cookie: e=f
+]]></artwork>
+ </figure>
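+          <figure>
+            <preamble>
+              Splitting and rejoining can be sketched (non-normatively) in Go as
+              follows; a production implementation would also tolerate delimiters
+              without the trailing space.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "strings"
+
+// splitCookie breaks a Cookie header field value into individual
+// cookie-pairs so that each crumb can be compressed independently.
+func splitCookie(value string) []string {
+  return strings.Split(value, "; ")
+}
+
+// joinCookies reassembles crumbs into a single field value before the
+// message is handed to a non-HTTP/2 context, using the two-octet
+// delimiter 0x3B, 0x20 ("; ").
+func joinCookies(crumbs []string) string {
+  return strings.Join(crumbs, "; ")
+}
+]]></artwork>
+          </figure>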
+ </section>
+
+ <section anchor="malformed" title="Malformed Requests and Responses">
+ <t>
+ A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+            frames, but is invalid due to the presence of extraneous frames, prohibited
+ header fields, the absence of mandatory header fields, or the inclusion of uppercase
+ header field names.
+ </t>
+ <t>
+ A request or response that includes an entity body can include a <spanx
+ style="verb">content-length</spanx> header field. A request or response is also
+ malformed if the value of a <spanx style="verb">content-length</spanx> header field
+ does not equal the sum of the <x:ref>DATA</x:ref> frame payload lengths that form the
+ body. A response that is defined to have no payload, as described in <xref
+ target="RFC7230" x:fmt="," x:rel="#header.content-length"/>, can have a non-zero
+ <spanx style="verb">content-length</spanx> header field, even though no content is
+ included in <x:ref>DATA</x:ref> frames.
+ </t>
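+          <figure>
+            <preamble>
+              The content-length consistency check can be sketched in Go as below;
+              this is non-normative, assumes the payload lengths exclude any padding,
+              and does not attempt to validate the header field value itself.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+import "strconv"
+
+// consistentContentLength reports whether the declared content-length
+// equals the sum of the DATA frame payload lengths that form the body.
+func consistentContentLength(contentLength string, dataPayloadLengths []int) bool {
+  declared, err := strconv.ParseInt(contentLength, 10, 64)
+  if err != nil || declared < 0 {
+    return true // nothing this sketch can enforce
+  }
+  var sum int64
+  for _, n := range dataPayloadLengths {
+    sum += int64(n)
+  }
+  return sum == declared
+}
+]]></artwork>
+          </figure>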
+ <t>
+ Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+ acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
+ requests or responses that are detected MUST be treated as a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ For malformed requests, a server MAY send an HTTP response prior to closing or
+ resetting the stream. Clients MUST NOT accept a malformed response. Note that these
+ requirements are intended to protect against several types of common attacks against
+ HTTP; they are deliberately strict, because being permissive can expose
+ implementations to these vulnerabilities.
+ </t>
+ </section>
+ </section>
+
+ <section title="Examples">
+ <t>
+ This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
+ HTTP/2 requests and responses.
+ </t>
+ <t>
+ An HTTP GET request includes request header fields and no body and is therefore
+ transmitted as a single <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the serialized block of request header
+ fields. The <x:ref>HEADERS</x:ref> frame in the following has both the END_HEADERS and
+ END_STREAM flags set; no <x:ref>CONTINUATION</x:ref> frames are sent:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ GET /resource HTTP/1.1 HEADERS
+ Host: example.org ==> + END_STREAM
+ Accept: image/jpeg + END_HEADERS
+ :method = GET
+ :scheme = https
+ :path = /resource
+ host = example.org
+ accept = image/jpeg
+]]></artwork>
+ </figure>
+
+ <t>
+ Similarly, a response that includes only response header fields is transmitted as a
+ <x:ref>HEADERS</x:ref> frame (again, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames) containing the serialized block of response header
+ fields.
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 304 Not Modified HEADERS
+ ETag: "xyzzy" ==> + END_STREAM
+ Expires: Thu, 23 Jan ... + END_HEADERS
+ :status = 304
+ etag = "xyzzy"
+ expires = Thu, 23 Jan ...
+]]></artwork>
+ </figure>
+
+ <t>
+ An HTTP POST request that includes request header fields and payload data is transmitted
+ as one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+ <x:ref>CONTINUATION</x:ref> frames containing the request header fields, followed by one
+ or more <x:ref>DATA</x:ref> frames, with the last <x:ref>CONTINUATION</x:ref> (or
+ <x:ref>HEADERS</x:ref>) frame having the END_HEADERS flag set and the final
+ <x:ref>DATA</x:ref> frame having the END_STREAM flag set:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ POST /resource HTTP/1.1 HEADERS
+ Host: example.org ==> - END_STREAM
+ Content-Type: image/jpeg - END_HEADERS
+ Content-Length: 123 :method = POST
+ :path = /resource
+ {binary data} :scheme = https
+
+ CONTINUATION
+ + END_HEADERS
+ content-type = image/jpeg
+ host = example.org
+ content-length = 123
+
+ DATA
+ + END_STREAM
+ {binary data}
+]]></artwork>
+ <postamble>
+ Note that data contributing to any given header field could be spread between header
+ block fragments. The allocation of header fields to frames in this example is
+ illustrative only.
+ </postamble>
+ </figure>
+
+ <t>
+ A response that includes header fields and payload data is transmitted as a
+ <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames, followed by one or more <x:ref>DATA</x:ref> frames, with the last
+ <x:ref>DATA</x:ref> frame in the sequence having the END_STREAM flag set:
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 200 OK HEADERS
+ Content-Type: image/jpeg ==> - END_STREAM
+ Content-Length: 123 + END_HEADERS
+ :status = 200
+ {binary data} content-type = image/jpeg
+ content-length = 123
+
+ DATA
+ + END_STREAM
+ {binary data}
+]]></artwork>
+ </figure>
+
+ <t>
+ Trailing header fields are sent as a header block after both the request or response
+ header block and all the <x:ref>DATA</x:ref> frames have been sent. The
+ <x:ref>HEADERS</x:ref> frame starting the trailers header block has the END_STREAM flag
+ set.
+ </t>
+
+ <figure>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 200 OK HEADERS
+ Content-Type: image/jpeg ==> - END_STREAM
+ Transfer-Encoding: chunked + END_HEADERS
+ Trailer: Foo :status = 200
+ content-length = 123
+ 123 content-type = image/jpeg
+ {binary data} trailer = Foo
+ 0
+ Foo: bar DATA
+ - END_STREAM
+ {binary data}
+
+ HEADERS
+ + END_STREAM
+ + END_HEADERS
+ foo = bar
+]]></artwork>
+ </figure>
+
+
+ <figure>
+ <preamble>
+ An informational response using a 1xx status code other than 101 is transmitted as a
+ <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+ frames:
+ </preamble>
+ <artwork type="inline"><![CDATA[
+ HTTP/1.1 103 BAR HEADERS
+ Extension-Field: bar ==> - END_STREAM
+ + END_HEADERS
+ :status = 103
+ extension-field = bar
+]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="Reliability" title="Request Reliability Mechanisms in HTTP/2">
+ <t>
+ In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
+ occurs, because there is no means to determine the nature of the error. It is possible
+ that some server processing occurred prior to the error, which could result in
+ undesirable effects if the request were reattempted.
+ </t>
+ <t>
+ HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
+ not been processed:
+ <list style="symbols">
+ <t>
+ The <x:ref>GOAWAY</x:ref> frame indicates the highest stream number that might have
+ been processed. Requests on streams with higher numbers are therefore guaranteed to
+ be safe to retry.
+ </t>
+ <t>
+ The <x:ref>REFUSED_STREAM</x:ref> error code can be included in a
+ <x:ref>RST_STREAM</x:ref> frame to indicate that the stream is being closed prior to
+ any processing having occurred. Any request that was sent on the reset stream can
+ be safely retried.
+ </t>
+ </list>
+ </t>
+ <t>
+ Requests that have not been processed have not failed; clients MAY automatically retry
+ them, even those with non-idempotent methods.
+ </t>
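+        <figure>
+          <preamble>
+            A client might combine these signals roughly as follows; this Go fragment
+            is a non-normative sketch and the parameter names are illustrative.
+          </preamble>
+          <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+// safeToRetry reports whether a request sent on streamID can be retried
+// on a new connection without risking duplicate processing.
+func safeToRetry(streamID, goAwayLastStreamID uint32, refusedStream bool) bool {
+  if refusedStream {
+    return true // reset with REFUSED_STREAM before any processing
+  }
+  // Streams above the GOAWAY last stream identifier were not processed.
+  return streamID > goAwayLastStreamID
+}
+]]></artwork>
+        </figure>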
+ <t>
+ A server MUST NOT indicate that a stream has not been processed unless it can guarantee
+ that fact. If frames that are on a stream are passed to the application layer for any
+ stream, then <x:ref>REFUSED_STREAM</x:ref> MUST NOT be used for that stream, and a
+ <x:ref>GOAWAY</x:ref> frame MUST include a stream identifier that is greater than or
+ equal to the given stream identifier.
+ </t>
+ <t>
+ In addition to these mechanisms, the <x:ref>PING</x:ref> frame provides a way for a
+ client to easily test a connection. Connections that remain idle can become broken as
+ some middleboxes (for instance, network address translators, or load balancers) silently
+ discard connection bindings. The <x:ref>PING</x:ref> frame allows a client to safely
+ test whether a connection is still active without sending a request.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="PushResources" title="Server Push">
+ <t>
+ HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
+ corresponding "promised" requests) to a client in association with a previous
+ client-initiated request. This can be useful when the server knows the client will need
+ to have those responses available in order to fully process the response to the original
+ request.
+ </t>
+
+ <t>
+ Pushing additional message exchanges in this fashion is optional, and is negotiated
+ between individual endpoints. The <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting can be set
+ to 0 to indicate that server push is disabled.
+ </t>
+ <t>
+ Promised requests MUST be cacheable (see <xref target="RFC7231" x:fmt=","
+ x:rel="#cacheable.methods"/>), MUST be safe (see <xref target="RFC7231" x:fmt=","
+ x:rel="#safe.methods"/>) and MUST NOT include a request body. Clients that receive a
+          promised request that is not cacheable, that is not safe, or that includes a request
+          body MUST reset the stream with a <xref target="StreamErrorHandler">stream error</xref>
+          of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+ <t>
+ Pushed responses that are cacheable (see <xref target="RFC7234" x:fmt=","
+          x:rel="#response.cacheability"/>) can be stored by the client, if it implements an HTTP
+ cache. Pushed responses are considered successfully validated on the origin server (e.g.,
+ if the "no-cache" cache response directive <xref target="RFC7234" x:fmt=","
+ x:rel="#cache-response-directive"/> is present) while the stream identified by the
+ promised stream ID is still open.
+ </t>
+ <t>
+ Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
+ be made available to the application separately.
+ </t>
+ <t>
+ An intermediary can receive pushes from the server and choose not to forward them on to
+ the client. In other words, how to make use of the pushed information is up to that
+ intermediary. Equally, the intermediary might choose to make additional pushes to the
+ client, without any action taken by the server.
+ </t>
+ <t>
+ A client cannot push. Thus, servers MUST treat the receipt of a
+ <x:ref>PUSH_PROMISE</x:ref> frame as a <xref target="ConnectionErrorHandler">connection
+ error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>. Clients MUST reject any attempt to
+ change the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting to a value other than 0 by treating
+ the message as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
+
+ <section anchor="PushRequests" title="Push Requests">
+ <t>
+ Server push is semantically equivalent to a server responding to a request; however, in
+ this case that request is also sent by the server, as a <x:ref>PUSH_PROMISE</x:ref>
+ frame.
+ </t>
+ <t>
+ The <x:ref>PUSH_PROMISE</x:ref> frame includes a header block that contains a complete
+ set of request header fields that the server attributes to the request. It is not
+ possible to push a response to a request that includes a request body.
+ </t>
+
+ <t>
+ Pushed responses are always associated with an explicit request from the client. The
+ <x:ref>PUSH_PROMISE</x:ref> frames sent by the server are sent on that explicit
+ request's stream. The <x:ref>PUSH_PROMISE</x:ref> frame also includes a promised stream
+ identifier, chosen from the stream identifiers available to the server (see <xref
+ target="StreamIdentifiers"/>).
+ </t>
+
+ <t>
+ The header fields in <x:ref>PUSH_PROMISE</x:ref> and any subsequent
+ <x:ref>CONTINUATION</x:ref> frames MUST be a valid and complete set of <xref
+ target="HttpRequest">request header fields</xref>. The server MUST include a method in
+ the <spanx style="verb">:method</spanx> header field that is safe and cacheable. If a
+ client receives a <x:ref>PUSH_PROMISE</x:ref> that does not include a complete and valid
+ set of header fields, or the <spanx style="verb">:method</spanx> header field identifies
+ a method that is not safe, it MUST respond with a <xref
+ target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+ </t>
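+        <figure>
+          <preamble>
+            A client-side check of a promised request might look like the following
+            non-normative Go sketch; of the methods defined in RFC 7231, only GET and
+            HEAD are both safe and cacheable, which this sketch assumes.
+          </preamble>
+          <artwork type="inline"><![CDATA[
+// Non-normative sketch; all names are illustrative only.
+package sketch
+
+// safeCacheableMethods lists the methods a promised request may carry.
+var safeCacheableMethods = map[string]bool{"GET": true, "HEAD": true}
+
+// acceptablePromise reports whether a client should accept a PUSH_PROMISE
+// or instead reset the promised stream with PROTOCOL_ERROR.
+func acceptablePromise(method, scheme, path string, hasRequestBody bool) bool {
+  if hasRequestBody {
+    return false // promised requests cannot carry a request body
+  }
+  if method == "" || scheme == "" || path == "" {
+    return false // the header fields must form a complete, valid request
+  }
+  return safeCacheableMethods[method]
+}
+]]></artwork>
+        </figure>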
+
+ <t>
+ The server SHOULD send <x:ref>PUSH_PROMISE</x:ref> (<xref target="PUSH_PROMISE"/>)
+ frames prior to sending any frames that reference the promised responses. This avoids a
+ race where clients issue requests prior to receiving any <x:ref>PUSH_PROMISE</x:ref>
+ frames.
+ </t>
+ <t>
+ For example, if the server receives a request for a document containing embedded links
+ to multiple image files, and the server chooses to push those additional images to the
+ client, sending push promises before the <x:ref>DATA</x:ref> frames that contain the
+ image links ensures that the client is able to see the promises before discovering
+ embedded links. Similarly, if the server pushes responses referenced by the header block
+ (for instance, in Link header fields), sending the push promises before sending the
+ header block ensures that clients do not request them.
+ </t>
+
+ <t>
+ <x:ref>PUSH_PROMISE</x:ref> frames MUST NOT be sent by the client.
+ </t>
+ <t>
+ <x:ref>PUSH_PROMISE</x:ref> frames can be sent by the server in response to any
+ client-initiated stream, but the stream MUST be in either the "open" or "half closed
+ (remote)" state with respect to the server. <x:ref>PUSH_PROMISE</x:ref> frames are
+ interspersed with the frames that comprise a response, though they cannot be
+ interspersed with <x:ref>HEADERS</x:ref> and <x:ref>CONTINUATION</x:ref> frames that
+ comprise a single header block.
+ </t>
+ <t>
+ Sending a <x:ref>PUSH_PROMISE</x:ref> frame creates a new stream and puts the stream
+          into the "reserved (local)" state for the server and the "reserved (remote)" state for
+ the client.
+ </t>
+ </section>
+
+ <section anchor="PushResponses" title="Push Responses">
+ <t>
+ After sending the <x:ref>PUSH_PROMISE</x:ref> frame, the server can begin delivering the
+ pushed response as a <xref target="HttpResponse">response</xref> on a server-initiated
+ stream that uses the promised stream identifier. The server uses this stream to
+ transmit an HTTP response, using the same sequence of frames as defined in <xref
+ target="HttpSequence"/>. This stream becomes <xref target="StreamStates">"half closed"
+ to the client</xref> after the initial <x:ref>HEADERS</x:ref> frame is sent.
+ </t>
+
+ <t>
+ Once a client receives a <x:ref>PUSH_PROMISE</x:ref> frame and chooses to accept the
+ pushed response, the client SHOULD NOT issue any requests for the promised response
+ until after the promised stream has closed.
+ </t>
+
+ <t>
+ If the client determines, for any reason, that it does not wish to receive the pushed
+ response from the server, or if the server takes too long to begin sending the promised
+ response, the client can send an <x:ref>RST_STREAM</x:ref> frame, using either the
+ <x:ref>CANCEL</x:ref> or <x:ref>REFUSED_STREAM</x:ref> codes, and referencing the pushed
+ stream's identifier.
+ </t>
+ <t>
+ A client can use the <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting to limit the
+ number of responses that can be concurrently pushed by a server. Advertising a
+ <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> value of zero disables server push by
+ preventing the server from creating the necessary streams. This does not prohibit a
+ server from sending <x:ref>PUSH_PROMISE</x:ref> frames; clients need to reset any
+ promised streams that are not wanted.
+ </t>
+
+ <t>
+ Clients receiving a pushed response MUST validate that either the server is
+ authoritative (see <xref target="authority"/>), or the proxy that provided the pushed
+ response is configured for the corresponding request. For example, a server that offers
+ a certificate for only the <spanx style="verb">example.com</spanx> DNS-ID or Common Name
+ is not permitted to push a response for <spanx
+ style="verb">https://www.example.org/doc</spanx>.
+ </t>
+ <t>
+ The response for a <x:ref>PUSH_PROMISE</x:ref> stream begins with a
+        <x:ref>HEADERS</x:ref> frame, which immediately puts the stream into the "half closed
+        (remote)" state for the server and "half closed (local)" state for the client, and ends
+ with a frame bearing END_STREAM, which places the stream in the "closed" state.
+ <list style="hanging">
+ <t hangText="Note:">
+ The client never sends a frame with the END_STREAM flag for a server push.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ </section>
+
+ <section anchor="CONNECT" title="The CONNECT Method">
+ <t>
+ In HTTP/1.x, the pseudo-method CONNECT (<xref target="RFC7231" x:fmt=","
+ x:rel="#CONNECT"/>) is used to convert an HTTP connection into a tunnel to a remote host.
+ CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
+ server for the purposes of interacting with <spanx style="verb">https</spanx> resources.
+ </t>
+ <t>
+ In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
+ a remote host, for similar purposes. The HTTP header field mapping works as defined in
+ <xref target="HttpRequest">Request Header Fields</xref>, with a few
+ differences. Specifically:
+ <list style="symbols">
+ <t>
+ The <spanx style="verb">:method</spanx> header field is set to <spanx
+ style="verb">CONNECT</spanx>.
+ </t>
+ <t>
+ The <spanx style="verb">:scheme</spanx> and <spanx style="verb">:path</spanx> header
+ fields MUST be omitted.
+ </t>
+ <t>
+ The <spanx style="verb">:authority</spanx> header field contains the host and port to
+ connect to (equivalent to the authority-form of the request-target of CONNECT
+ requests, see <xref target="RFC7230" x:fmt="," x:rel="#request-target"/>).
+ </t>
+ </list>
+ </t>
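+      <t>
+        The following is a non-normative sketch of the header field mapping above, encoded with
+        the <spanx style="verb">golang.org/x/net/http2/hpack</spanx> package in Go; the authority
+        value is illustrative.
+      </t>
+      <figure>
+        <artwork type="go"><![CDATA[
+// Non-normative sketch: encoding the pseudo-header fields of a CONNECT
+// request. :scheme and :path are omitted; :authority carries host:port.
+package main
+
+import (
+    "bytes"
+
+    "golang.org/x/net/http2/hpack"
+)
+
+func encodeConnect(authority string) []byte {
+    var buf bytes.Buffer
+    enc := hpack.NewEncoder(&buf)
+    enc.WriteField(hpack.HeaderField{Name: ":method", Value: "CONNECT"})
+    enc.WriteField(hpack.HeaderField{Name: ":authority", Value: authority})
+    return buf.Bytes() // header block fragment for a HEADERS frame
+}
+]]></artwork>
+      </figure>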
+ <t>
+ A proxy that supports CONNECT establishes a <xref target="TCP">TCP connection</xref> to
+ the server identified in the <spanx style="verb">:authority</spanx> header field. Once
+ this connection is successfully established, the proxy sends a <x:ref>HEADERS</x:ref>
+ frame containing a 2xx series status code to the client, as defined in <xref
+ target="RFC7231" x:fmt="," x:rel="#CONNECT"/>.
+ </t>
+ <t>
+ After the initial <x:ref>HEADERS</x:ref> frame sent by each peer, all subsequent
+ <x:ref>DATA</x:ref> frames correspond to data sent on the TCP connection. The payload of
+ any <x:ref>DATA</x:ref> frames sent by the client is transmitted by the proxy to the TCP
+ server; data received from the TCP server is assembled into <x:ref>DATA</x:ref> frames by
+ the proxy. Frame types other than <x:ref>DATA</x:ref> or stream management frames
+ (<x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PRIORITY</x:ref>)
+ MUST NOT be sent on a connected stream, and MUST be treated as a <xref
+ target="StreamErrorHandler">stream error</xref> if received.
+ </t>
+ <t>
+ The TCP connection can be closed by either peer. The END_STREAM flag on a
+ <x:ref>DATA</x:ref> frame is treated as being equivalent to the TCP FIN bit. A client is
+ expected to send a <x:ref>DATA</x:ref> frame with the END_STREAM flag set after receiving
+ a frame bearing the END_STREAM flag. A proxy that receives a <x:ref>DATA</x:ref> frame
+ with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
+ segment. A proxy that receives a TCP segment with the FIN bit set sends a
+ <x:ref>DATA</x:ref> frame with the END_STREAM flag set. Note that the final TCP segment
+ or <x:ref>DATA</x:ref> frame could be empty.
+ </t>
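+      <t>
+        A non-normative sketch of the proxy behaviour described above follows, assuming Go and
+        <spanx style="verb">golang.org/x/net/http2</spanx>; dispatch of other frame types and most
+        error handling are elided.
+      </t>
+      <figure>
+        <artwork type="go"><![CDATA[
+// Non-normative sketch: copying DATA frame payloads from a connected
+// stream to the TCP connection and translating END_STREAM into FIN.
+package main
+
+import (
+    "net"
+
+    "golang.org/x/net/http2"
+)
+
+func tunnelToServer(fr *http2.Framer, streamID uint32, tcp *net.TCPConn) error {
+    for {
+        f, err := fr.ReadFrame()
+        if err != nil {
+            return err
+        }
+        df, ok := f.(*http2.DataFrame)
+        if !ok || df.Header().StreamID != streamID {
+            continue // other frame types are handled elsewhere
+        }
+        if _, err := tcp.Write(df.Data()); err != nil {
+            return err
+        }
+        if df.StreamEnded() {
+            // END_STREAM is equivalent to the TCP FIN bit.
+            return tcp.CloseWrite()
+        }
+    }
+}
+]]></artwork>
+      </figure>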
+ <t>
+ A TCP connection error is signaled with <x:ref>RST_STREAM</x:ref>. A proxy treats any
+ error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
+ as a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>CONNECT_ERROR</x:ref>. Correspondingly, a proxy MUST send a TCP segment with the
+ RST bit set if it detects an error with the stream or the HTTP/2 connection.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="HttpExtra" title="Additional HTTP Requirements/Considerations">
+ <t>
+ This section outlines attributes of the HTTP protocol that improve interoperability, reduce
+ exposure to known security vulnerabilities, or reduce the potential for implementation
+ variation.
+ </t>
+
+ <section title="Connection Management">
+ <t>
+        HTTP/2 connections are persistent.  For best performance, it is expected that clients will not
+ close connections until it is determined that no further communication with a server is
+ necessary (for example, when a user navigates away from a particular web page), or until
+ the server closes the connection.
+ </t>
+ <t>
+ Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
+ where host is derived from a URI, a selected <xref target="ALT-SVC">alternative
+ service</xref>, or a configured proxy.
+ </t>
+ <t>
+ A client can create additional connections as replacements, either to replace connections
+ that are near to exhausting the available <xref target="StreamIdentifiers">stream
+ identifier space</xref>, to refresh the keying material for a TLS connection, or to
+ replace connections that have encountered <xref
+ target="ConnectionErrorHandler">errors</xref>.
+ </t>
+ <t>
+ A client MAY open multiple connections to the same IP address and TCP port using different
+ <xref target="TLS-EXT">Server Name Indication</xref> values or to provide different TLS
+ client certificates, but SHOULD avoid creating multiple connections with the same
+ configuration.
+ </t>
+ <t>
+ Servers are encouraged to maintain open connections for as long as possible, but are
+ permitted to terminate idle connections if necessary. When either endpoint chooses to
+ close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
+ <x:ref>GOAWAY</x:ref> (<xref target="GOAWAY"/>) frame so that both endpoints can reliably
+ determine whether previously sent frames have been processed and gracefully complete or
+ terminate any necessary remaining tasks.
+ </t>
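+      <t>
+        The following is a non-normative sketch of the graceful shutdown described above, assuming
+        Go and <spanx style="verb">golang.org/x/net/http2</spanx>; the last processed stream
+        identifier is supplied by the caller.
+      </t>
+      <figure>
+        <artwork type="go"><![CDATA[
+// Non-normative sketch: sending GOAWAY before closing the connection so
+// the peer can determine which streams were processed.
+package main
+
+import (
+    "net"
+
+    "golang.org/x/net/http2"
+)
+
+func shutDown(fr *http2.Framer, conn net.Conn, lastStreamID uint32) error {
+    // NO_ERROR indicates a graceful shutdown rather than a fault.
+    if err := fr.WriteGoAway(lastStreamID, http2.ErrCodeNo, nil); err != nil {
+        return err
+    }
+    return conn.Close()
+}
+]]></artwork>
+      </figure>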
+
+ <section anchor="reuse" title="Connection Reuse">
+ <t>
+          Connections that are made to an origin server, either directly or through a tunnel
+          created using the <xref target="CONNECT">CONNECT method</xref>, MAY be reused for
+ requests with multiple different URI authority components. A connection can be reused
+ as long as the origin server is <xref target="authority">authoritative</xref>. For
+ <spanx style="verb">http</spanx> resources, this depends on the host having resolved to
+ the same IP address.
+ </t>
+ <t>
+ For <spanx style="verb">https</spanx> resources, connection reuse additionally depends
+ on having a certificate that is valid for the host in the URI. An origin server might
+ offer a certificate with multiple <spanx style="verb">subjectAltName</spanx> attributes,
+ or names with wildcards, one of which is valid for the authority in the URI. For
+ example, a certificate with a <spanx style="verb">subjectAltName</spanx> of <spanx
+ style="verb">*.example.com</spanx> might permit the use of the same connection for
+ requests to URIs starting with <spanx style="verb">https://a.example.com/</spanx> and
+ <spanx style="verb">https://b.example.com/</spanx>.
+ </t>
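+        <t>
+          The following is a non-normative sketch, assuming Go's <spanx
+          style="verb">crypto/tls</spanx> package, of checking whether the certificate presented
+          on an existing connection also covers another authority before reusing that connection;
+          the function name is illustrative.
+        </t>
+        <figure>
+          <artwork type="go"><![CDATA[
+// Non-normative sketch: a connection to https://a.example.com/ can be
+// reused for https://b.example.com/ if the presented certificate (for
+// example, one with a *.example.com subjectAltName) is valid for the
+// new host.
+package main
+
+import "crypto/tls"
+
+func canReuse(conn *tls.Conn, newHost string) bool {
+    state := conn.ConnectionState()
+    if len(state.PeerCertificates) == 0 {
+        return false
+    }
+    return state.PeerCertificates[0].VerifyHostname(newHost) == nil
+}
+]]></artwork>
+        </figure>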
+ <t>
+ In some deployments, reusing a connection for multiple origins can result in requests
+ being directed to the wrong origin server. For example, TLS termination might be
+ performed by a middlebox that uses the TLS <xref target="TLS-EXT">Server Name Indication
+ (SNI)</xref> extension to select an origin server. This means that it is possible
+ for clients to send confidential information to servers that might not be the intended
+ target for the request, even though the server is otherwise authoritative.
+ </t>
+ <t>
+ A server that does not wish clients to reuse connections can indicate that it is not
+ authoritative for a request by sending a 421 (Misdirected Request) status code in response
+ to the request (see <xref target="MisdirectedRequest"/>).
+ </t>
+ <t>
+ A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
+ through a single connection. That is, all requests sent via a proxy reuse the
+ connection to the proxy.
+ </t>
+ </section>
+
+ <section anchor="MisdirectedRequest" title="The 421 (Misdirected Request) Status Code">
+ <t>
+ The 421 (Misdirected Request) status code indicates that the request was directed at a
+ server that is not able to produce a response. This can be sent by a server that is not
+ configured to produce responses for the combination of scheme and authority that are
+ included in the request URI.
+ </t>
+ <t>
+ Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
+          request, whether the request method is idempotent or not, over a different connection.
+ This is possible if a connection is reused (<xref target="reuse"/>) or if an alternative
+ service is selected (<xref target="ALT-SVC"/>).
+ </t>
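+        <t>
+          A non-normative sketch of this retry behaviour, assuming Go's <spanx
+          style="verb">net/http</spanx> package, follows; the two clients stand in for a reused
+          connection and a dedicated one, and the request is assumed to have no body.
+        </t>
+        <figure>
+          <artwork type="go"><![CDATA[
+// Non-normative sketch: retrying a request over a different connection
+// after a 421 (Misdirected Request) response.
+package main
+
+import "net/http"
+
+func doWithRetry(reused, direct *http.Client, req *http.Request) (*http.Response, error) {
+    resp, err := reused.Do(req)
+    if err != nil || resp.StatusCode != 421 {
+        return resp, err
+    }
+    resp.Body.Close()
+    // 421 means the first server did not produce a response for the
+    // request, so it can be retried whether or not it is idempotent.
+    return direct.Do(req)
+}
+]]></artwork>
+        </figure>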
+ <t>
+ This status code MUST NOT be generated by proxies.
+ </t>
+ <t>
+ A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
+ definition or explicit cache controls (see <xref target="RFC7234"
+ x:rel="#heuristic.freshness" x:fmt="of"/>).
+ </t>
+ </section>
+ </section>
+
+ <section title="Use of TLS Features" anchor="TLSUsage">
+ <t>
+ Implementations of HTTP/2 MUST support <xref target="TLS12">TLS 1.2</xref> for HTTP/2 over
+ TLS. The general TLS usage guidance in <xref target="TLSBCP"/> SHOULD be followed, with
+ some additional restrictions that are specific to HTTP/2.
+ </t>
+
+ <t>
+ An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
+ feature set and cipher suite described in this section. Due to implementation
+ limitations, it might not be possible to fail TLS negotiation. An endpoint MUST
+ immediately terminate an HTTP/2 connection that does not meet these minimum requirements
+ with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>INADEQUATE_SECURITY</x:ref>.
+ </t>
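+      <t>
+        The following is a non-normative sketch, assuming Go's <spanx
+        style="verb">crypto/tls</spanx> and <spanx style="verb">golang.org/x/net/http2</spanx>
+        packages, of an endpoint terminating a connection that was negotiated below TLS 1.2.
+      </t>
+      <figure>
+        <artwork type="go"><![CDATA[
+// Non-normative sketch: terminating an HTTP/2 connection whose TLS
+// version is below 1.2 with a connection error of INADEQUATE_SECURITY.
+package main
+
+import (
+    "crypto/tls"
+
+    "golang.org/x/net/http2"
+)
+
+func enforceTLS12(conn *tls.Conn, fr *http2.Framer) error {
+    if conn.ConnectionState().Version < tls.VersionTLS12 {
+        // Send GOAWAY with INADEQUATE_SECURITY, then close.
+        _ = fr.WriteGoAway(0, http2.ErrCodeInadequateSecurity, nil)
+        return conn.Close()
+    }
+    return nil
+}
+]]></artwork>
+      </figure>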
+
+ <section anchor="TLSFeatures" title="TLS Features">
+ <t>
+ The TLS implementation MUST support the <xref target="TLS-EXT">Server Name Indication
+ (SNI)</xref> extension to TLS. HTTP/2 clients MUST indicate the target domain name when
+ negotiating TLS.
+ </t>
+ <t>
+ The TLS implementation MUST disable compression. TLS compression can lead to the
+ exposure of information that would not otherwise be revealed <xref target="RFC3749"/>.
+ Generic compression is unnecessary since HTTP/2 provides compression features that are
+ more aware of context and therefore likely to be more appropriate for use for
+ performance, security or other reasons.
+ </t>
+ <t>
+ The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS
+ renegotiation as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>PROTOCOL_ERROR</x:ref>. Note that disabling renegotiation can result in
+ long-lived connections becoming unusable due to limits on the number of messages the
+ underlying cipher suite can encipher.
+ </t>
+ <t>
+ A client MAY use renegotiation to provide confidentiality protection for client
+ credentials offered in the handshake, but any renegotiation MUST occur prior to sending
+ the connection preface. A server SHOULD request a client certificate if it sees a
+ renegotiation request immediately after establishing a connection.
+ </t>
+ <t>
+ This effectively prevents the use of renegotiation in response to a request for a
+ specific protected resource. A future specification might provide a way to support this
+ use case. <!-- <cref> We are tracking this in a non-blocking fashion in issue #496 and
+ with a new draft. -->
+ </t>
+ </section>
+
+ <section title="TLS Cipher Suites">
+ <t>
+ The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
+ only be used with cipher suites that have ephemeral key exchange, such as the <xref
+ target="TLS12">ephemeral Diffie-Hellman (DHE)</xref> or the <xref
+ target="RFC4492">elliptic curve variant (ECDHE)</xref>. Ephemeral key exchange MUST
+ have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
+          Clients MUST accept DHE sizes of up to 4096 bits.  HTTP/2 MUST NOT be used with cipher
+          suites that use stream or block ciphers.  Authenticated Encryption with Additional Data
+          (AEAD) modes, such as the <xref target="RFC5288">Galois Counter Mode (GCM) for
+          AES</xref>, are acceptable.
+ </t>
+ <t>
+ The effect of these restrictions is that TLS 1.2 implementations could have
+ non-intersecting sets of available cipher suites, since these prevent the use of the
+ cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
+ HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 <xref
+ target="TLS-ECDHE"/> with P256 <xref target="FIPS186"/>.
+ </t>
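+        <t>
+          A non-normative sketch, assuming Go's <spanx style="verb">crypto/tls</spanx> package, of
+          a configuration offering the mandatory-to-implement suite with the P-256 curve follows;
+          it is an illustration, not a complete cipher suite policy.
+        </t>
+        <figure>
+          <artwork type="go"><![CDATA[
+// Non-normative sketch: a TLS configuration offering
+// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P-256 and ALPN "h2".
+package main
+
+import "crypto/tls"
+
+func http2TLSConfig() *tls.Config {
+    return &tls.Config{
+        MinVersion: tls.VersionTLS12,
+        CipherSuites: []uint16{
+            tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+        },
+        CurvePreferences: []tls.CurveID{tls.CurveP256},
+        NextProtos:       []string{"h2"},
+    }
+}
+]]></artwork>
+        </figure>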
+ <t>
+ Clients MAY advertise support of cipher suites that are prohibited by the above
+ restrictions in order to allow for connection to servers that do not support HTTP/2.
+ This enables a fallback to protocols without these constraints without the additional
+ latency imposed by using a separate connection for fallback.
+ </t>
+ </section>
+ </section>
+ </section>
+
+ <section anchor="security" title="Security Considerations">
+ <section title="Server Authority" anchor="authority">
+ <t>
+ HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
+ authoritative in providing a given response, see <xref target="RFC7230" x:fmt=","
+ x:rel="#establishing.authority"/>. This relies on local name resolution for the "http"
+ URI scheme, and the authenticated server identity for the "https" scheme (see <xref
+ target="RFC2818" x:fmt="," x:sec="3"/>).
+ </t>
+ </section>
+
+ <section title="Cross-Protocol Attacks">
+ <t>
+ In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
+ protocol toward a server that understands a different protocol. An attacker might be able
+        to cause the transaction to appear as a valid transaction in the second protocol.  In
+ combination with the capabilities of the web context, this can be used to interact with
+ poorly protected servers in private networks.
+ </t>
+ <t>
+ Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
+        protection against cross-protocol attacks.  ALPN provides a positive indication that a
+ server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
+ protocols.
+ </t>
+ <t>
+ The encryption in TLS makes it difficult for attackers to control the data which could be
+ used in a cross-protocol attack on a cleartext protocol.
+ </t>
+ <t>
+ The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
+ The <xref target="ConnectionHeader">connection preface</xref> contains a string that is
+ designed to confuse HTTP/1.1 servers, but no special protection is offered for other
+ protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
+ Upgrade header field in addition to the client connection preface could be exposed to a
+ cross-protocol attack.
+ </t>
+ </section>
+
+ <section title="Intermediary Encapsulation Attacks">
+ <t>
+ HTTP/2 header field names and values are encoded as sequences of octets with a length
+ prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
+ header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
+ directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might
+ exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
+ header fields, extra header fields, or even new messages that are entirely falsified.
+ </t>
+ <t>
+ Header field names or values that contain characters not permitted by HTTP/1.1, including
+        carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be translated verbatim by an
+ intermediary, as stipulated in <xref target="RFC7230" x:rel="#field.parsing" x:fmt=","/>.
+ </t>
+ <t>
+ Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.
+ Intermediaries that perform translation to HTTP/2 MUST remove any instances of the <spanx
+ style="verb">obs-fold</spanx> production from header field values.
+ </t>
+ </section>
+
+ <section title="Cacheability of Pushed Responses">
+ <t>
+ Pushed responses do not have an explicit request from the client; the request
+ is provided by the server in the <x:ref>PUSH_PROMISE</x:ref> frame.
+ </t>
+ <t>
+ Caching responses that are pushed is possible based on the guidance provided by the origin
+ server in the Cache-Control header field. However, this can cause issues if a single
+ server hosts more than one tenant. For example, a server might offer multiple users each
+ a small portion of its URI space.
+ </t>
+ <t>
+ Where multiple tenants share space on the same server, that server MUST ensure that
+ tenants are not able to push representations of resources that they do not have authority
+ over. Failure to enforce this would allow a tenant to provide a representation that would
+ be served out of cache, overriding the actual representation that the authoritative tenant
+ provides.
+ </t>
+ <t>
+ Pushed responses for which an origin server is not authoritative (see
+ <xref target="authority"/>) are never cached or used.
+ </t>
+ </section>
+
+ <section anchor="dos" title="Denial of Service Considerations">
+ <t>
+        An HTTP/2 connection can demand a greater commitment of resources to operate than an
+        HTTP/1.1 connection.  The use of header compression and flow control depends on a
+ commitment of resources for storing a greater amount of state. Settings for these
+ features ensure that memory commitments for these features are strictly bounded.
+ </t>
+ <t>
+ The number of <x:ref>PUSH_PROMISE</x:ref> frames is not constrained in the same fashion.
+ A client that accepts server push SHOULD limit the number of streams it allows to be in
+        the "reserved (remote)" state.  An excessive number of server push streams can be treated as
+ a <xref target="StreamErrorHandler">stream error</xref> of type
+ <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+ </t>
+ <t>
+ Processing capacity cannot be guarded as effectively as state capacity.
+ </t>
+ <t>
+ The <x:ref>SETTINGS</x:ref> frame can be abused to cause a peer to expend additional
+ processing time. This might be done by pointlessly changing SETTINGS parameters, setting
+ multiple undefined parameters, or changing the same setting multiple times in the same
+ frame. <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>PRIORITY</x:ref> frames can be abused to
+ cause an unnecessary waste of resources.
+ </t>
+ <t>
+ Large numbers of small or empty frames can be abused to cause a peer to expend time
+ processing frame headers. Note however that some uses are entirely legitimate, such as
+ the sending of an empty <x:ref>DATA</x:ref> frame to end a stream.
+ </t>
+ <t>
+ Header compression also offers some opportunities to waste processing resources; see <xref
+ target="COMPRESSION" x:fmt="of" x:rel="#Security"/> for more details on potential abuses.
+ </t>
+ <t>
+ Limits in <x:ref>SETTINGS</x:ref> parameters cannot be reduced instantaneously, which
+ leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
+ particular, immediately after establishing a connection, limits set by a server are not
+ known to clients and could be exceeded without being an obvious protocol violation.
+ </t>
+ <t>
+        All these features (i.e., <x:ref>SETTINGS</x:ref> changes, small frames, header
+        compression) have legitimate uses.  These features become a burden only when they are
+ used unnecessarily or to excess.
+ </t>
+ <t>
+ An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of
+ service attack. Implementations SHOULD track the use of these features and set limits on
+ their use. An endpoint MAY treat activity that is suspicious as a <xref
+ target="ConnectionErrorHandler">connection error</xref> of type
+ <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+ </t>
+
+ <section anchor="MaxHeaderBlock" title="Limits on Header Block Size">
+ <t>
+ A large <xref target="HeaderBlock">header block</xref> can cause an implementation to
+ commit a large amount of state. Header fields that are critical for routing can appear
+ toward the end of a header block, which prevents streaming of header fields to their
+          ultimate destination.  This, and other reasons such as ensuring cache correctness,
+          means that an endpoint might need to buffer the entire header block.  Since there is no
+          hard limit to the size of a header block, some endpoints could be forced to commit a large
+ amount of available memory for header fields.
+ </t>
+ <t>
+          An endpoint can use the <x:ref>SETTINGS_MAX_HEADER_LIST_SIZE</x:ref> setting to advise peers of
+          limits that might apply on the size of header blocks.  This setting is only advisory, so
+          endpoints MAY choose to send header blocks that exceed this limit and risk having the
+          request or response being treated as malformed.  This setting is specific to a connection,
+ so any request or response could encounter a hop with a lower, unknown limit. An
+ intermediary can attempt to avoid this problem by passing on values presented by
+ different peers, but they are not obligated to do so.
+ </t>
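+        <t>
+          The following is a non-normative sketch, assuming Go and <spanx
+          style="verb">golang.org/x/net/http2</spanx>, of advertising such a limit; the value used
+          is illustrative.
+        </t>
+        <figure>
+          <artwork type="go"><![CDATA[
+// Non-normative sketch: advising the peer of a header list size limit
+// with SETTINGS_MAX_HEADER_LIST_SIZE.
+package main
+
+import "golang.org/x/net/http2"
+
+func adviseHeaderLimit(fr *http2.Framer) error {
+    return fr.WriteSettings(http2.Setting{
+        ID:  http2.SettingMaxHeaderListSize,
+        Val: 16 << 10, // 16 KiB, an example limit
+    })
+}
+]]></artwork>
+        </figure>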
+ <t>
+ A server that receives a larger header block than it is willing to handle can send an
+ HTTP 431 (Request Header Fields Too Large) status code <xref target="RFC6585"/>. A
+ client can discard responses that it cannot process. The header block MUST be processed
+ to ensure a consistent connection state, unless the connection is closed.
+ </t>
+ </section>
+ </section>
+
+ <section title="Use of Compression">
+ <t>
+ HTTP/2 enables greater use of compression for both header fields (<xref
+ target="HeaderBlock"/>) and entity bodies. Compression can allow an attacker to recover
+ secret data when it is compressed in the same context as data under attacker control.
+ </t>
+ <t>
+ There are demonstrable attacks on compression that exploit the characteristics of the web
+ (e.g., <xref target="BREACH"/>). The attacker induces multiple requests containing
+ varying plaintext, observing the length of the resulting ciphertext in each, which
+ reveals a shorter length when a guess about the secret is correct.
+ </t>
+ <t>
+ Implementations communicating on a secure channel MUST NOT compress content that includes
+ both confidential and attacker-controlled data unless separate compression dictionaries
+ are used for each source of data. Compression MUST NOT be used if the source of data
+        cannot be reliably determined.  Generic stream compression, such as that provided by TLS,
+        MUST NOT be used with HTTP/2 (<xref target="TLSFeatures"/>).
+ </t>
+ <t>
+ Further considerations regarding the compression of header fields are described in <xref
+ target="COMPRESSION"/>.
+ </t>
+ </section>
+
+ <section title="Use of Padding" anchor="padding">
+ <t>
+ Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
+ as might be provided by <xref target="TLS12">TLS</xref>. Redundant padding could even be
+ counterproductive. Correct application can depend on having specific knowledge of the
+ data that is being padded.
+ </t>
+ <t>
+ To mitigate attacks that rely on compression, disabling or limiting compression might be
+ preferable to padding as a countermeasure.
+ </t>
+ <t>
+ Padding can be used to obscure the exact size of frame content, and is provided to
+ mitigate specific attacks within HTTP. For example, attacks where compressed content
+ includes both attacker-controlled plaintext and secret data (see for example, <xref
+ target="BREACH"/>).
+ </t>
+ <t>
+ Use of padding can result in less protection than might seem immediately obvious. At
+ best, padding only makes it more difficult for an attacker to infer length information by
+ increasing the number of frames an attacker has to observe. Incorrectly implemented
+ padding schemes can be easily defeated. In particular, randomized padding with a
+ predictable distribution provides very little protection; similarly, padding payloads to a
+ fixed size exposes information as payload sizes cross the fixed size boundary, which could
+ be possible if an attacker can control plaintext.
+ </t>
+ <t>
+ Intermediaries SHOULD retain padding for <x:ref>DATA</x:ref> frames, but MAY drop padding
+ for <x:ref>HEADERS</x:ref> and <x:ref>PUSH_PROMISE</x:ref> frames. A valid reason for an
+ intermediary to change the amount of padding of frames is to improve the protections that
+ padding provides.
+ </t>
+ </section>
+
+ <section title="Privacy Considerations">
+ <t>
+ Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
+ of a single client or server over time. This includes the value of settings, the manner
+ in which flow control windows are managed, the way priorities are allocated to streams,
+ timing of reactions to stimulus, and handling of any optional features.
+ </t>
+ <t>
+        To the extent that these create observable differences in behavior, they could be used as a basis
+ for fingerprinting a specific client, as defined in <xref target="HTML5" x:fmt="of"
+ x:sec="1.8" x:rel="introduction.html#fingerprint"/>.
+ </t>
+ </section>
+ </section>
+
+ <section anchor="iana" title="IANA Considerations">
+ <t>
+ A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
+ (ALPN) Protocol IDs" registry established in <xref target="TLS-ALPN"/>.
+ </t>
+ <t>
+ This document establishes a registry for frame types, settings, and error codes. These new
+ registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
+ </t>
+ <t>
+ This document registers the <spanx style="verb">HTTP2-Settings</spanx> header field for
+ use in HTTP; and the 421 (Misdirected Request) status code.
+ </t>
+ <t>
+ This document registers the <spanx style="verb">PRI</spanx> method for use in HTTP, to avoid
+ collisions with the <xref target="ConnectionHeader">connection preface</xref>.
+ </t>
+
+ <section anchor="iana-alpn" title="Registration of HTTP/2 Identification Strings">
+ <t>
+ This document creates two registrations for the identification of HTTP/2 in the
+ "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in <xref
+ target="TLS-ALPN"/>.
+ </t>
+ <t>
+ The "h2" string identifies HTTP/2 when used over TLS:
+ <list style="hanging">
+ <t hangText="Protocol:">HTTP/2 over TLS</t>
+ <t hangText="Identification Sequence:">0x68 0x32 ("h2")</t>
+ <t hangText="Specification:">This document</t>
+ </list>
+ </t>
+ <t>
+ The "h2c" string identifies HTTP/2 when used over cleartext TCP:
+ <list style="hanging">
+ <t hangText="Protocol:">HTTP/2 over TCP</t>
+ <t hangText="Identification Sequence:">0x68 0x32 0x63 ("h2c")</t>
+ <t hangText="Specification:">This document</t>
+ </list>
+ </t>
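+      <t>
+        A non-normative sketch, assuming Go's <spanx style="verb">crypto/tls</spanx> package, of
+        checking that ALPN selected the "h2" identifier before speaking HTTP/2 over TLS follows.
+      </t>
+      <figure>
+        <artwork type="go"><![CDATA[
+// Non-normative sketch: confirming the negotiated ALPN identifier.
+package main
+
+import (
+    "crypto/tls"
+    "errors"
+)
+
+func expectH2(conn *tls.Conn) error {
+    if conn.ConnectionState().NegotiatedProtocol != "h2" {
+        return errors.New("ALPN did not select h2")
+    }
+    return nil
+}
+]]></artwork>
+      </figure>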
+ </section>
+
+ <section anchor="iana-frames" title="Frame Type Registry">
+ <t>
+ This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
+ Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
+ either of the <xref target="RFC5226">"IETF Review" or "IESG Approval" policies</xref> for
+ values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
+ experimental use.
+ </t>
+ <t>
+ New entries in this registry require the following information:
+ <list style="hanging">
+ <t hangText="Frame Type:">
+ A name or label for the frame type.
+ </t>
+ <t hangText="Code:">
+ The 8-bit code assigned to the frame type.
+ </t>
+ <t hangText="Specification:">
+ A reference to a specification that includes a description of the frame layout,
+            its semantics, and flags that the frame type uses, including any parts of the frame
+ that are conditionally present based on the value of flags.
+ </t>
+ </list>
+ </t>
+ <t>
+ The entries in the following table are registered by this document.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Frame Type</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Section</ttcol>
+ <c>DATA</c><c>0x0</c><c><xref target="DATA"/></c>
+ <c>HEADERS</c><c>0x1</c><c><xref target="HEADERS"/></c>
+ <c>PRIORITY</c><c>0x2</c><c><xref target="PRIORITY"/></c>
+ <c>RST_STREAM</c><c>0x3</c><c><xref target="RST_STREAM"/></c>
+ <c>SETTINGS</c><c>0x4</c><c><xref target="SETTINGS"/></c>
+ <c>PUSH_PROMISE</c><c>0x5</c><c><xref target="PUSH_PROMISE"/></c>
+ <c>PING</c><c>0x6</c><c><xref target="PING"/></c>
+ <c>GOAWAY</c><c>0x7</c><c><xref target="GOAWAY"/></c>
+ <c>WINDOW_UPDATE</c><c>0x8</c><c><xref target="WINDOW_UPDATE"/></c>
+ <c>CONTINUATION</c><c>0x9</c><c><xref target="CONTINUATION"/></c>
+ </texttable>
+ </section>
+
+ <section anchor="iana-settings" title="Settings Registry">
+ <t>
+ This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
+ manages a 16-bit space. The "HTTP/2 Settings" registry operates under the <xref
+ target="RFC5226">"Expert Review" policy</xref> for values in the range from 0x0000 to
+          0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
+ </t>
+ <t>
+ New registrations are advised to provide the following information:
+ <list style="hanging">
+ <t hangText="Name:">
+ A symbolic name for the setting. Specifying a setting name is optional.
+ </t>
+ <t hangText="Code:">
+ The 16-bit code assigned to the setting.
+ </t>
+ <t hangText="Initial Value:">
+ An initial value for the setting.
+ </t>
+ <t hangText="Specification:">
+ An optional reference to a specification that describes the use of the setting.
+ </t>
+ </list>
+ </t>
+ <t>
+ An initial set of setting registrations can be found in <xref target="SettingValues"/>.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Name</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Initial Value</ttcol>
+ <ttcol>Specification</ttcol>
+ <c>HEADER_TABLE_SIZE</c>
+ <c>0x1</c><c>4096</c><c><xref target="SettingValues"/></c>
+ <c>ENABLE_PUSH</c>
+ <c>0x2</c><c>1</c><c><xref target="SettingValues"/></c>
+ <c>MAX_CONCURRENT_STREAMS</c>
+ <c>0x3</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+ <c>INITIAL_WINDOW_SIZE</c>
+ <c>0x4</c><c>65535</c><c><xref target="SettingValues"/></c>
+ <c>MAX_FRAME_SIZE</c>
+ <c>0x5</c><c>16384</c><c><xref target="SettingValues"/></c>
+ <c>MAX_HEADER_LIST_SIZE</c>
+ <c>0x6</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+ </texttable>
+
+ </section>
+
+ <section anchor="iana-errors" title="Error Code Registry">
+ <t>
+ This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
+ registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
+ <xref target="RFC5226">"Expert Review" policy</xref>.
+ </t>
+ <t>
+ Registrations for error codes are required to include a description of the error code. An
+ expert reviewer is advised to examine new registrations for possible duplication with
+ existing error codes. Use of existing registrations is to be encouraged, but not
+ mandated.
+ </t>
+ <t>
+ New registrations are advised to provide the following information:
+ <list style="hanging">
+ <t hangText="Name:">
+ A name for the error code. Specifying an error code name is optional.
+ </t>
+ <t hangText="Code:">
+ The 32-bit error code value.
+ </t>
+ <t hangText="Description:">
+ A brief description of the error code semantics, longer if no detailed specification
+ is provided.
+ </t>
+ <t hangText="Specification:">
+ An optional reference for a specification that defines the error code.
+ </t>
+ </list>
+ </t>
+ <t>
+ The entries in the following table are registered by this document.
+ </t>
+ <texttable align="left" suppress-title="true">
+ <ttcol>Name</ttcol>
+ <ttcol>Code</ttcol>
+ <ttcol>Description</ttcol>
+ <ttcol>Specification</ttcol>
+ <c>NO_ERROR</c><c>0x0</c>
+ <c>Graceful shutdown</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>PROTOCOL_ERROR</c><c>0x1</c>
+ <c>Protocol error detected</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>INTERNAL_ERROR</c><c>0x2</c>
+ <c>Implementation fault</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>FLOW_CONTROL_ERROR</c><c>0x3</c>
+ <c>Flow control limits exceeded</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>SETTINGS_TIMEOUT</c><c>0x4</c>
+ <c>Settings not acknowledged</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>STREAM_CLOSED</c><c>0x5</c>
+ <c>Frame received for closed stream</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>FRAME_SIZE_ERROR</c><c>0x6</c>
+ <c>Frame size incorrect</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>REFUSED_STREAM</c><c>0x7</c>
+ <c>Stream not processed</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>CANCEL</c><c>0x8</c>
+ <c>Stream cancelled</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>COMPRESSION_ERROR</c><c>0x9</c>
+ <c>Compression state not updated</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>CONNECT_ERROR</c><c>0xa</c>
+ <c>TCP connection error for CONNECT method</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>ENHANCE_YOUR_CALM</c><c>0xb</c>
+ <c>Processing capacity exceeded</c>
+ <c><xref target="ErrorCodes"/></c>
+ <c>INADEQUATE_SECURITY</c><c>0xc</c>
+ <c>Negotiated TLS parameters not acceptable</c>
+ <c><xref target="ErrorCodes"/></c>
+ </texttable>
+
+ </section>
+
+ <section title="HTTP2-Settings Header Field Registration">
+ <t>
+ This section registers the <spanx style="verb">HTTP2-Settings</spanx> header field in the
+ <xref target="BCP90">Permanent Message Header Field Registry</xref>.
+ <list style="hanging">
+ <t hangText="Header field name:">
+ HTTP2-Settings
+ </t>
+ <t hangText="Applicable protocol:">
+ http
+ </t>
+ <t hangText="Status:">
+ standard
+ </t>
+ <t hangText="Author/Change controller:">
+ IETF
+ </t>
+ <t hangText="Specification document(s):">
+ <xref target="Http2SettingsHeader"/> of this document
+ </t>
+ <t hangText="Related information:">
+ This header field is only used by an HTTP/2 client for Upgrade-based negotiation.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section title="PRI Method Registration">
+ <t>
+ This section registers the <spanx style="verb">PRI</spanx> method in the HTTP Method
+ Registry (<xref target="RFC7231" x:fmt="," x:rel="#method.registry"/>).
+ <list style="hanging">
+ <t hangText="Method Name:">
+ PRI
+ </t>
+          <t hangText="Safe:">
+            No
+          </t>
+          <t hangText="Idempotent:">
+            No
+          </t>
+          <t hangText="Specification document(s):">
+ <xref target="ConnectionHeader"/> of this document
+ </t>
+ <t hangText="Related information:">
+ This method is never used by an actual client. This method will appear to be used
+ when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection
+ preface.
+ </t>
+ </list>
+ </t>
+ </section>
+
+ <section title="The 421 (Misdirected Request) HTTP Status Code"
+ anchor="iana-MisdirectedRequest">
+ <t>
+ This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
+ Transfer Protocol (HTTP) Status Code Registry (<xref target="RFC7231" x:fmt=","
+ x:rel="#status.code.registry"/>).
+ </t>
+ <t>
+ <list style="hanging">
+ <t hangText="Status Code:">
+ 421
+ </t>
+ <t hangText="Short Description:">
+ Misdirected Request
+ </t>
+ <t hangText="Specification:">
+ <xref target="MisdirectedRequest"/> of this document
+ </t>
+ </list>
+ </t>
+ </section>
+
+ </section>
+
+ <section title="Acknowledgements">
+ <t>
+ This document includes substantial input from the following individuals:
+ <list style="symbols">
+ <t>
+ Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
+ Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
+ Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
+ contributors).
+ </t>
+ <t>
+ Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
+ </t>
+ <t>
+ William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
+ Peon, Rob Trace (Flow control).
+ </t>
+ <t>
+ Mike Bishop (Extensibility).
+ </t>
+ <t>
+ Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
+ (Substantial editorial contributions).
+ </t>
+ <t>
+ Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
+ </t>
+ <t>
+ Alexey Melnikov was an editor of this document during 2013.
+ </t>
+ <t>
+ A substantial proportion of Martin's contribution was supported by Microsoft during his
+ employment there.
+ </t>
+ </list>
+ </t>
+ </section>
+ </middle>
+
+ <back>
+ <references title="Normative References">
+ <reference anchor="COMPRESSION">
+ <front>
+ <title>HPACK - Header Compression for HTTP/2</title>
+ <author initials="H." surname="Ruellan" fullname="Herve Ruellan"/>
+ <author initials="R." surname="Peon" fullname="Roberto Peon"/>
+ <date month="July" year="2014" />
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-header-compression-09" />
+ <x:source href="refs/draft-ietf-httpbis-header-compression-09.xml"/>
+ </reference>
+
+ <reference anchor="TCP">
+ <front>
+ <title abbrev="Transmission Control Protocol">
+ Transmission Control Protocol
+ </title>
+ <author initials="J." surname="Postel" fullname="Jon Postel">
+ <organization>University of Southern California (USC)/Information Sciences
+ Institute</organization>
+ </author>
+ <date year="1981" month="September" />
+ </front>
+ <seriesInfo name="STD" value="7" />
+ <seriesInfo name="RFC" value="793" />
+ </reference>
+
+ <reference anchor="RFC2119">
+ <front>
+ <title>
+ Key words for use in RFCs to Indicate Requirement Levels
+ </title>
+ <author initials="S." surname="Bradner" fullname="Scott Bradner">
+ <organization>Harvard University</organization>
+ <address><email>sob@harvard.edu</email></address>
+ </author>
+ <date month="March" year="1997"/>
+ </front>
+ <seriesInfo name="BCP" value="14"/>
+ <seriesInfo name="RFC" value="2119"/>
+ </reference>
+
+ <reference anchor="RFC2818">
+ <front>
+ <title>
+ HTTP Over TLS
+ </title>
+ <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+ <date month="May" year="2000"/>
+ </front>
+ <seriesInfo name="RFC" value="2818"/>
+ </reference>
+
+ <reference anchor="RFC3986">
+ <front>
+ <title abbrev="URI Generic Syntax">Uniform Resource Identifier (URI): Generic
+ Syntax</title>
+ <author initials="T." surname="Berners-Lee" fullname="Tim Berners-Lee"></author>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding"></author>
+ <author initials="L." surname="Masinter" fullname="Larry Masinter"></author>
+ <date year="2005" month="January" />
+ </front>
+ <seriesInfo name="STD" value="66" />
+ <seriesInfo name="RFC" value="3986" />
+ </reference>
+
+ <reference anchor="RFC4648">
+ <front>
+ <title>The Base16, Base32, and Base64 Data Encodings</title>
+ <author fullname="S. Josefsson" initials="S." surname="Josefsson"/>
+ <date year="2006" month="October"/>
+ </front>
+ <seriesInfo value="4648" name="RFC"/>
+ </reference>
+
+ <reference anchor="RFC5226">
+ <front>
+ <title>Guidelines for Writing an IANA Considerations Section in RFCs</title>
+ <author initials="T." surname="Narten" fullname="T. Narten"/>
+ <author initials="H." surname="Alvestrand" fullname="H. Alvestrand"/>
+ <date year="2008" month="May" />
+ </front>
+ <seriesInfo name="BCP" value="26" />
+ <seriesInfo name="RFC" value="5226" />
+ </reference>
+
+ <reference anchor="RFC5234">
+ <front>
+ <title>Augmented BNF for Syntax Specifications: ABNF</title>
+ <author initials="D." surname="Crocker" fullname="D. Crocker"/>
+ <author initials="P." surname="Overell" fullname="P. Overell"/>
+ <date year="2008" month="January" />
+ </front>
+ <seriesInfo name="STD" value="68" />
+ <seriesInfo name="RFC" value="5234" />
+ </reference>
+
+ <reference anchor="TLS12">
+ <front>
+ <title>The Transport Layer Security (TLS) Protocol Version 1.2</title>
+ <author initials="T." surname="Dierks" fullname="Tim Dierks"/>
+ <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5246" />
+ </reference>
+
+ <reference anchor="TLS-EXT">
+ <front>
+ <title>
+ Transport Layer Security (TLS) Extensions: Extension Definitions
+ </title>
+ <author initials="D." surname="Eastlake" fullname="D. Eastlake"/>
+ <date year="2011" month="January"/>
+ </front>
+ <seriesInfo name="RFC" value="6066"/>
+ </reference>
+
+ <reference anchor="TLS-ALPN">
+ <front>
+ <title>Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension</title>
+ <author initials="S." surname="Friedl" fullname="Stephan Friedl"></author>
+ <author initials="A." surname="Popov" fullname="Andrei Popov"></author>
+ <author initials="A." surname="Langley" fullname="Adam Langley"></author>
+ <author initials="E." surname="Stephan" fullname="Emile Stephan"></author>
+ <date month="July" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7301" />
+ </reference>
+
+ <reference anchor="TLS-ECDHE">
+ <front>
+ <title>
+ TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
+ Counter Mode (GCM)
+ </title>
+ <author initials="E." surname="Rescorla" fullname="E. Rescorla"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5289" />
+ </reference>
+
+ <reference anchor="FIPS186">
+ <front>
+ <title>
+ Digital Signature Standard (DSS)
+ </title>
+ <author><organization>NIST</organization></author>
+ <date year="2013" month="July" />
+ </front>
+ <seriesInfo name="FIPS" value="PUB 186-4" />
+ </reference>
+
+ <reference anchor="RFC7230">
+ <front>
+ <title>
+ Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7230" />
+ <x:source href="refs/rfc7230.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230"/>
+ </reference>
+ <reference anchor="RFC7231">
+ <front>
+ <title>
+ Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7231" />
+ <x:source href="refs/rfc7231.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7231"/>
+ </reference>
+ <reference anchor="RFC7232">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests</title>
+ <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7232" />
+ </reference>
+ <reference anchor="RFC7233">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Range Requests</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author initials="Y." surname="Lafon" fullname="Yves Lafon" role="editor">
+ <organization abbrev="W3C">World Wide Web Consortium</organization>
+ <address><email>ylafon@w3.org</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7233" />
+ </reference>
+ <reference anchor="RFC7234">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Caching</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author fullname="Mark Nottingham" initials="M." role="editor" surname="Nottingham">
+ <organization>Akamai</organization>
+ <address><email>mnot@mnot.net</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7234"/>
+ <x:source href="refs/rfc7234.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7234"/>
+ </reference>
+ <reference anchor="RFC7235">
+ <front>
+ <title>Hypertext Transfer Protocol (HTTP/1.1): Authentication</title>
+ <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+ <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+ <address><email>fielding@gbiv.com</email></address>
+ </author>
+ <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+ <organization abbrev="greenbytes">greenbytes GmbH</organization>
+ <address><email>julian.reschke@greenbytes.de</email></address>
+ </author>
+ <date month="June" year="2014" />
+ </front>
+ <seriesInfo name="RFC" value="7235"/>
+ <x:source href="refs/rfc7235.xml"
+ basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7235"/>
+ </reference>
+
+ <reference anchor="COOKIE">
+ <front>
+ <title>HTTP State Management Mechanism</title>
+ <author initials="A." surname="Barth" fullname="A. Barth"/>
+ <date year="2011" month="April" />
+ </front>
+ <seriesInfo name="RFC" value="6265" />
+ </reference>
+ </references>
+
+ <references title="Informative References">
+ <reference anchor="RFC1323">
+ <front>
+ <title>
+ TCP Extensions for High Performance
+ </title>
+ <author initials="V." surname="Jacobson" fullname="Van Jacobson"></author>
+ <author initials="B." surname="Braden" fullname="Bob Braden"></author>
+ <author initials="D." surname="Borman" fullname="Dave Borman"></author>
+ <date year="1992" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="1323" />
+ </reference>
+
+ <reference anchor="RFC3749">
+ <front>
+ <title>Transport Layer Security Protocol Compression Methods</title>
+ <author initials="S." surname="Hollenbeck" fullname="S. Hollenbeck"/>
+ <date year="2004" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="3749" />
+ </reference>
+
+ <reference anchor="RFC6585">
+ <front>
+ <title>Additional HTTP Status Codes</title>
+ <author initials="M." surname="Nottingham" fullname="Mark Nottingham"/>
+ <author initials="R." surname="Fielding" fullname="Roy Fielding"/>
+ <date year="2012" month="April" />
+ </front>
+ <seriesInfo name="RFC" value="6585" />
+ </reference>
+
+ <reference anchor="RFC4492">
+ <front>
+ <title>
+ Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
+ </title>
+ <author initials="S." surname="Blake-Wilson" fullname="S. Blake-Wilson"/>
+ <author initials="N." surname="Bolyard" fullname="N. Bolyard"/>
+ <author initials="V." surname="Gupta" fullname="V. Gupta"/>
+ <author initials="C." surname="Hawk" fullname="C. Hawk"/>
+ <author initials="B." surname="Moeller" fullname="B. Moeller"/>
+ <date year="2006" month="May" />
+ </front>
+ <seriesInfo name="RFC" value="4492" />
+ </reference>
+
+ <reference anchor="RFC5288">
+ <front>
+ <title>
+ AES Galois Counter Mode (GCM) Cipher Suites for TLS
+ </title>
+ <author initials="J." surname="Salowey" fullname="J. Salowey"/>
+ <author initials="A." surname="Choudhury" fullname="A. Choudhury"/>
+ <author initials="D." surname="McGrew" fullname="D. McGrew"/>
+ <date year="2008" month="August" />
+ </front>
+ <seriesInfo name="RFC" value="5288" />
+ </reference>
+
+ <reference anchor='HTML5'
+ target='http://www.w3.org/TR/2014/CR-html5-20140731/'>
+ <front>
+ <title>HTML5</title>
+ <author fullname='Robin Berjon' surname='Berjon' initials='R.'/>
+ <author fullname='Steve Faulkner' surname='Faulkner' initials='S.'/>
+ <author fullname='Travis Leithead' surname='Leithead' initials='T.'/>
+ <author fullname='Erika Doyle Navara' surname='Doyle Navara' initials='E.'/>
+ <author fullname='Edward O&apos;Connor' surname='O&apos;Connor' initials='E.'/>
+ <author fullname='Silvia Pfeiffer' surname='Pfeiffer' initials='S.'/>
+ <date year='2014' month='July' day='31'/>
+ </front>
+ <seriesInfo name='W3C Candidate Recommendation' value='CR-html5-20140731'/>
+ <annotation>
+ Latest version available at
+ <eref target='http://www.w3.org/TR/html5/'/>.
+ </annotation>
+ </reference>
+
+ <reference anchor="TALKING" target="http://w2spconf.com/2011/papers/websocket.pdf">
+ <front>
+ <title>
+ Talking to Yourself for Fun and Profit
+ </title>
+ <author initials="L-S." surname="Huang"/>
+ <author initials="E." surname="Chen"/>
+ <author initials="A." surname="Barth"/>
+ <author initials="E." surname="Rescorla"/>
+ <author initials="C." surname="Jackson"/>
+ <date year="2011" />
+ </front>
+ </reference>
+
+ <reference anchor="BREACH"
+ target="http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf">
+ <front>
+ <title>
+ BREACH: Reviving the CRIME Attack
+ </title>
+ <author initials="Y." surname="Gluck"/>
+ <author initials="N." surname="Harris"/>
+ <author initials="A." surname="Prado"/>
+ <date year="2013" month="July" day="12"/>
+ </front>
+ </reference>
+
+ <reference anchor="BCP90">
+ <front>
+ <title>Registration Procedures for Message Header Fields</title>
+ <author initials="G." surname="Klyne" fullname="G. Klyne">
+ <organization>Nine by Nine</organization>
+ <address><email>GK-IETF@ninebynine.org</email></address>
+ </author>
+ <author initials="M." surname="Nottingham" fullname="M. Nottingham">
+ <organization>BEA Systems</organization>
+ <address><email>mnot@pobox.com</email></address>
+ </author>
+ <author initials="J." surname="Mogul" fullname="J. Mogul">
+ <organization>HP Labs</organization>
+ <address><email>JeffMogul@acm.org</email></address>
+ </author>
+ <date year="2004" month="September" />
+ </front>
+ <seriesInfo name="BCP" value="90" />
+ <seriesInfo name="RFC" value="3864" />
+ </reference>
+
+ <reference anchor="TLSBCP">
+ <front>
+ <title>Recommendations for Secure Use of TLS and DTLS</title>
+ <author initials="Y" surname="Sheffer" fullname="Yaron Sheffer">
+ <organization />
+ </author>
+ <author initials="R" surname="Holz" fullname="Ralph Holz">
+ <organization />
+ </author>
+ <author initials="P" surname="Saint-Andre" fullname="Peter Saint-Andre">
+ <organization />
+ </author>
+ <date month="June" day="23" year="2014" />
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-uta-tls-bcp-01" />
+ </reference>
+
+ <reference anchor="ALT-SVC">
+ <front>
+ <title>
+ HTTP Alternative Services
+ </title>
+ <author initials="M." surname="Nottingham" fullname="Mark Nottingham">
+ <organization>Akamai</organization>
+ </author>
+ <author initials="P." surname="McManus" fullname="Patrick McManus">
+ <organization>Mozilla</organization>
+ </author>
+ <author initials="J." surname="Reschke" fullname="Julian Reschke">
+ <organization>greenbytes</organization>
+ </author>
+ <date year="2014" month="April"/>
+ </front>
+ <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-alt-svc-02"/>
+ <x:source href="refs/draft-ietf-httpbis-alt-svc-02.xml"/>
+ </reference>
+ </references>
+
+ <section title="Change Log" anchor="change.log">
+ <t>
+ This section is to be removed by RFC Editor before publication.
+ </t>
+
+ <section title="Since draft-ietf-httpbis-http2-14" anchor="changes.since.draft-ietf-httpbis-http2-14">
+ <t>
+ Renamed Not Authoritative status code to Misdirected Request.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-13" anchor="changes.since.draft-ietf-httpbis-http2-13">
+ <t>
+ Pseudo-header fields are now required to appear strictly before regular ones.
+ </t>
+ <t>
+ Restored 1xx series status codes, except 101.
+ </t>
+ <t>
+        Changed frame length field to 24 bits.  Expanded frame header to 9 octets.  Added a setting
+ to limit the damage.
+ </t>
+ <t>
+ Added a setting to advise peers of header set size limits.
+ </t>
+ <t>
+ Removed segments.
+ </t>
+ <t>
+ Made non-semantic-bearing <x:ref>HEADERS</x:ref> frames illegal in the HTTP mapping.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-12" anchor="changes.since.draft-ietf-httpbis-http2-12">
+ <t>
+ Restored extensibility options.
+ </t>
+ <t>
+ Restricting TLS cipher suites to AEAD only.
+ </t>
+ <t>
+ Removing Content-Encoding requirements.
+ </t>
+ <t>
+ Permitting the use of <x:ref>PRIORITY</x:ref> after stream close.
+ </t>
+ <t>
+ Removed ALTSVC frame.
+ </t>
+ <t>
+ Removed BLOCKED frame.
+ </t>
+ <t>
+ Reducing the maximum padding size to 256 octets; removing padding from
+ <x:ref>CONTINUATION</x:ref> frames.
+ </t>
+ <t>
+ Removed per-frame GZIP compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-11" anchor="changes.since.draft-ietf-httpbis-http2-11">
+ <t>
+ Added BLOCKED frame (at risk).
+ </t>
+ <t>
+ Simplified priority scheme.
+ </t>
+ <t>
+ Added <x:ref>DATA</x:ref> per-frame GZIP compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-10" anchor="changes.since.draft-ietf-httpbis-http2-10">
+ <t>
+ Changed "connection header" to "connection preface" to avoid confusion.
+ </t>
+ <t>
+ Added dependency-based stream prioritization.
+ </t>
+ <t>
+ Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
+ </t>
+ <t>
+ Adding missing padding to <x:ref>PUSH_PROMISE</x:ref>.
+ </t>
+ <t>
+ Integrate ALTSVC frame and supporting text.
+ </t>
+ <t>
+ Dropping requirement on "deflate" Content-Encoding.
+ </t>
+ <t>
+ Improving security considerations around use of compression.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-09" anchor="changes.since.draft-ietf-httpbis-http2-09">
+ <t>
+ Adding padding for data frames.
+ </t>
+ <t>
+ Renumbering frame types, error codes, and settings.
+ </t>
+ <t>
+ Adding INADEQUATE_SECURITY error code.
+ </t>
+ <t>
+ Updating TLS usage requirements to 1.2; forbidding TLS compression.
+ </t>
+ <t>
+ Removing extensibility for frames and settings.
+ </t>
+ <t>
+ Changing setting identifier size.
+ </t>
+ <t>
+ Removing the ability to disable flow control.
+ </t>
+ <t>
+ Changing the protocol identification token to "h2".
+ </t>
+ <t>
+ Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
+ cases.
+ </t>
+ <t>
+ Allowing split on 0x0 for Cookie.
+ </t>
+ <t>
+ Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-08" anchor="changes.since.draft-ietf-httpbis-http2-08">
+ <t>
+ Added cookie crumbling for more efficient header compression.
+ </t>
+ <t>
+ Added header field ordering with the value-concatenation mechanism.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-07" anchor="changes.since.draft-ietf-httpbis-http2-07">
+ <t>
+ Marked draft for implementation.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-06" anchor="changes.since.draft-ietf-httpbis-http2-06">
+ <t>
+ Adding definition for CONNECT method.
+ </t>
+ <t>
+ Constraining the use of push to safe, cacheable methods with no request body.
+ </t>
+ <t>
+ Changing from :host to :authority to remove any potential confusion.
+ </t>
+ <t>
+ Adding setting for header compression table size.
+ </t>
+ <t>
+ Adding settings acknowledgement.
+ </t>
+ <t>
+ Removing unnecessary and potentially problematic flags from CONTINUATION.
+ </t>
+ <t>
+ Added denial of service considerations.
+ </t>
+ </section>
+ <section title="Since draft-ietf-httpbis-http2-05" anchor="changes.since.draft-ietf-httpbis-http2-05">
+ <t>
+ Marking the draft ready for implementation.
+ </t>
+ <t>
+ Renumbering END_PUSH_PROMISE flag.
+ </t>
+ <t>
+ Editorial clarifications and changes.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-04" anchor="changes.since.draft-ietf-httpbis-http2-04">
+ <t>
+ Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
+ </t>
+ <t>
+ PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
+ zero.
+ </t>
+ <t>
+ Push expanded to allow all safe methods without a request body.
+ </t>
+ <t>
+ Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1
+ hop-by-hop header fields.
+ </t>
+ <t>
+ Requiring that intermediaries not forward requests with missing or illegal routing
+ :-headers.
+ </t>
+ <t>
+ Clarified requirements around handling different frames after stream close, stream reset
+ and <x:ref>GOAWAY</x:ref>.
+ </t>
+ <t>
+ Added more specific prohibitions for sending of different frame types in various stream
+ states.
+ </t>
+ <t>
+ Making the last received setting value the effective value.
+ </t>
+ <t>
+ Clarified requirements on TLS version, extension and ciphers.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-03" anchor="changes.since.draft-ietf-httpbis-http2-03">
+ <t>
+ Committed major restructuring atrocities.
+ </t>
+ <t>
+ Added reference to first header compression draft.
+ </t>
+ <t>
+ Added more formal description of frame lifecycle.
+ </t>
+ <t>
+ Moved END_STREAM (renamed from FINAL) back to <x:ref>HEADERS</x:ref>/<x:ref>DATA</x:ref>.
+ </t>
+ <t>
+ Removed HEADERS+PRIORITY, added optional priority to <x:ref>HEADERS</x:ref> frame.
+ </t>
+ <t>
+ Added <x:ref>PRIORITY</x:ref> frame.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-02" anchor="changes.since.draft-ietf-httpbis-http2-02">
+ <t>
+ Added continuations to frames carrying header blocks.
+ </t>
+ <t>
+ Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
+ concepts, like cookies.
+ </t>
+ <t>
+ Removed "message".
+ </t>
+ <t>
+ Switched to TLS ALPN from NPN.
+ </t>
+ <t>
+ Editorial changes.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-01" anchor="changes.since.draft-ietf-httpbis-http2-01">
+ <t>
+ Added IANA considerations section for frame types, error codes and settings.
+ </t>
+ <t>
+ Removed data frame compression.
+ </t>
+ <t>
+ Added <x:ref>PUSH_PROMISE</x:ref>.
+ </t>
+ <t>
+ Added globally applicable flags to framing.
+ </t>
+ <t>
+ Removed zlib-based header compression mechanism.
+ </t>
+ <t>
+ Updated references.
+ </t>
+ <t>
+ Clarified stream identifier reuse.
+ </t>
+ <t>
+ Removed CREDENTIALS frame and associated mechanisms.
+ </t>
+ <t>
+ Added advice against naive implementation of flow control.
+ </t>
+ <t>
+ Added session header section.
+ </t>
+ <t>
+ Restructured frame header. Removed distinction between data and control frames.
+ </t>
+ <t>
+ Altered flow control properties to include session-level limits.
+ </t>
+ <t>
+ Added note on cacheability of pushed resources and multiple tenant servers.
+ </t>
+ <t>
+ Changed protocol label form based on discussions.
+ </t>
+ </section>
+
+ <section title="Since draft-ietf-httpbis-http2-00" anchor="changes.since.draft-ietf-httpbis-http2-00">
+ <t>
+ Changed title throughout.
+ </t>
+ <t>
+ Removed section on Incompatibilities with SPDY draft#2.
+ </t>
+ <t>
+ Changed <x:ref>INTERNAL_ERROR</x:ref> on <x:ref>GOAWAY</x:ref> to have a value of 2 <eref
+ target="https://groups.google.com/forum/?fromgroups#!topic/spdy-dev/cfUef2gL3iU"/>.
+ </t>
+ <t>
+ Replaced abstract and introduction.
+ </t>
+ <t>
+ Added section on starting HTTP/2.0, including upgrade mechanism.
+ </t>
+ <t>
+ Removed unused references.
+ </t>
+ <t>
+ Added <xref target="fc-principles">flow control principles</xref> based on <eref
+ target="https://tools.ietf.org/html/draft-montenegro-httpbis-http2-fc-principles-01"/>.
+ </t>
+ </section>
+
+ <section title="Since draft-mbelshe-httpbis-spdy-00" anchor="changes.since.draft-mbelshe-httpbis-spdy-00">
+ <t>
+ Adopted as base for draft-ietf-httpbis-http2.
+ </t>
+ <t>
+ Updated authors/editors list.
+ </t>
+ <t>
+ Added status note.
+ </t>
+ </section>
+ </section>
+
+ </back>
+</rfc>
+<!--
+ vim:et:tw=100:sw=2:
+ -->
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 000000000..42c73bd1e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,2003 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+ "golang.org/x/net/lex/httplex"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
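+
+// exampleTransportClient is an illustrative sketch, not part of the
+// upstream file: it shows how a caller would typically wire this
+// Transport into an http.Client for HTTPS requests. The TLS setting
+// shown is an assumption, not a requirement of the package.
+func exampleTransportClient() *http.Client {
+	return &http.Client{
+		Transport: &Transport{
+			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
+		},
+	}
+}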
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
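+
+// exampleConfigureTransport is an illustrative sketch, not part of the
+// upstream file: it shows the transparent-upgrade path, where an existing
+// net/http Transport is taught to negotiate "h2" for its HTTPS
+// connections. The error case covers net/http packages older than Go 1.6.
+func exampleConfigureTransport() (*http.Client, error) {
+	t1 := &http.Transport{}
+	if err := ConfigureTransport(t1); err != nil {
+		return nil, err
+	}
+	return &http.Client{Transport: t1}, nil
+}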
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ singleUse bool // whether being used for a single http.Request
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ lastActive time.Time
+ // Settings from peer: (also guarded by mu)
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ initialWindowSize uint32
+
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
+ werr error // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+ req *http.Request
+ trace *clientTrace // or nil
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+	on100         func() // optional code to run if we get a 100 continue response
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+
+ peerReset chan struct{} // closed on peer reset
+ resetErr error // populated before peerReset is closed
+
+	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc)
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ ctx := reqContext(req)
+ if req.Cancel == nil && ctx.Done() == nil {
+ return
+ }
+ select {
+ case <-req.Cancel:
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-ctx.Done():
+ cs.bufPipe.CloseWithError(ctx.Err())
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-cs.done:
+ }
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+ select {
+ case <-cs.peerReset:
+ return cs.resetErr
+ case <-cs.done:
+ return errStreamClosed
+ default:
+ return nil
+ }
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
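+
+// An illustrative sketch, not part of the upstream file: only reuse an
+// already-established connection, without dialing a new one. Here t is a
+// *Transport and req an *http.Request for an https:// URL (assumptions).
+//
+//	res, err := t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
+//	if err == ErrNoCachedConn {
+//		// no idle HTTP/2 connection; fall back to dialing
+//	}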
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
+// to a host:port. The port 443 (or 80 for the "http" scheme) is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ host = authority
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ return net.JoinHostPort(host, port)
+}
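+
+// For illustration only (not part of the upstream file), the mapping
+// performed by authorityAddr, with placeholder host names:
+//
+//	authorityAddr("https", "example.com")      == "example.com:443"
+//	authorityAddr("http", "example.com")       == "example.com:80"
+//	authorityAddr("https", "example.com:8443") == "example.com:8443"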
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+ for {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ traceGotConn(req, cc)
+ res, err := cc.RoundTrip(req)
+ if shouldRetryRequest(req, err) {
+ continue
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+)
+
+func shouldRetryRequest(req *http.Request, err error) bool {
+ // TODO: retry GET requests (no bodies) more aggressively, if shutdown
+ // before response.
+ return err == errClientConnUnusable
+}
+
+func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.newClientConn(tconn, singleUse)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *cloneTLSConfig(t.TLSClientConfig)
+ }
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return t.dialTLSDefault
+}
+
+func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ cn, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := cn.Handshake(); err != nil {
+ return nil, err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := cn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, err
+ }
+ }
+ state := cn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return cn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return transportExpectContinueTimeout(t.t1)
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ return t.newClientConn(c, false)
+}
+
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ cc := &ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ }
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+ }
+
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.br = bufio.NewReader(c)
+ cc.fr = NewFramer(cc.bw, cc.br)
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ if cs, ok := c.(connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []Setting{
+ {ID: SettingEnablePush, Val: 0},
+ {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+
+ cc.bw.Write(clientPreface)
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+ cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ old := cc.goAway
+ cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
+}
+
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return false
+ }
+ return cc.goAway == nil && !cc.closed &&
+ int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+ cc.nextStreamID < math.MaxInt32
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ nextID := cc.nextStreamID
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
+ cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return errors.New("http2: invalid Upgrade request header")
+ }
+ if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
+ return errors.New("http2: invalid Transfer-Encoding request header")
+ }
+ if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
+ return errors.New("http2: invalid Connection request header")
+ }
+ return nil
+}
+
+func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
+ body = req.Body
+ if body == nil {
+ return nil, 0
+ }
+ if req.ContentLength != 0 {
+ return req.Body, req.ContentLength
+ }
+
+ // We have a body but a zero content length. Test to see if
+ // it's actually zero or just unset.
+ var buf [1]byte
+ n, rerr := body.Read(buf[:])
+ if rerr != nil && rerr != io.EOF {
+ return errorReader{rerr}, -1
+ }
+ if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+ // Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ if rerr == io.EOF {
+ return bytes.NewReader(buf[:]), 1
+ }
+ return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+ }
+ // Body is actually zero bytes.
+ return nil, 0
+}
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := checkConnHeaders(req); err != nil {
+ return nil, err
+ }
+
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return nil, err
+ }
+ hasTrailers := trailers != ""
+
+ cc.mu.Lock()
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ cc.mu.Unlock()
+ return nil, errClientConnUnusable
+ }
+
+ body, contentLen := bodyAndLength(req)
+ hasBody := body != nil
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ var requestedGzip bool
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ }
+
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
+ if err != nil {
+ cc.mu.Unlock()
+ return nil, err
+ }
+
+ cs := cc.newStream()
+ cs.req = req
+ cs.trace = requestTrace(req)
+ cs.requestedGzip = requestedGzip
+ bodyWriter := cc.t.getBodyWriterState(cs, body)
+ cs.on100 = bodyWriter.on100
+
+ cc.wmu.Lock()
+ endStream := !hasBody && !hasTrailers
+ werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+ cc.wmu.Unlock()
+ traceWroteHeaders(cs.trace)
+ cc.mu.Unlock()
+
+ if werr != nil {
+ if hasBody {
+ req.Body.Close() // per RoundTripper contract
+ bodyWriter.cancel()
+ }
+ cc.forgetStreamID(cs.ID)
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
+ traceWroteRequest(cs.trace, werr)
+ return nil, werr
+ }
+
+ var respHeaderTimer <-chan time.Time
+ if hasBody {
+ bodyWriter.scheduleBodyWrite()
+ } else {
+ traceWroteRequest(cs.trace, nil)
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+
+ readLoopResCh := cs.resc
+ bodyWritten := false
+ ctx := reqContext(req)
+
+ handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
+ res := re.res
+ if re.err != nil || res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ if re.err != nil {
+ cc.forgetStreamID(cs.ID)
+ return nil, re.err
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
+ }
+
+ for {
+ select {
+ case re := <-readLoopResCh:
+ return handleReadLoopResponse(re)
+ case <-respHeaderTimer:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errTimeout
+ case <-ctx.Done():
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, ctx.Err()
+ case <-req.Cancel:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errRequestCanceled
+ case <-cs.peerReset:
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
+ return nil, cs.resetErr
+ case err := <-bodyWriter.resc:
+ // Prefer the read loop's response, if available. Issue 16102.
+ select {
+ case re := <-readLoopResCh:
+ return handleReadLoopResponse(re)
+ default:
+ }
+ if err != nil {
+ return nil, err
+ }
+ bodyWritten = true
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ frameSize := int(cc.maxFrameSize)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, and also send a stream reset of CANCEL.
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+				// opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
+ }
+
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ trls = cc.encodeTrailers(req)
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, errClientConnClosed
+ }
+ if cs.stopReqBody != nil {
+ return 0, cs.stopReqBody
+ }
+ if err := cs.checkResetOrDone(); err != nil {
+ return 0, err
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+			if int(take) > maxBytes {
+				take = int32(maxBytes) // can't truncate int; take is int32
+			}
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httplex.PunycodeHostPort(host)
+ if err != nil {
+ return nil, err
+ }
+
+ var path string
+ if req.Method != "CONNECT" {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return nil, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httplex.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ }
+ }
+ }
+
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ cc.writeHeader(":authority", host)
+ cc.writeHeader(":method", req.Method)
+ if req.Method != "CONNECT" {
+ cc.writeHeader(":path", path)
+ cc.writeHeader(":scheme", "https")
+ }
+ if trailers != "" {
+ cc.writeHeader("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ switch lowKey {
+ case "host", "content-length":
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ case "user-agent":
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ cc.writeHeader("accept-encoding", "gzip")
+ }
+ if !didUA {
+ cc.writeHeader("user-agent", defaultUserAgent)
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
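+
+// For illustration only (not part of the upstream file), a few sample
+// inputs and the decision shouldSendReqContentLength makes for them:
+//
+//	shouldSendReqContentLength("POST", 0)  == true  // explicit empty body
+//	shouldSendReqContentLength("GET", 0)   == false
+//	shouldSendReqContentLength("GET", 100) == true  // any positive length
+//	shouldSendReqContentLength("PUT", -1)  == false // unknown length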
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+ cc.hbuf.Reset()
+ for k, vv := range req.Trailer {
+		// Transfer-Encoding, etc. have already been filtered out at the
+		// start of RoundTrip.
+ lowKey := strings.ToLower(k)
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+ cs := &clientStream{
+ cc: cc,
+ ID: cc.nextStreamID,
+ resc: make(chan resAndError, 1),
+ peerReset: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.add(transportDefaultStreamFlow)
+ cs.inflow.setConnFlow(&cc.inflow)
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cs := cc.streams[id]
+ if andRemove && cs != nil && !cc.closed {
+ cc.lastActive = time.Now()
+ delete(cc.streams, id)
+ close(cs.done)
+ cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+ cc *ClientConn
+ activeRes map[uint32]*clientStream // keyed by streamID
+ closeWhenIdle bool
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+ rl := &clientConnReadLoop{
+ cc: cc,
+ activeRes: make(map[uint32]*clientStream),
+ }
+
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type GoAwayError struct {
+ LastStreamID uint32
+ ErrCode ErrCode
+ DebugData string
+}
+
+func (e GoAwayError) Error() string {
+ return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+ e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ defer cc.tconn.Close()
+ defer cc.t.connPool().MarkDead(cc)
+ defer close(cc.readerDone)
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ cc.mu.Lock()
+ if cc.goAway != nil && isEOFOrNetReadError(err) {
+ err = GoAwayError{
+ LastStreamID: cc.goAway.LastStreamID,
+ ErrCode: cc.goAway.ErrCode,
+ DebugData: cc.goAwayDebug,
+ }
+ } else if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ for _, cs := range rl.activeRes {
+ cs.bufPipe.CloseWithError(err)
+ }
+ for _, cs := range cc.streams {
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+ close(cs.done)
+ }
+ cc.closed = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+func (rl *clientConnReadLoop) run() error {
+ cc := rl.cc
+ rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
+ gotReply := false // ever saw a HEADERS reply
+ gotSettings := false
+ for {
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+ }
+ if se, ok := err.(StreamError); ok {
+ if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+ cs.cc.writeStreamReset(cs.ID, se.Code, err)
+ if se.Cause == nil {
+ se.Cause = cc.fr.errDetail
+ }
+ rl.endStreamError(cs, se)
+ }
+ continue
+ } else if err != nil {
+ return err
+ }
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ if !gotSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ cc.logf("protocol error: received %T before a SETTINGS frame", f)
+ return ConnectionError(ErrCodeProtocol)
+ }
+ gotSettings = true
+ }
+ maybeIdle := false // whether frame might transition us to idle
+
+ switch f := f.(type) {
+ case *MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ maybeIdle = true
+ gotReply = true
+ case *DataFrame:
+ err = rl.processData(f)
+ maybeIdle = true
+ case *GoAwayFrame:
+ err = rl.processGoAway(f)
+ maybeIdle = true
+ case *RSTStreamFrame:
+ err = rl.processResetStream(f)
+ maybeIdle = true
+ case *SettingsFrame:
+ err = rl.processSettings(f)
+ case *PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
+ }
+ return err
+ }
+ if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if !cs.firstByte {
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ traceFirstResponseByte(cs.trace)
+ }
+ cs.firstByte = true
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+ cs.resc <- resAndError{err: err}
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ if res.Body != noBody {
+ rl.activeRes[cs.ID] = cs
+ }
+ cs.resTrailer = &res.Trailer
+ cs.resc <- resAndError{res: res}
+ return nil
+}
+
+// handleResponse may return nil or a ConnectionError. Any other error value
+// is treated as a StreamError of type ErrCodeProtocol; the returned error in
+// that case is the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+ if f.Truncated {
+ return nil, errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed non-numeric status pseudo header")
+ }
+
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ if cs.on100 != nil {
+ cs.on100() // forces any write delay timer to fire
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ header := make(http.Header)
+ res := &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + http.StatusText(statusCode),
+ }
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(http.Header)
+ res.Trailer = t
+ }
+ foreachHeaderElement(hf.Value, func(v string) {
+ t[http.CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ header[key] = append(header[key], hf.Value)
+ }
+ }
+
+ streamEnded := f.StreamEnded()
+ isHead := cs.req.Method == "HEAD"
+ if !streamEnded || isHead {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ res.ContentLength = clen64
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ }
+
+ if streamEnded || isHead {
+ res.Body = noBody
+ return res, nil
+ }
+
+ buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+ cs.bufPipe = pipe{b: buf}
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+ go cs.awaitRequestCancel(cs.req)
+
+ if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ setResponseUncompressed(res)
+ }
+ return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any headers for trailers also
+ // has END_STREAM.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ trailer := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+ cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ var connAdd, streamAdd int32
+ // Check the conn-level first, before the stream-level.
+ if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+ connAdd = transportDefaultConnFlow - v
+ cc.inflow.add(connAdd)
+ }
+ if err == nil { // No need to refresh if the stream is over or failed.
+ // Consider any buffered body data (read from the conn but not
+ // consumed by the client) when computing flow control for this
+ // stream.
+ v := int(cs.inflow.available()) + cs.bufPipe.Len()
+ if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = int32(transportDefaultStreamFlow - v)
+ cs.inflow.add(streamAdd)
+ }
+ }
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+ cs := b.cs
+ cc := cs.cc
+
+ serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
+ unread := cs.bufPipe.Len()
+
+ if unread > 0 || !serverSentStreamEnd {
+ cc.mu.Lock()
+ cc.wmu.Lock()
+ if !serverSentStreamEnd {
+ cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+ }
+ // Return connection-level flow control.
+ if unread > 0 {
+ cc.inflow.add(int32(unread))
+ cc.fr.WriteWindowUpdate(0, uint32(unread))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ cc.mu.Unlock()
+ }
+
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ data := f.Data()
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+
+ // But at least return their flow control:
+ if f.Length > 0 {
+ cc.mu.Lock()
+ cc.inflow.add(int32(f.Length))
+ cc.mu.Unlock()
+
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(f.Length))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+ return nil
+ }
+ if f.Length > 0 {
+ if len(data) > 0 && cs.bufPipe.b == nil {
+ // Data frame after it's already closed?
+ cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(f.Length) {
+ cs.inflow.take(int32(f.Length))
+ } else {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ cs.inflow.add(pad)
+ cc.inflow.add(pad)
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(pad))
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+ cc.mu.Unlock()
+
+ if len(data) > 0 {
+ if _, err := cs.bufPipe.Write(data); err != nil {
+ rl.endStreamError(cs, err)
+ return err
+ }
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ rl.endStreamError(cs, nil)
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+ var code func()
+ if err == nil {
+ err = io.EOF
+ code = cs.copyTrailers
+ }
+ cs.bufPipe.closeWithErrorAndCode(err, code)
+ delete(rl.activeRes, cs.ID)
+ if isConnectionCloseRequest(cs.req) {
+ rl.closeWhenIdle = true
+ }
+
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ if f.IsAck() {
+ if cc.wantSettingsAck {
+ cc.wantSettingsAck = false
+ return nil
+ }
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ err := f.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ if s.Val > math.MaxInt32 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+
+ // Adjust flow control of currently-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ delta := int32(s.Val) - int32(cc.initialWindowSize)
+ for _, cs := range cc.streams {
+ cs.flow.add(delta)
+ }
+ cc.cond.Broadcast()
+
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+		// TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := streamError(cs.ID, f.ErrCode)
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ delete(rl.activeRes, cs.ID)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
+)
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
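+
+// Illustrative sketch, not part of the upstream x/net/http2 package:
+// gzipReader defers gzip.NewReader to the first Read, so a malformed gzip
+// body surfaces its error from Body.Read rather than from RoundTrip, and
+// zerr keeps that error sticky on subsequent Reads. The hypothetical helper
+// below only shows how a compressed response body would be wrapped; it is
+// not called anywhere in this package.
+func exampleWrapGzipBody(body io.ReadCloser) io.ReadCloser {
+ // Assumes the request advertised "Accept-Encoding: gzip" and the
+ // response arrived with "Content-Encoding: gzip".
+ return &gzipReader{body: body}
+}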
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+ cs *clientStream
+ timer *time.Timer // if non-nil, we're doing a delayed write
+ fnonce *sync.Once // to call fn with
+ fn func() // the code to run in the goroutine, writing the body
+ resc chan error // result of fn's execution
+ delay time.Duration // how long we should delay a delayed write for
+}
+
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+ s.cs = cs
+ if body == nil {
+ return
+ }
+ resc := make(chan error, 1)
+ s.resc = resc
+ s.fn = func() {
+ resc <- cs.writeRequestBody(body, cs.req.Body)
+ }
+ s.delay = t.expectContinueTimeout()
+ if s.delay == 0 ||
+ !httplex.HeaderValuesContainsToken(
+ cs.req.Header["Expect"],
+ "100-continue") {
+ return
+ }
+ s.fnonce = new(sync.Once)
+
+ // Arm the timer with a very large duration, which we'll
+ // intentionally lower later. It has to be large now because
+ // we need a handle to it before writing the headers, but the
+ // s.delay value is defined to not start until after the
+ // request headers were written.
+ const hugeDuration = 365 * 24 * time.Hour
+ s.timer = time.AfterFunc(hugeDuration, func() {
+ s.fnonce.Do(s.fn)
+ })
+ return
+}
+
+func (s bodyWriterState) cancel() {
+ if s.timer != nil {
+ s.timer.Stop()
+ }
+}
+
+func (s bodyWriterState) on100() {
+ if s.timer == nil {
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
+ return
+ }
+ s.timer.Stop()
+ go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+ if s.timer == nil {
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
+ go s.fn()
+ return
+ }
+ traceWait100Continue(s.cs.trace)
+ if s.timer.Stop() {
+ s.timer.Reset(s.delay)
+ }
+}
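+
+// Illustrative lifecycle note, not part of the upstream x/net/http2 package.
+// A caller is expected to drive bodyWriterState roughly as follows (cs and t
+// are hypothetical placeholders for a clientStream and Transport):
+//
+//	bw := t.getBodyWriterState(cs, req.Body) // before the HEADERS frame is written
+//	... write the request HEADERS frame ...
+//	bw.scheduleBodyWrite() // starts the body write now, or arms the 100-continue delay
+//	bw.on100()             // if a "100 Continue" response arrives, write the body immediately
+//	bw.cancel()            // when the request finishes or is aborted, stop any pending timer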
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+ return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go
new file mode 100644
index 000000000..96d0a0867
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport_test.go
@@ -0,0 +1,2620 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+var (
+ extNet = flag.Bool("extnet", false, "do external network tests")
+ transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
+ insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove?
+)
+
+var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true}
+
+func TestTransportExternal(t *testing.T) {
+ if !*extNet {
+ t.Skip("skipping external network test")
+ }
+ req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
+ rt := &Transport{TLSClientConfig: tlsConfigInsecure}
+ res, err := rt.RoundTrip(req)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ res.Write(os.Stdout)
+}
+
+func startH2cServer(t *testing.T) net.Listener {
+ h2Server := &Server{}
+ l := newLocalListener(t)
+ go func() {
+ conn, err := l.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ h2Server.ServeConn(conn, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, %v", r.URL.Path)
+ })})
+ }()
+ return l
+}
+
+func TestTransportH2c(t *testing.T) {
+ l := startH2cServer(t)
+ defer l.Close()
+ req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tr := &Transport{
+ AllowHTTP: true,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ return net.Dial(network, addr)
+ },
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.ProtoMajor != 2 {
+ t.Fatal("proto not h2c")
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := string(body), "Hello, /foobar"; got != want {
+ t.Fatalf("response got %v, want %v", got, want)
+ }
+}
+
+func TestTransport(t *testing.T) {
+ const body = "sup"
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, body)
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ t.Logf("Got res: %+v", res)
+ if g, w := res.StatusCode, 200; g != w {
+ t.Errorf("StatusCode = %v; want %v", g, w)
+ }
+ if g, w := res.Status, "200 OK"; g != w {
+ t.Errorf("Status = %q; want %q", g, w)
+ }
+ wantHeader := http.Header{
+ "Content-Length": []string{"3"},
+ "Content-Type": []string{"text/plain; charset=utf-8"},
+ "Date": []string{"XXX"}, // see cleanDate
+ }
+ cleanDate(res)
+ if !reflect.DeepEqual(res.Header, wantHeader) {
+ t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
+ }
+ if res.Request != req {
+ t.Errorf("Response.Request = %p; want %p", res.Request, req)
+ }
+ if res.TLS == nil {
+ t.Error("Response.TLS = nil; want non-nil")
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Errorf("Body read: %v", err)
+ } else if string(slurp) != body {
+ t.Errorf("Body = %q; want %q", slurp, body)
+ }
+}
+
+func onSameConn(t *testing.T, modReq func(*http.Request)) bool {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, r.RemoteAddr)
+ }, optOnlyServer, func(c net.Conn, st http.ConnState) {
+ t.Logf("conn %v is now state %v", c.RemoteAddr(), st)
+ })
+ defer st.Close()
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ get := func() string {
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ modReq(req)
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("Body read: %v", err)
+ }
+ addr := strings.TrimSpace(string(slurp))
+ if addr == "" {
+ t.Fatalf("didn't get an addr in response")
+ }
+ return addr
+ }
+ first := get()
+ second := get()
+ return first == second
+}
+
+func TestTransportReusesConns(t *testing.T) {
+ if !onSameConn(t, func(*http.Request) {}) {
+ t.Errorf("first and second responses were on different connections")
+ }
+}
+
+func TestTransportReusesConn_RequestClose(t *testing.T) {
+ if onSameConn(t, func(r *http.Request) { r.Close = true }) {
+ t.Errorf("first and second responses were not on different connections")
+ }
+}
+
+func TestTransportReusesConn_ConnClose(t *testing.T) {
+ if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) {
+ t.Errorf("first and second responses were not on different connections")
+ }
+}
+
+// Tests that the Transport only keeps one pending dial open per destination address.
+// https://golang.org/issue/13397
+func TestTransportGroupsPendingDials(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, r.RemoteAddr)
+ }, optOnlyServer)
+ defer st.Close()
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ }
+ defer tr.CloseIdleConnections()
+ var (
+ mu sync.Mutex
+ dials = map[string]int{}
+ )
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Errorf("Body read: %v", err)
+ }
+ addr := strings.TrimSpace(string(slurp))
+ if addr == "" {
+ t.Errorf("didn't get an addr in response")
+ }
+ mu.Lock()
+ dials[addr]++
+ mu.Unlock()
+ }()
+ }
+ wg.Wait()
+ if len(dials) != 1 {
+ t.Errorf("saw %d dials; want 1: %v", len(dials), dials)
+ }
+ tr.CloseIdleConnections()
+ if err := retry(50, 10*time.Millisecond, func() error {
+ cp, ok := tr.connPool().(*clientConnPool)
+ if !ok {
+ return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool())
+ }
+ cp.mu.Lock()
+ defer cp.mu.Unlock()
+ if len(cp.dialing) != 0 {
+ return fmt.Errorf("dialing map = %v; want empty", cp.dialing)
+ }
+ if len(cp.conns) != 0 {
+ return fmt.Errorf("conns = %v; want empty", cp.conns)
+ }
+ if len(cp.keys) != 0 {
+ return fmt.Errorf("keys = %v; want empty", cp.keys)
+ }
+ return nil
+ }); err != nil {
+ t.Errorf("State of pool after CloseIdleConnections: %v", err)
+ }
+}
+
+func retry(tries int, delay time.Duration, fn func() error) error {
+ var err error
+ for i := 0; i < tries; i++ {
+ err = fn()
+ if err == nil {
+ return nil
+ }
+ time.Sleep(delay)
+ }
+ return err
+}
+
+func TestTransportAbortClosesPipes(t *testing.T) {
+ shutdown := make(chan struct{})
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ w.(http.Flusher).Flush()
+ <-shutdown
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+ defer close(shutdown) // we must shut down before st.Close() to avoid hanging
+
+ done := make(chan struct{})
+ requestMade := make(chan struct{})
+ go func() {
+ defer close(done)
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer res.Body.Close()
+ close(requestMade)
+ _, err = ioutil.ReadAll(res.Body)
+ if err == nil {
+ t.Error("expected error from res.Body.Read")
+ }
+ }()
+
+ <-requestMade
+ // Now force the serve loop to end, via closing the connection.
+ st.closeConn()
+ // deadlock? that's a bug.
+ select {
+ case <-done:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timeout")
+ }
+}
+
+// TODO: merge this with TestTransportBody to make TestTransportRequest? This
+// could be a table-driven test with extra goodies.
+func TestTransportPath(t *testing.T) {
+ gotc := make(chan *url.URL, 1)
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ gotc <- r.URL
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ const (
+ path = "/testpath"
+ query = "q=1"
+ )
+ surl := st.ts.URL + path + "?" + query
+ req, err := http.NewRequest("POST", surl, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &http.Client{Transport: tr}
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ got := <-gotc
+ if got.Path != path {
+ t.Errorf("Read Path = %q; want %q", got.Path, path)
+ }
+ if got.RawQuery != query {
+ t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query)
+ }
+}
+
+func randString(n int) string {
+ rnd := rand.New(rand.NewSource(int64(n)))
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = byte(rnd.Intn(256))
+ }
+ return string(b)
+}
+
+func TestTransportBody(t *testing.T) {
+ bodyTests := []struct {
+ body string
+ noContentLen bool
+ }{
+ {body: "some message"},
+ {body: "some message", noContentLen: true},
+ {body: ""},
+ {body: "", noContentLen: true},
+ {body: strings.Repeat("a", 1<<20), noContentLen: true},
+ {body: strings.Repeat("a", 1<<20)},
+ {body: randString(16<<10 - 1)},
+ {body: randString(16 << 10)},
+ {body: randString(16<<10 + 1)},
+ {body: randString(512<<10 - 1)},
+ {body: randString(512 << 10)},
+ {body: randString(512<<10 + 1)},
+ {body: randString(1<<20 - 1)},
+ {body: randString(1 << 20)},
+ {body: randString(1<<20 + 2)},
+ }
+
+ type reqInfo struct {
+ req *http.Request
+ slurp []byte
+ err error
+ }
+ gotc := make(chan reqInfo, 1)
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ slurp, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ gotc <- reqInfo{err: err}
+ } else {
+ gotc <- reqInfo{req: r, slurp: slurp}
+ }
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+
+ for i, tt := range bodyTests {
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ var body io.Reader = strings.NewReader(tt.body)
+ if tt.noContentLen {
+ body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods
+ }
+ req, err := http.NewRequest("POST", st.ts.URL, body)
+ if err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ c := &http.Client{Transport: tr}
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ defer res.Body.Close()
+ ri := <-gotc
+ if ri.err != nil {
+ t.Errorf("#%d: read error: %v", i, ri.err)
+ continue
+ }
+ if got := string(ri.slurp); got != tt.body {
+ t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body))
+ }
+ wantLen := int64(len(tt.body))
+ if tt.noContentLen && tt.body != "" {
+ wantLen = -1
+ }
+ if ri.req.ContentLength != wantLen {
+ t.Errorf("#%d. handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen)
+ }
+ }
+}
+
+func shortString(v string) string {
+ const maxLen = 100
+ if len(v) <= maxLen {
+ return v
+ }
+ return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:])
+}
+
+func TestTransportDialTLS(t *testing.T) {
+ var mu sync.Mutex // guards following
+ var gotReq, didDial bool
+
+ ts := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ mu.Lock()
+ gotReq = true
+ mu.Unlock()
+ },
+ optOnlyServer,
+ )
+ defer ts.Close()
+ tr := &Transport{
+ DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
+ mu.Lock()
+ didDial = true
+ mu.Unlock()
+ cfg.InsecureSkipVerify = true
+ c, err := tls.Dial(netw, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return c, c.Handshake()
+ },
+ }
+ defer tr.CloseIdleConnections()
+ client := &http.Client{Transport: tr}
+ res, err := client.Get(ts.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ mu.Lock()
+ if !gotReq {
+ t.Error("didn't get request")
+ }
+ if !didDial {
+ t.Error("didn't use dial hook")
+ }
+}
+
+func TestConfigureTransport(t *testing.T) {
+ t1 := &http.Transport{}
+ err := ConfigureTransport(t1)
+ if err == errTransportVersion {
+ t.Skip(err)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) {
+ // Laziness, to avoid buildtags.
+ t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got)
+ }
+ wantNextProtos := []string{"h2", "http/1.1"}
+ if t1.TLSClientConfig == nil {
+ t.Errorf("nil t1.TLSClientConfig")
+ } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) {
+ t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos)
+ }
+ if err := ConfigureTransport(t1); err == nil {
+ t.Error("unexpected success on second call to ConfigureTransport")
+ }
+
+ // And does it work?
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, r.Proto)
+ }, optOnlyServer)
+ defer st.Close()
+
+ t1.TLSClientConfig.InsecureSkipVerify = true
+ c := &http.Client{Transport: t1}
+ res, err := c.Get(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := string(slurp), "HTTP/2.0"; got != want {
+ t.Errorf("body = %q; want %q", got, want)
+ }
+}
+
+type capitalizeReader struct {
+ r io.Reader
+}
+
+func (cr capitalizeReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ for i, b := range p[:n] {
+ if b >= 'a' && b <= 'z' {
+ p[i] = b - ('a' - 'A')
+ }
+ }
+ return
+}
+
+type flushWriter struct {
+ w io.Writer
+}
+
+func (fw flushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if f, ok := fw.w.(http.Flusher); ok {
+ f.Flush()
+ }
+ return
+}
+
+type clientTester struct {
+ t *testing.T
+ tr *Transport
+ sc, cc net.Conn // server and client conn
+ fr *Framer // server's framer
+ client func() error
+ server func() error
+}
+
+func newClientTester(t *testing.T) *clientTester {
+ var dialOnce struct {
+ sync.Mutex
+ dialed bool
+ }
+ ct := &clientTester{
+ t: t,
+ }
+ ct.tr = &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ dialOnce.Lock()
+ defer dialOnce.Unlock()
+ if dialOnce.dialed {
+ return nil, errors.New("only one dial allowed in test mode")
+ }
+ dialOnce.dialed = true
+ return ct.cc, nil
+ },
+ }
+
+ ln := newLocalListener(t)
+ cc, err := net.Dial("tcp", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ sc, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ln.Close()
+ ct.cc = cc
+ ct.sc = sc
+ ct.fr = NewFramer(sc, sc)
+ return ct
+}
+
+func newLocalListener(t *testing.T) net.Listener {
+ ln, err := net.Listen("tcp4", "127.0.0.1:0")
+ if err == nil {
+ return ln
+ }
+ ln, err = net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ return ln
+}
+
+func (ct *clientTester) greet() {
+ buf := make([]byte, len(ClientPreface))
+ _, err := io.ReadFull(ct.sc, buf)
+ if err != nil {
+ ct.t.Fatalf("reading client preface: %v", err)
+ }
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ ct.t.Fatalf("Reading client settings frame: %v", err)
+ }
+ if sf, ok := f.(*SettingsFrame); !ok {
+ ct.t.Fatalf("Wanted client settings frame; got %v", f)
+ _ = sf // stash it away?
+ }
+ if err := ct.fr.WriteSettings(); err != nil {
+ ct.t.Fatal(err)
+ }
+ if err := ct.fr.WriteSettingsAck(); err != nil {
+ ct.t.Fatal(err)
+ }
+}
+
+func (ct *clientTester) readNonSettingsFrame() (Frame, error) {
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := f.(*SettingsFrame); ok {
+ continue
+ }
+ return f, nil
+ }
+}
+
+func (ct *clientTester) cleanup() {
+ ct.tr.CloseIdleConnections()
+}
+
+func (ct *clientTester) run() {
+ errc := make(chan error, 2)
+ ct.start("client", errc, ct.client)
+ ct.start("server", errc, ct.server)
+ defer ct.cleanup()
+ for i := 0; i < 2; i++ {
+ if err := <-errc; err != nil {
+ ct.t.Error(err)
+ return
+ }
+ }
+}
+
+func (ct *clientTester) start(which string, errc chan<- error, fn func() error) {
+ go func() {
+ finished := false
+ var err error
+ defer func() {
+ if !finished {
+ err = fmt.Errorf("%s goroutine didn't finish.", which)
+ } else if err != nil {
+ err = fmt.Errorf("%s: %v", which, err)
+ }
+ errc <- err
+ }()
+ err = fn()
+ finished = true
+ }()
+}
+
+func (ct *clientTester) readFrame() (Frame, error) {
+ return readFrameTimeout(ct.fr, 2*time.Second)
+}
+
+func (ct *clientTester) firstHeaders() (*HeadersFrame, error) {
+ for {
+ f, err := ct.readFrame()
+ if err != nil {
+ return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ switch f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ continue
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ return nil, fmt.Errorf("Got %T; want HeadersFrame", f)
+ }
+ return hf, nil
+ }
+}
+
+type countingReader struct {
+ n *int64
+}
+
+func (r countingReader) Read(p []byte) (n int, err error) {
+ for i := range p {
+ p[i] = byte(i)
+ }
+ atomic.AddInt64(r.n, int64(len(p)))
+ return len(p), err
+}
+
+func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }
+func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
+
+func testTransportReqBodyAfterResponse(t *testing.T, status int) {
+ const bodySize = 10 << 20
+ clientDone := make(chan struct{})
+ ct := newClientTester(t)
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ defer close(clientDone)
+
+ var n int64 // atomic
+ req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize))
+ if err != nil {
+ return err
+ }
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != status {
+ return fmt.Errorf("status code = %v; want %v", res.StatusCode, status)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("Slurp: %v", err)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("unexpected body: %q", slurp)
+ }
+ if status == 200 {
+ if got := atomic.LoadInt64(&n); got != bodySize {
+ return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize)
+ }
+ } else {
+ if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize {
+ return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize)
+ }
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ var dataRecv int64
+ var closed bool
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ // If the client's done, it
+ // will have reported any
+ // errors on its side.
+ return nil
+ default:
+ return err
+ }
+ }
+ //println(fmt.Sprintf("server got frame: %v", f))
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *HeadersFrame:
+ if !f.HeadersEnded() {
+ return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
+ }
+ if f.StreamEnded() {
+ return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f)
+ }
+ case *DataFrame:
+ dataLen := len(f.Data())
+ if dataLen > 0 {
+ if dataRecv == 0 {
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
+ return err
+ }
+ if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
+ return err
+ }
+ }
+ dataRecv += int64(dataLen)
+
+ if !closed && ((status != 200 && dataRecv > 0) ||
+ (status == 200 && dataRecv == bodySize)) {
+ closed = true
+ if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("Unexpected client frame %v", f)
+ }
+ }
+ }
+ ct.run()
+}
+
+// See golang.org/issue/13444
+func TestTransportFullDuplex(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(200) // redundant but for clarity
+ w.(http.Flusher).Flush()
+ io.Copy(flushWriter{w}, capitalizeReader{r.Body})
+ fmt.Fprintf(w, "bye.\n")
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+
+ pr, pw := io.Pipe()
+ req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ContentLength = -1
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200)
+ }
+ bs := bufio.NewScanner(res.Body)
+ want := func(v string) {
+ if !bs.Scan() {
+ t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err())
+ }
+ }
+ write := func(v string) {
+ _, err := io.WriteString(pw, v)
+ if err != nil {
+ t.Fatalf("pipe write: %v", err)
+ }
+ }
+ write("foo\n")
+ want("FOO")
+ write("bar\n")
+ want("BAR")
+ pw.Close()
+ want("bye.")
+ if err := bs.Err(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestTransportConnectRequest(t *testing.T) {
+ gotc := make(chan *http.Request, 1)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ gotc <- r
+ }, optOnlyServer)
+ defer st.Close()
+
+ u, err := url.Parse(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+
+ tests := []struct {
+ req *http.Request
+ want string
+ }{
+ {
+ req: &http.Request{
+ Method: "CONNECT",
+ Header: http.Header{},
+ URL: u,
+ },
+ want: u.Host,
+ },
+ {
+ req: &http.Request{
+ Method: "CONNECT",
+ Header: http.Header{},
+ URL: u,
+ Host: "example.com:123",
+ },
+ want: "example.com:123",
+ },
+ }
+
+ for i, tt := range tests {
+ res, err := c.Do(tt.req)
+ if err != nil {
+ t.Errorf("%d. RoundTrip = %v", i, err)
+ continue
+ }
+ res.Body.Close()
+ req := <-gotc
+ if req.Method != "CONNECT" {
+ t.Errorf("method = %q; want CONNECT", req.Method)
+ }
+ if req.Host != tt.want {
+ t.Errorf("Host = %q; want %q", req.Host, tt.want)
+ }
+ if req.URL.Host != tt.want {
+ t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want)
+ }
+ }
+}
+
+type headerType int
+
+const (
+ noHeader headerType = iota // omitted
+ oneHeader
+ splitHeader // broken into continuation on purpose
+)
+
+const (
+ f0 = noHeader
+ f1 = oneHeader
+ f2 = splitHeader
+ d0 = false
+ d1 = true
+)
+
+// Test all 36 combinations of response frame orders:
+// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers).
+// Generated by http://play.golang.org/p/SScqYKJYXd
+func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) }
+func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) }
+func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) }
+func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) }
+func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) }
+func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) }
+func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) }
+func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) }
+func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) }
+func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) }
+func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) }
+func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) }
+func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) }
+func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) }
+func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) }
+func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) }
+func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) }
+func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) }
+func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) }
+func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) }
+func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) }
+func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) }
+func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) }
+func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) }
+func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) }
+func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) }
+func TestTransportResPattern_c2h1d0t2(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f2) }
+func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) }
+func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) }
+func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) }
+func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) }
+func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) }
+func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) }
+func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) }
+func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) }
+func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) }
+
+func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) {
+ const reqBody = "some request body"
+ const resBody = "some response body"
+
+ if resHeader == noHeader {
+ // TODO: test 100-continue followed by immediate
+ // server stream reset, without headers in the middle?
+ panic("invalid combination")
+ }
+
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody))
+ if expect100Continue != noHeader {
+ req.Header.Set("Expect", "100-continue")
+ }
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("Slurp: %v", err)
+ }
+ wantBody := resBody
+ if !withData {
+ wantBody = ""
+ }
+ if string(slurp) != wantBody {
+ return fmt.Errorf("body = %q; want %q", slurp, wantBody)
+ }
+ if trailers == noHeader {
+ if len(res.Trailer) > 0 {
+ t.Errorf("Trailer = %v; want none", res.Trailer)
+ }
+ } else {
+ want := http.Header{"Some-Trailer": {"some-value"}}
+ if !reflect.DeepEqual(res.Trailer, want) {
+ t.Errorf("Trailer = %v; want %v", res.Trailer, want)
+ }
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ endStream := false
+ send := func(mode headerType) {
+ hbf := buf.Bytes()
+ switch mode {
+ case oneHeader:
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.Header().StreamID,
+ EndHeaders: true,
+ EndStream: endStream,
+ BlockFragment: hbf,
+ })
+ case splitHeader:
+ if len(hbf) < 2 {
+ panic("too small")
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.Header().StreamID,
+ EndHeaders: false,
+ EndStream: endStream,
+ BlockFragment: hbf[:1],
+ })
+ ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:])
+ default:
+ panic("bogus mode")
+ }
+ }
+ switch f := f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ case *DataFrame:
+ if !f.StreamEnded() {
+ // No need to send flow control tokens. The test request body is tiny.
+ continue
+ }
+ // Response headers (1+ frames; 1 or 2 in this test, but never 0)
+ {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"})
+ enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"})
+ if trailers != noHeader {
+ enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"})
+ }
+ endStream = withData == false && trailers == noHeader
+ send(resHeader)
+ }
+ if withData {
+ endStream = trailers == noHeader
+ ct.fr.WriteData(f.StreamID, endStream, []byte(resBody))
+ }
+ if trailers != noHeader {
+ endStream = true
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"})
+ send(trailers)
+ }
+ if endStream {
+ return nil
+ }
+ case *HeadersFrame:
+ if expect100Continue != noHeader {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+ send(expect100Continue)
+ }
+ }
+ }
+ }
+ ct.run()
+}
+
+func TestTransportReceiveUndeclaredTrailer(t *testing.T) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("body = %q; want nothing", slurp)
+ }
+ if _, ok := res.Trailer["Some-Trailer"]; !ok {
+ return fmt.Errorf("expected Some-Trailer")
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+
+ var n int
+ var hf *HeadersFrame
+ for hf == nil && n < 10 {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ hf, _ = f.(*HeadersFrame)
+ n++
+ }
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ // send headers without Trailer header
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+
+ // send trailers
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ return nil
+ }
+ ct.run()
+}
+
+func TestTransportInvalidTrailer_Pseudo1(t *testing.T) {
+ testTransportInvalidTrailer_Pseudo(t, oneHeader)
+}
+func TestTransportInvalidTrailer_Pseudo2(t *testing.T) {
+ testTransportInvalidTrailer_Pseudo(t, splitHeader)
+}
+func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) {
+ testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"})
+ enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
+ })
+}
+
+func TestTransportInvalidTrailer_Capital1(t *testing.T) {
+ testTransportInvalidTrailer_Capital(t, oneHeader)
+}
+func TestTransportInvalidTrailer_Capital2(t *testing.T) {
+ testTransportInvalidTrailer_Capital(t, splitHeader)
+}
+func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) {
+ testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
+ enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"})
+ })
+}
+func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) {
+ testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"})
+ })
+}
+func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) {
+ testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) {
+ enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"})
+ })
+}
+
+func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("RoundTrip: %v", err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("status code = %v; want 200", res.StatusCode)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ se, ok := err.(StreamError)
+ if !ok || se.Cause != wantErr {
+ return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("body = %q; want nothing", slurp)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ switch f := f.(type) {
+ case *HeadersFrame:
+ var endStream bool
+ send := func(mode headerType) {
+ hbf := buf.Bytes()
+ switch mode {
+ case oneHeader:
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: endStream,
+ BlockFragment: hbf,
+ })
+ case splitHeader:
+ if len(hbf) < 2 {
+ panic("too small")
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: false,
+ EndStream: endStream,
+ BlockFragment: hbf[:1],
+ })
+ ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
+ default:
+ panic("bogus mode")
+ }
+ }
+ // Response headers (1+ frames; 1 or 2 in this test, but never 0)
+ {
+ buf.Reset()
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"})
+ endStream = false
+ send(oneHeader)
+ }
+ // Trailers:
+ {
+ endStream = true
+ buf.Reset()
+ writeTrailer(enc)
+ send(trailers)
+ }
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
+func TestTransportChecksResponseHeaderListSize(t *testing.T) {
+ ct := newClientTester(t)
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != errResponseHeaderListSize {
+ size := int64(0)
+ if res != nil {
+ res.Body.Close()
+ for k, vv := range res.Header {
+ for _, v := range vv {
+ size += int64(len(k)) + int64(len(v)) + 32
+ }
+ }
+ }
+ return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return err
+ }
+ switch f := f.(type) {
+ case *HeadersFrame:
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ large := strings.Repeat("a", 1<<10)
+ for i := 0; i < 5042; i++ {
+ enc.WriteField(hpack.HeaderField{Name: large, Value: large})
+ }
+ if size, want := buf.Len(), 6329; size != want {
+ // Note: this number might change if
+ // our hpack implementation
+ // changes. That's fine. This is
+ // just a sanity check that our
+ // response can fit in a single
+ // header block fragment frame.
+ return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
+ }
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
+// Test that the Transport returns a typed error from Response.Body.Read calls
+// when the server sends an error. (here we use a panic, since that should generate
+// a stream error, but others like cancel should be similar)
+func TestTransportBodyReadErrorType(t *testing.T) {
+ doPanic := make(chan bool, 1)
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ w.(http.Flusher).Flush() // force headers out
+ <-doPanic
+ panic("boom")
+ },
+ optOnlyServer,
+ optQuiet,
+ )
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+
+ res, err := c.Get(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ doPanic <- true
+ buf := make([]byte, 100)
+ n, err := res.Body.Read(buf)
+ want := StreamError{StreamID: 0x1, Code: 0x2}
+ if !reflect.DeepEqual(want, err) {
+ t.Errorf("Read = %v, %#v; want error %#v", n, err, want)
+ }
+}
+
+// golang.org/issue/13924
+// This used to fail after many iterations, especially with -race:
+// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
+func TestTransportDoubleCloseOnWriteError(t *testing.T) {
+ var (
+ mu sync.Mutex
+ conn net.Conn // to close if set
+ )
+
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ mu.Lock()
+ defer mu.Unlock()
+ if conn != nil {
+ conn.Close()
+ }
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ tc, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ conn = tc
+ return tc, nil
+ },
+ }
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+ c.Get(st.ts.URL)
+}
+
+// Test that the http1 Transport.DisableKeepAlives option is respected
+// and connections are closed as soon as idle.
+// See golang.org/issue/14008
+func TestTransportDisableKeepAlives(t *testing.T) {
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "hi")
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+
+ connClosed := make(chan struct{}) // closed on tls.Conn.Close
+ tr := &Transport{
+ t1: &http.Transport{
+ DisableKeepAlives: true,
+ },
+ TLSClientConfig: tlsConfigInsecure,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ tc, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil
+ },
+ }
+ c := &http.Client{Transport: tr}
+ res, err := c.Get(st.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ioutil.ReadAll(res.Body); err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ select {
+ case <-connClosed:
+ case <-time.After(1 * time.Second):
+ t.Errorf("timeout")
+ }
+
+}
+
+// Test concurrent requests with Transport.DisableKeepAlives. Connections may be
+// shared while requests are in flight, but once everything is idle the
+// connection still needs to close.
+func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
+ const D = 25 * time.Millisecond
+ st := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(D)
+ io.WriteString(w, "hi")
+ },
+ optOnlyServer,
+ )
+ defer st.Close()
+
+ var dials int32
+ var conns sync.WaitGroup
+ tr := &Transport{
+ t1: &http.Transport{
+ DisableKeepAlives: true,
+ },
+ TLSClientConfig: tlsConfigInsecure,
+ DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ tc, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ atomic.AddInt32(&dials, 1)
+ conns.Add(1)
+ return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil
+ },
+ }
+ c := &http.Client{Transport: tr}
+ var reqs sync.WaitGroup
+ const N = 20
+ for i := 0; i < N; i++ {
+ reqs.Add(1)
+ if i == N-1 {
+ // For the final request, try to make all the
+ // others close. This isn't verified in the
+ // count, other than the Log statement, since
+ // it's so timing dependent. This test is
+ // really to make sure we don't interrupt a
+ // valid request.
+ time.Sleep(D * 2)
+ }
+ go func() {
+ defer reqs.Done()
+ res, err := c.Get(st.ts.URL)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if _, err := ioutil.ReadAll(res.Body); err != nil {
+ t.Error(err)
+ return
+ }
+ res.Body.Close()
+ }()
+ }
+ reqs.Wait()
+ conns.Wait()
+ t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N)
+}
+
+type noteCloseConn struct {
+ net.Conn
+ onceClose sync.Once
+ closefn func()
+}
+
+func (c *noteCloseConn) Close() error {
+ c.onceClose.Do(c.closefn)
+ return c.Conn.Close()
+}
+
+func isTimeout(err error) bool {
+ switch err := err.(type) {
+ case nil:
+ return false
+ case *url.Error:
+ return isTimeout(err.Err)
+ case net.Error:
+ return err.Timeout()
+ }
+ return false
+}
+
+// Test that the http1 Transport.ResponseHeaderTimeout option is honored and
+// that a cancel (RST_STREAM with CANCEL) is sent when it fires.
+func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) {
+ testTransportResponseHeaderTimeout(t, false)
+}
+func TestTransportResponseHeaderTimeout_Body(t *testing.T) {
+ testTransportResponseHeaderTimeout(t, true)
+}
+
+func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
+ ct := newClientTester(t)
+ ct.tr.t1 = &http.Transport{
+ ResponseHeaderTimeout: 5 * time.Millisecond,
+ }
+ ct.client = func() error {
+ c := &http.Client{Transport: ct.tr}
+ var err error
+ var n int64
+ const bodySize = 4 << 20
+ if body {
+ _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize))
+ } else {
+ _, err = c.Get("https://dummy.tld/")
+ }
+ if !isTimeout(err) {
+ t.Errorf("client expected timeout error; got %#v", err)
+ }
+ if body && n != bodySize {
+ t.Errorf("only read %d bytes of body; want %d", n, bodySize)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ t.Logf("ReadFrame: %v", err)
+ return nil
+ }
+ switch f := f.(type) {
+ case *DataFrame:
+ dataLen := len(f.Data())
+ if dataLen > 0 {
+ if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
+ return err
+ }
+ if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
+ return err
+ }
+ }
+ case *RSTStreamFrame:
+ if f.StreamID == 1 && f.ErrCode == ErrCodeCancel {
+ return nil
+ }
+ }
+ }
+ }
+ ct.run()
+}
+
+func TestTransportDisableCompression(t *testing.T) {
+ const body = "sup"
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ want := http.Header{
+ "User-Agent": []string{"Go-http-client/2.0"},
+ }
+ if !reflect.DeepEqual(r.Header, want) {
+ t.Errorf("request headers = %v; want %v", r.Header, want)
+ }
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{
+ TLSClientConfig: tlsConfigInsecure,
+ t1: &http.Transport{
+ DisableCompression: true,
+ },
+ }
+ defer tr.CloseIdleConnections()
+
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+}
+
+// RFC 7540 section 8.1.2.2
+func TestTransportRejectsConnHeaders(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ var got []string
+ for k := range r.Header {
+ got = append(got, k)
+ }
+ sort.Strings(got)
+ w.Header().Set("Got-Header", strings.Join(got, ","))
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ tests := []struct {
+ key string
+ value []string
+ want string
+ }{
+ {
+ key: "Upgrade",
+ value: []string{"anything"},
+ want: "ERROR: http2: invalid Upgrade request header",
+ },
+ {
+ key: "Connection",
+ value: []string{"foo"},
+ want: "ERROR: http2: invalid Connection request header",
+ },
+ {
+ key: "Connection",
+ value: []string{"close"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Connection",
+ value: []string{"close", "something-else"},
+ want: "ERROR: http2: invalid Connection request header",
+ },
+ {
+ key: "Connection",
+ value: []string{"keep-alive"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Proxy-Connection", // just deleted and ignored
+ value: []string{"keep-alive"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Transfer-Encoding",
+ value: []string{""},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Transfer-Encoding",
+ value: []string{"foo"},
+ want: "ERROR: http2: invalid Transfer-Encoding request header",
+ },
+ {
+ key: "Transfer-Encoding",
+ value: []string{"chunked"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Transfer-Encoding",
+ value: []string{"chunked", "other"},
+ want: "ERROR: http2: invalid Transfer-Encoding request header",
+ },
+ {
+ key: "Content-Length",
+ value: []string{"123"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ {
+ key: "Keep-Alive",
+ value: []string{"doop"},
+ want: "Accept-Encoding,User-Agent",
+ },
+ }
+
+ for _, tt := range tests {
+ req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req.Header[tt.key] = tt.value
+ res, err := tr.RoundTrip(req)
+ var got string
+ if err != nil {
+ got = fmt.Sprintf("ERROR: %v", err)
+ } else {
+ got = res.Header.Get("Got-Header")
+ res.Body.Close()
+ }
+ if got != tt.want {
+ t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want)
+ }
+ }
+}
+
+// golang.org/issue/14048
+func TestTransportFailsOnInvalidHeaders(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ var got []string
+ for k := range r.Header {
+ got = append(got, k)
+ }
+ sort.Strings(got)
+ w.Header().Set("Got-Header", strings.Join(got, ","))
+ }, optOnlyServer)
+ defer st.Close()
+
+ tests := [...]struct {
+ h http.Header
+ wantErr string
+ }{
+ 0: {
+ h: http.Header{"with space": {"foo"}},
+ wantErr: `invalid HTTP header name "with space"`,
+ },
+ 1: {
+ h: http.Header{"name": {"БрÑд"}},
+ wantErr: "", // okay
+ },
+ 2: {
+ h: http.Header{"имÑ": {"Brad"}},
+ wantErr: `invalid HTTP header name "имÑ"`,
+ },
+ 3: {
+ h: http.Header{"foo": {"foo\x01bar"}},
+ wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`,
+ },
+ }
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ for i, tt := range tests {
+ req, _ := http.NewRequest("GET", st.ts.URL, nil)
+ req.Header = tt.h
+ res, err := tr.RoundTrip(req)
+ var bad bool
+ if tt.wantErr == "" {
+ if err != nil {
+ bad = true
+ t.Errorf("case %d: error = %v; want no error", i, err)
+ }
+ } else {
+ if !strings.Contains(fmt.Sprint(err), tt.wantErr) {
+ bad = true
+ t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr)
+ }
+ }
+ if err == nil {
+ if bad {
+ t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header"))
+ }
+ res.Body.Close()
+ }
+ }
+}
+
+// Tests that gzipReader doesn't crash on a second Read call following
+// the first Read call's gzip.NewReader returning an error.
+func TestGzipReader_DoubleReadCrash(t *testing.T) {
+ gz := &gzipReader{
+ body: ioutil.NopCloser(strings.NewReader("0123456789")),
+ }
+ var buf [1]byte
+ n, err1 := gz.Read(buf[:])
+ if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") {
+ t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1)
+ }
+ n, err2 := gz.Read(buf[:])
+ if n != 0 || err2 != err1 {
+ t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1)
+ }
+}
+
+func TestTransportNewTLSConfig(t *testing.T) {
+ tests := [...]struct {
+ conf *tls.Config
+ host string
+ want *tls.Config
+ }{
+ // Normal case.
+ 0: {
+ conf: nil,
+ host: "foo.com",
+ want: &tls.Config{
+ ServerName: "foo.com",
+ NextProtos: []string{NextProtoTLS},
+ },
+ },
+
+ // User-provided name (bar.com) takes precedence:
+ 1: {
+ conf: &tls.Config{
+ ServerName: "bar.com",
+ },
+ host: "foo.com",
+ want: &tls.Config{
+ ServerName: "bar.com",
+ NextProtos: []string{NextProtoTLS},
+ },
+ },
+
+ // NextProto is prepended:
+ 2: {
+ conf: &tls.Config{
+ NextProtos: []string{"foo", "bar"},
+ },
+ host: "example.com",
+ want: &tls.Config{
+ ServerName: "example.com",
+ NextProtos: []string{NextProtoTLS, "foo", "bar"},
+ },
+ },
+
+ // NextProto is not duplicated:
+ 3: {
+ conf: &tls.Config{
+ NextProtos: []string{"foo", "bar", NextProtoTLS},
+ },
+ host: "example.com",
+ want: &tls.Config{
+ ServerName: "example.com",
+ NextProtos: []string{"foo", "bar", NextProtoTLS},
+ },
+ },
+ }
+ for i, tt := range tests {
+ tr := &Transport{TLSClientConfig: tt.conf}
+ got := tr.newTLSConfig(tt.host)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d. got %#v; want %#v", i, got, tt.want)
+ }
+ }
+}
+
+// The Google GFE responds to HEAD requests with a HEADERS frame
+// without END_STREAM, followed by a 0-length DATA frame with
+// END_STREAM. Make sure we don't get confused by that. (We did.)
+func TestTransportReadHeadResponse(t *testing.T) {
+ ct := newClientTester(t)
+ clientDone := make(chan struct{})
+ ct.client = func() error {
+ defer close(clientDone)
+ req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ if res.ContentLength != 123 {
+ return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("ReadAll: %v", err)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ t.Logf("ReadFrame: %v", err)
+ return nil
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ continue
+ }
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false, // as the GFE does
+ BlockFragment: buf.Bytes(),
+ })
+ ct.fr.WriteData(hf.StreamID, true, nil)
+
+ <-clientDone
+ return nil
+ }
+ }
+ ct.run()
+}
+
+type neverEnding byte
+
+func (b neverEnding) Read(p []byte) (int, error) {
+ for i := range p {
+ p[i] = byte(b)
+ }
+ return len(p), nil
+}
+
+// golang.org/issue/15425: test that a handler closing the request
+// body doesn't terminate the stream to the peer. (It just stops
+// readability from the handler's side, and eventually the client
+// runs out of flow control tokens)
+func TestTransportHandlerBodyClose(t *testing.T) {
+ const bodySize = 10 << 20
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ r.Body.Close()
+ io.Copy(w, io.LimitReader(neverEnding('A'), bodySize))
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ g0 := runtime.NumGoroutine()
+
+ const numReq = 10
+ for i := 0; i < numReq; i++ {
+ req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ if n != bodySize || err != nil {
+ t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize)
+ }
+ }
+ tr.CloseIdleConnections()
+
+ gd := runtime.NumGoroutine() - g0
+ if gd > numReq/2 {
+ t.Errorf("appeared to leak goroutines")
+ }
+
+}
+
+// https://golang.org/issue/15930
+func TestTransportFlowControl(t *testing.T) {
+ const (
+ total = 100 << 20 // 100MB
+ bufLen = 1 << 16
+ )
+
+ var wrote int64 // updated atomically
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ b := make([]byte, bufLen)
+ for wrote < total {
+ n, err := w.Write(b)
+ atomic.AddInt64(&wrote, int64(n))
+ if err != nil {
+ t.Errorf("ResponseWriter.Write error: %v", err)
+ break
+ }
+ w.(http.Flusher).Flush()
+ }
+ }, optOnlyServer)
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+ req, err := http.NewRequest("GET", st.ts.URL, nil)
+ if err != nil {
+ t.Fatal("NewRequest error:", err)
+ }
+ resp, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal("RoundTrip error:", err)
+ }
+ defer resp.Body.Close()
+
+ var read int64
+ b := make([]byte, bufLen)
+ for {
+ n, err := resp.Body.Read(b)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal("Read error:", err)
+ }
+ read += int64(n)
+
+ const max = transportDefaultStreamFlow
+ if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max {
+ t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read)
+ }
+
+ // Let the server get ahead of the client.
+ time.Sleep(1 * time.Millisecond)
+ }
+}
+
+// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make
+// the Transport remember it and return it to users (via
+// RoundTrip or request body reads) if needed (e.g. if the server
+// proceeds to close the TCP connection before the client gets its
+// response).
+func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) {
+ testTransportUsesGoAwayDebugError(t, false)
+}
+
+func TestTransportUsesGoAwayDebugError_Body(t *testing.T) {
+ testTransportUsesGoAwayDebugError(t, true)
+}
+
+func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {
+ ct := newClientTester(t)
+ clientDone := make(chan struct{})
+
+ const goAwayErrCode = ErrCodeHTTP11Required // arbitrary
+ const goAwayDebugData = "some debug data"
+
+ ct.client = func() error {
+ defer close(clientDone)
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if failMidBody {
+ if err != nil {
+ return fmt.Errorf("unexpected client RoundTrip error: %v", err)
+ }
+ _, err = io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ }
+ want := GoAwayError{
+ LastStreamID: 5,
+ ErrCode: goAwayErrCode,
+ DebugData: goAwayDebugData,
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ t.Logf("ReadFrame: %v", err)
+ return nil
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ continue
+ }
+ if failMidBody {
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ // Write two GOAWAY frames, to test that the Transport takes
+ // the interesting parts of both.
+ ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
+ ct.fr.WriteGoAway(5, goAwayErrCode, nil)
+ ct.sc.(*net.TCPConn).CloseWrite()
+ <-clientDone
+ return nil
+ }
+ }
+ ct.run()
+}
+
+// See golang.org/issue/16481
+func TestTransportReturnsUnusedFlowControl(t *testing.T) {
+ ct := newClientTester(t)
+
+ clientClosed := make(chan bool, 1)
+ serverWroteBody := make(chan bool, 1)
+
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ <-serverWroteBody
+
+ if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
+ return fmt.Errorf("body read = %v, %v; want 1, nil", n, err)
+ }
+ res.Body.Close() // leaving 4999 bytes unread
+ clientClosed <- true
+
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+
+ var hf *HeadersFrame
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ switch f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ continue
+ }
+ var ok bool
+ hf, ok = f.(*HeadersFrame)
+ if !ok {
+ return fmt.Errorf("Got %T; want HeadersFrame", f)
+ }
+ break
+ }
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ ct.fr.WriteData(hf.StreamID, false, make([]byte, 5000)) // without ending stream
+ serverWroteBody <- true
+
+ <-clientClosed
+
+ waitingFor := "RSTStreamFrame"
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err)
+ }
+ if _, ok := f.(*SettingsFrame); ok {
+ continue
+ }
+ switch waitingFor {
+ case "RSTStreamFrame":
+ if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel {
+					return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f))
+ }
+ waitingFor = "WindowUpdateFrame"
+ case "WindowUpdateFrame":
+ if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 {
+ return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f))
+ }
+ return nil
+ }
+ }
+ }
+ ct.run()
+}
+
+// Issue 16612: adjust flow control on open streams when transport
+// receives SETTINGS with INITIAL_WINDOW_SIZE from server.
+func TestTransportAdjustsFlowControl(t *testing.T) {
+ ct := newClientTester(t)
+ clientDone := make(chan struct{})
+
+ const bodySize = 1 << 20
+
+ ct.client = func() error {
+ defer ct.cc.(*net.TCPConn).CloseWrite()
+ defer close(clientDone)
+
+ req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ res.Body.Close()
+ return nil
+ }
+ ct.server = func() error {
+ _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface)))
+ if err != nil {
+ return fmt.Errorf("reading client preface: %v", err)
+ }
+
+ var gotBytes int64
+ var sentSettings bool
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ select {
+ case <-clientDone:
+ return nil
+ default:
+ return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ }
+ switch f := f.(type) {
+ case *DataFrame:
+ gotBytes += int64(len(f.Data()))
+ // After we've got half the client's
+ // initial flow control window's worth
+ // of request body data, give it just
+ // enough flow control to finish.
+ if gotBytes >= initialWindowSize/2 && !sentSettings {
+ sentSettings = true
+
+ ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize})
+ ct.fr.WriteWindowUpdate(0, bodySize)
+ ct.fr.WriteSettingsAck()
+ }
+
+ if f.StreamEnded() {
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: f.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: buf.Bytes(),
+ })
+ }
+ }
+ }
+ }
+ ct.run()
+}
+
+// See golang.org/issue/16556
+func TestTransportReturnsDataPaddingFlowControl(t *testing.T) {
+ ct := newClientTester(t)
+
+ unblockClient := make(chan bool, 1)
+
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ <-unblockClient
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+
+ var hf *HeadersFrame
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+ }
+ switch f.(type) {
+ case *WindowUpdateFrame, *SettingsFrame:
+ continue
+ }
+ var ok bool
+ hf, ok = f.(*HeadersFrame)
+ if !ok {
+ return fmt.Errorf("Got %T; want HeadersFrame", f)
+ }
+ break
+ }
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+ pad := []byte("12345")
+ ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream
+
+ f, err := ct.readNonSettingsFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err)
+ }
+ wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding
+ if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 {
+ return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f))
+ }
+
+ f, err = ct.readNonSettingsFrame()
+ if err != nil {
+ return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err)
+ }
+ if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 {
+ return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f))
+ }
+ unblockClient <- true
+ return nil
+ }
+ ct.run()
+}
+
+// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a
+// StreamError as a result of the response HEADERS
+func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) {
+ ct := newClientTester(t)
+
+ ct.client = func() error {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err == nil {
+ res.Body.Close()
+ return errors.New("unexpected successful GET")
+ }
+ want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")}
+ if !reflect.DeepEqual(want, err) {
+ t.Errorf("RoundTrip error = %#v; want %#v", err, want)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+
+ hf, err := ct.firstHeaders()
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: buf.Bytes(),
+ })
+
+ for {
+ fr, err := ct.readFrame()
+ if err != nil {
+ return fmt.Errorf("error waiting for RST_STREAM from client: %v", err)
+ }
+ if _, ok := fr.(*SettingsFrame); ok {
+ continue
+ }
+ if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol {
+ t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr))
+ }
+ break
+ }
+
+ return nil
+ }
+ ct.run()
+}
+
+// byteAndEOFReader is an io.Reader which reads one byte (the
+// underlying byte) and io.EOF at once in its Read call.
+type byteAndEOFReader byte
+
+func (b byteAndEOFReader) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ panic("unexpected useless call")
+ }
+ p[0] = byte(b)
+ return 1, io.EOF
+}
+
+// Issue 16788: the Transport had a regression where it started
+// sending a spurious DATA frame with a duplicate END_STREAM bit after
+// the request body writer goroutine had already read an EOF from the
+// Request.Body and included the END_STREAM on a data-carrying DATA
+// frame.
+//
+// Notably, to trigger this, the requests need to use a Request.Body
+// which returns (non-0, io.EOF) and also needs to set the ContentLength
+// explicitly.
+func TestTransportBodyDoubleEndStream(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ // Nothing.
+ }, optOnlyServer)
+ defer st.Close()
+
+ tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+ defer tr.CloseIdleConnections()
+
+ for i := 0; i < 2; i++ {
+ req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a'))
+ req.ContentLength = 1
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatalf("failure on req %d: %v", i+1, err)
+ }
+ defer res.Body.Close()
+ }
+}
+
+// golang.org/issue/16847
+func TestTransportRequestPathPseudo(t *testing.T) {
+ type result struct {
+ path string
+ err string
+ }
+ tests := []struct {
+ req *http.Request
+ want result
+ }{
+ 0: {
+ req: &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Host: "foo.com",
+ Path: "/foo",
+ },
+ },
+ want: result{path: "/foo"},
+ },
+ // I guess we just don't let users request "//foo" as
+ // a path, since it's illegal to start with two
+ // slashes....
+ 1: {
+ req: &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Host: "foo.com",
+ Path: "//foo",
+ },
+ },
+ want: result{err: `invalid request :path "//foo"`},
+ },
+
+ // Opaque with //$Matching_Hostname/path
+ 2: {
+ req: &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "https",
+ Opaque: "//foo.com/path",
+ Host: "foo.com",
+ Path: "/ignored",
+ },
+ },
+ want: result{path: "/path"},
+ },
+
+ // Opaque with some other Request.Host instead:
+ 3: {
+ req: &http.Request{
+ Method: "GET",
+ Host: "bar.com",
+ URL: &url.URL{
+ Scheme: "https",
+ Opaque: "//bar.com/path",
+ Host: "foo.com",
+ Path: "/ignored",
+ },
+ },
+ want: result{path: "/path"},
+ },
+
+ // Opaque without the leading "//":
+ 4: {
+ req: &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Opaque: "/path",
+ Host: "foo.com",
+ Path: "/ignored",
+ },
+ },
+ want: result{path: "/path"},
+ },
+
+ // Opaque we can't handle:
+ 5: {
+ req: &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "https",
+ Opaque: "//unknown_host/path",
+ Host: "foo.com",
+ Path: "/ignored",
+ },
+ },
+ want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`},
+ },
+
+ // A CONNECT request:
+ 6: {
+ req: &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{
+ Host: "foo.com",
+ },
+ },
+ want: result{},
+ },
+ }
+ for i, tt := range tests {
+ cc := &ClientConn{}
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+ cc.mu.Lock()
+ hdrs, err := cc.encodeHeaders(tt.req, false, "", -1)
+ cc.mu.Unlock()
+ var got result
+ hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) {
+ if f.Name == ":path" {
+ got.path = f.Value
+ }
+ })
+ if err != nil {
+ got.err = err.Error()
+ } else if len(hdrs) > 0 {
+ if _, err := hpackDec.Write(hdrs); err != nil {
+ t.Errorf("%d. bogus hpack: %v", i, err)
+ continue
+ }
+ }
+ if got != tt.want {
+ t.Errorf("%d. got %+v; want %+v", i, got, tt.want)
+ }
+
+ }
+
+}
+
+// golang.org/issue/17071 -- don't sniff the first byte of the request body
+// before we've determined that the ClientConn is usable.
+func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) {
+ const body = "foo"
+ req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body)))
+ cc := &ClientConn{
+ closed: true,
+ }
+ _, err := cc.RoundTrip(req)
+ if err != errClientConnUnusable {
+ t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err)
+ }
+ slurp, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("ReadAll = %v", err)
+ }
+ if string(slurp) != body {
+ t.Errorf("Body = %q; want %q", slurp, body)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 000000000..27ef0dd4d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,264 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("endsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ if p.code != 0 {
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ time.Sleep(50 * time.Millisecond)
+ ctx.CloseConn()
+ }
+ return err
+}
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ endHeaders := len(headerBlock) == 0
+ var err error
+ if first {
+ first = false
+ err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method is only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
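+
+// Illustrative sketch (not part of the upstream file; the header values
+// below are hypothetical): given the filtering rules in encodeHeaders, a
+// set such as
+//
+//	h := http.Header{
+//		"Transfer-Encoding": {"chunked", "trailers"}, // only "trailers" survives
+//		"X-Ok":              {"value"},               // written as "x-ok: value"
+//	}
+//	encodeHeaders(enc, h, nil) // nil keys means all of h, sorted via the pooled sorter
+//
+// encodes "x-ok" plus the single "trailers" value, and silently drops any
+// value rejected by httplex.ValidHeaderFieldValue.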
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 000000000..c24316ce7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,283 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+ // write is the interface value that does the writing, once the
+ // writeScheduler (below) has decided to select this frame
+ // to write. The write functions are all defined in write.go.
+ write writeFramer
+
+ stream *stream // used for prioritization. nil for non-stream frames.
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+ var streamID uint32
+ if wm.stream != nil {
+ streamID = wm.stream.id
+ }
+ var des string
+ if s, ok := wm.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wm.write)
+ }
+ return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+ // zero are frames not associated with a specific stream.
+	// They're sent before any stream-specific frames.
+ zero writeQueue
+
+ // maxFrameSize is the maximum size of a DATA frame
+ // we'll write. Must be non-zero and between 16K-16M.
+ maxFrameSize uint32
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // canSend is a slice of memory that's reused between frame
+ // scheduling decisions to hold the list of writeQueues (from sq)
+ // which have enough flow control data to send. After canSend is
+ // built, the best is selected.
+ canSend []*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+ if len(q.s) != 0 {
+ panic("queue must be empty")
+ }
+ ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+ ln := len(ws.queuePool)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ q := ws.queuePool[ln-1]
+ ws.queuePool = ws.queuePool[:ln-1]
+ return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+ st := wm.stream
+ if st == nil {
+ ws.zero.push(wm)
+ } else {
+ ws.streamQueue(st.id).push(wm)
+ }
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+ if q, ok := ws.sq[streamID]; ok {
+ return q
+ }
+ if ws.sq == nil {
+ ws.sq = make(map[uint32]*writeQueue)
+ }
+ q := ws.getEmptyQueue()
+ ws.sq[streamID] = q
+ return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+ if ws.maxFrameSize == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+
+	// If there are any frames not associated with streams, prefer those first.
+ // These are usually SETTINGS, etc.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ if len(ws.sq) == 0 {
+ return
+ }
+
+ // Next, prioritize frames on streams that aren't DATA frames (no cost).
+ for id, q := range ws.sq {
+ if q.firstIsNoCost() {
+ return ws.takeFrom(id, q)
+ }
+ }
+
+ // Now, all that remains are DATA frames with non-zero bytes to
+ // send. So pick the best one.
+ if len(ws.canSend) != 0 {
+ panic("should be empty")
+ }
+ for _, q := range ws.sq {
+ if n := ws.streamWritableBytes(q); n > 0 {
+ ws.canSend = append(ws.canSend, q)
+ }
+ }
+ if len(ws.canSend) == 0 {
+ return
+ }
+ defer ws.zeroCanSend()
+
+ // TODO: find the best queue
+ q := ws.canSend[0]
+
+ return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+ for i := range ws.canSend {
+ ws.canSend[i] = nil
+ }
+ ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+ wm := q.head()
+ ret := wm.stream.flow.available() // max we can write
+ if ret == 0 {
+ return 0
+ }
+ if int32(ws.maxFrameSize) < ret {
+ ret = int32(ws.maxFrameSize)
+ }
+ if ret == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+ wd := wm.write.(*writeData)
+ if len(wd.p) < int(ret) {
+ ret = int32(len(wd.p))
+ }
+ return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+ wm = q.head()
+ // If the first item in this queue costs flow control tokens
+ // and we don't have enough, write as much as we can.
+ if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+ allowed := wm.stream.flow.available() // max we can write
+ if allowed == 0 {
+ // No quota available. Caller can try the next stream.
+ return frameWriteMsg{}, false
+ }
+ if int32(ws.maxFrameSize) < allowed {
+ allowed = int32(ws.maxFrameSize)
+ }
+ // TODO: further restrict the allowed size, because even if
+ // the peer says it's okay to write 16MB data frames, we might
+ // want to write smaller ones to properly weight competing
+ // streams' priorities.
+
+ if len(wd.p) > int(allowed) {
+ wm.stream.flow.take(allowed)
+ chunk := wd.p[:allowed]
+ wd.p = wd.p[allowed:]
+ // Make up a new write message of a valid size, rather
+ // than shifting one off the queue.
+ return frameWriteMsg{
+ stream: wm.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: chunk,
+ // even if the original had endStream set, there
+					// are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false:
+ endStream: false,
+ },
+ // our caller is blocking on the final DATA frame, not
+ // these intermediates, so no need to wait:
+ done: nil,
+ }, true
+ }
+ wm.stream.flow.take(int32(len(wd.p)))
+ }
+
+ q.shift()
+ if q.empty() {
+ ws.putEmptyQueue(q)
+ delete(ws.sq, id)
+ }
+ return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+ q, ok := ws.sq[id]
+ if !ok {
+ return
+ }
+ delete(ws.sq, id)
+
+ // But keep it for others later.
+ for i := range q.s {
+ q.s[i] = frameWriteMsg{}
+ }
+ q.s = q.s[:0]
+ ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+ s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+ q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wm := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s = q.s[:len(q.s)-1]
+ return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+ if df, ok := q.s[0].write.(*writeData); ok {
+ return len(df.p) == 0
+ }
+ return true
+}
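+
+// Illustrative sketch (not part of the upstream file; the values are
+// hypothetical): a connection's write loop feeds the scheduler with add
+// and drains it with take, which prefers connection-level frames, then
+// no-cost stream frames, then DATA frames that still have flow-control
+// quota available:
+//
+//	var ws writeScheduler
+//	ws.maxFrameSize = 16384                          // must be non-zero before take
+//	ws.add(frameWriteMsg{write: writeSettingsAck{}}) // stream == nil: queued on ws.zero
+//	if !ws.empty() {
+//		wm, ok := ws.take() // here: the SETTINGS ack, with ok == true
+//		_, _ = wm, ok
+//	}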
diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go
new file mode 100644
index 000000000..610b2cdbc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/z_spec_test.go
@@ -0,0 +1,356 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/xml"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+)
+
+var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
+
+// The global map of sentence coverage for the http2 spec.
+var defaultSpecCoverage specCoverage
+
+var loadSpecOnce sync.Once
+
+func loadSpec() {
+ if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
+ panic(err)
+ } else {
+ defaultSpecCoverage = readSpecCov(f)
+ f.Close()
+ }
+}
+
+// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
+// "covered" will be included in report outputted by TestSpecCoverage.
+func covers(sec, sentences string) {
+ loadSpecOnce.Do(loadSpec)
+ defaultSpecCoverage.cover(sec, sentences)
+}
+
+type specPart struct {
+ section string
+ sentence string
+}
+
+func (ss specPart) Less(oo specPart) bool {
+ atoi := func(s string) int {
+ n, err := strconv.Atoi(s)
+ if err != nil {
+ panic(err)
+ }
+ return n
+ }
+ a := strings.Split(ss.section, ".")
+ b := strings.Split(oo.section, ".")
+ for len(a) > 0 {
+ if len(b) == 0 {
+ return false
+ }
+ x, y := atoi(a[0]), atoi(b[0])
+ if x == y {
+ a, b = a[1:], b[1:]
+ continue
+ }
+ return x < y
+ }
+ if len(b) > 0 {
+ return true
+ }
+ return false
+}
+
+type bySpecSection []specPart
+
+func (a bySpecSection) Len() int { return len(a) }
+func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
+func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type specCoverage struct {
+ coverage map[specPart]bool
+ d *xml.Decoder
+}
+
+func joinSection(sec []int) string {
+ s := fmt.Sprintf("%d", sec[0])
+ for _, n := range sec[1:] {
+ s = fmt.Sprintf("%s.%d", s, n)
+ }
+ return s
+}
+
+func (sc specCoverage) readSection(sec []int) {
+ var (
+ buf = new(bytes.Buffer)
+ sub = 0
+ )
+ for {
+ tk, err := sc.d.Token()
+ if err != nil {
+ if err == io.EOF {
+ return
+ }
+ panic(err)
+ }
+ switch v := tk.(type) {
+ case xml.StartElement:
+ if skipElement(v) {
+ if err := sc.d.Skip(); err != nil {
+ panic(err)
+ }
+ if v.Name.Local == "section" {
+ sub++
+ }
+ break
+ }
+ switch v.Name.Local {
+ case "section":
+ sub++
+ sc.readSection(append(sec, sub))
+ case "xref":
+ buf.Write(sc.readXRef(v))
+ }
+ case xml.CharData:
+ if len(sec) == 0 {
+ break
+ }
+ buf.Write(v)
+ case xml.EndElement:
+ if v.Name.Local == "section" {
+ sc.addSentences(joinSection(sec), buf.String())
+ return
+ }
+ }
+ }
+}
+
+func (sc specCoverage) readXRef(se xml.StartElement) []byte {
+ var b []byte
+ for {
+ tk, err := sc.d.Token()
+ if err != nil {
+ panic(err)
+ }
+ switch v := tk.(type) {
+ case xml.CharData:
+ if b != nil {
+ panic("unexpected CharData")
+ }
+ b = []byte(string(v))
+ case xml.EndElement:
+ if v.Name.Local != "xref" {
+ panic("expected </xref>")
+ }
+ if b != nil {
+ return b
+ }
+ sig := attrSig(se)
+ switch sig {
+ case "target":
+ return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
+ case "fmt-of,rel,target", "fmt-,,rel,target":
+ return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
+ case "fmt-of,sec,target", "fmt-,,sec,target":
+ return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
+ case "fmt-of,rel,sec,target":
+ return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
+ default:
+ panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
+ }
+ default:
+ panic(fmt.Sprintf("unexpected tag %q", v))
+ }
+ }
+}
+
+var skipAnchor = map[string]bool{
+ "intro": true,
+ "Overview": true,
+}
+
+var skipTitle = map[string]bool{
+ "Acknowledgements": true,
+ "Change Log": true,
+ "Document Organization": true,
+ "Conventions and Terminology": true,
+}
+
+func skipElement(s xml.StartElement) bool {
+ switch s.Name.Local {
+ case "artwork":
+ return true
+ case "section":
+ for _, attr := range s.Attr {
+ switch attr.Name.Local {
+ case "anchor":
+ if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
+ return true
+ }
+ case "title":
+ if skipTitle[attr.Value] {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func readSpecCov(r io.Reader) specCoverage {
+ sc := specCoverage{
+ coverage: map[specPart]bool{},
+ d: xml.NewDecoder(r)}
+ sc.readSection(nil)
+ return sc
+}
+
+func (sc specCoverage) addSentences(sec string, sentence string) {
+ for _, s := range parseSentences(sentence) {
+ sc.coverage[specPart{sec, s}] = false
+ }
+}
+
+func (sc specCoverage) cover(sec string, sentence string) {
+ for _, s := range parseSentences(sentence) {
+ p := specPart{sec, s}
+ if _, ok := sc.coverage[p]; !ok {
+ panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
+ }
+ sc.coverage[specPart{sec, s}] = true
+ }
+
+}
+
+var whitespaceRx = regexp.MustCompile(`\s+`)
+
+func parseSentences(sens string) []string {
+ sens = strings.TrimSpace(sens)
+ if sens == "" {
+ return nil
+ }
+ ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
+ for i, s := range ss {
+ s = strings.TrimSpace(s)
+ if !strings.HasSuffix(s, ".") {
+ s += "."
+ }
+ ss[i] = s
+ }
+ return ss
+}
+
+func TestSpecParseSentences(t *testing.T) {
+ tests := []struct {
+ ss string
+ want []string
+ }{
+ {"Sentence 1. Sentence 2.",
+ []string{
+ "Sentence 1.",
+ "Sentence 2.",
+ }},
+ {"Sentence 1. \nSentence 2.\tSentence 3.",
+ []string{
+ "Sentence 1.",
+ "Sentence 2.",
+ "Sentence 3.",
+ }},
+ }
+
+ for i, tt := range tests {
+ got := parseSentences(tt.ss)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d: got = %q, want %q", i, got, tt.want)
+ }
+ }
+}
+
+func TestSpecCoverage(t *testing.T) {
+ if !*coverSpec {
+ t.Skip()
+ }
+
+ loadSpecOnce.Do(loadSpec)
+
+ var (
+ list []specPart
+ cv = defaultSpecCoverage.coverage
+ total = len(cv)
+ complete = 0
+ )
+
+ for sp, touched := range defaultSpecCoverage.coverage {
+ if touched {
+ complete++
+ } else {
+ list = append(list, sp)
+ }
+ }
+ sort.Stable(bySpecSection(list))
+
+ if testing.Short() && len(list) > 5 {
+ list = list[:5]
+ }
+
+ for _, p := range list {
+ t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
+ }
+
+	t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
+}
+
+func attrSig(se xml.StartElement) string {
+ var names []string
+ for _, attr := range se.Attr {
+ if attr.Name.Local == "fmt" {
+ names = append(names, "fmt-"+attr.Value)
+ } else {
+ names = append(names, attr.Name.Local)
+ }
+ }
+ sort.Strings(names)
+ return strings.Join(names, ",")
+}
+
+func attrValue(se xml.StartElement, attr string) string {
+ for _, a := range se.Attr {
+ if a.Name.Local == attr {
+ return a.Value
+ }
+ }
+ panic("unknown attribute " + attr)
+}
+
+func TestSpecPartLess(t *testing.T) {
+ tests := []struct {
+ sec1, sec2 string
+ want bool
+ }{
+ {"6.2.1", "6.2", false},
+ {"6.2", "6.2.1", true},
+ {"6.10", "6.10.1", true},
+ {"6.10", "6.1.1", false}, // 10, not 1
+ {"6.1", "6.1", false}, // equal, so not less
+ }
+ for _, tt := range tests {
+ got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
+ if got != tt.want {
+ t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go
new file mode 100644
index 000000000..75db991df
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/dstunreach.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A DstUnreach represents an ICMP destination unreachable message
+// body.
+type DstUnreach struct {
+ Data []byte // data, known as original datagram field
+ Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *DstUnreach) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+ return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *DstUnreach) Marshal(proto int) ([]byte, error) {
+ return marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+}
+
+// parseDstUnreach parses b as an ICMP destination unreachable message
+// body.
+func parseDstUnreach(proto int, b []byte) (MessageBody, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ p := &DstUnreach{}
+ var err error
+ p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go
new file mode 100644
index 000000000..dd5518115
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/echo.go
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An Echo represents an ICMP echo request or reply message body.
+type Echo struct {
+ ID int // identifier
+ Seq int // sequence number
+ Data []byte // data
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *Echo) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ return 4 + len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *Echo) Marshal(proto int) ([]byte, error) {
+ b := make([]byte, 4+len(p.Data))
+ binary.BigEndian.PutUint16(b[:2], uint16(p.ID))
+ binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq))
+ copy(b[4:], p.Data)
+ return b, nil
+}
+
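+// Illustrative sketch (not part of the upstream file; the values are
+// hypothetical): for Echo{ID: 0x1234, Seq: 1, Data: []byte("hi")}, Marshal
+// returns the body bytes 0x12 0x34 0x00 0x01 'h' 'i'; the ICMP type, code
+// and checksum header is prepended later by Message.Marshal, not here.
+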
+// parseEcho parses b as an ICMP echo request or reply message body.
+func parseEcho(proto int, b []byte) (MessageBody, error) {
+ bodyLen := len(b)
+ if bodyLen < 4 {
+ return nil, errMessageTooShort
+ }
+ p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))}
+ if bodyLen > 4 {
+ p.Data = make([]byte, bodyLen-4)
+ copy(p.Data, b[4:])
+ }
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go
new file mode 100644
index 000000000..a68bfb010
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/endpoint.go
@@ -0,0 +1,113 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "net"
+ "runtime"
+ "syscall"
+ "time"
+
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+var _ net.PacketConn = &PacketConn{}
+
+// A PacketConn represents a packet network endpoint that uses either
+// ICMPv4 or ICMPv6.
+type PacketConn struct {
+ c net.PacketConn
+ p4 *ipv4.PacketConn
+ p6 *ipv6.PacketConn
+}
+
+func (c *PacketConn) ok() bool { return c != nil && c.c != nil }
+
+// IPv4PacketConn returns the ipv4.PacketConn of c.
+// It returns nil when c is not created as the endpoint for ICMPv4.
+func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn {
+ if !c.ok() {
+ return nil
+ }
+ return c.p4
+}
+
+// IPv6PacketConn returns the ipv6.PacketConn of c.
+// It returns nil when c is not created as the endpoint for ICMPv6.
+func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn {
+ if !c.ok() {
+ return nil
+ }
+ return c.p6
+}
+
+// ReadFrom reads an ICMP message from the connection.
+func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) {
+ if !c.ok() {
+ return 0, nil, syscall.EINVAL
+ }
+ // Please be informed that ipv4.NewPacketConn enables
+ // IP_STRIPHDR option by default on Darwin.
+ // See golang.org/issue/9395 for further information.
+ if runtime.GOOS == "darwin" && c.p4 != nil {
+ n, _, peer, err := c.p4.ReadFrom(b)
+ return n, peer, err
+ }
+ return c.c.ReadFrom(b)
+}
+
+// WriteTo writes the ICMP message b to dst.
+// Dst must be net.UDPAddr when c is a non-privileged
+// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr.
+func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ return c.c.WriteTo(b, dst)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ return c.c.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *PacketConn) LocalAddr() net.Addr {
+ if !c.ok() {
+ return nil
+ }
+ return c.c.LocalAddr()
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ return c.c.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ return c.c.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ return c.c.SetWriteDeadline(t)
+}
diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go
new file mode 100644
index 000000000..1df4ceccd
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/example_test.go
@@ -0,0 +1,63 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp_test
+
+import (
+ "log"
+ "net"
+ "os"
+ "runtime"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/ipv6"
+)
+
+func ExamplePacketConn_nonPrivilegedPing() {
+ switch runtime.GOOS {
+ case "darwin":
+ case "linux":
+ log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state")
+ default:
+ log.Println("not supported on", runtime.GOOS)
+ return
+ }
+
+ c, err := icmp.ListenPacket("udp6", "fe80::1%en0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+
+ wm := icmp.Message{
+ Type: ipv6.ICMPTypeEchoRequest, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }
+ wb, err := wm.Marshal(nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil {
+ log.Fatal(err)
+ }
+
+ rb := make([]byte, 1500)
+ n, peer, err := c.ReadFrom(rb)
+ if err != nil {
+ log.Fatal(err)
+ }
+ rm, err := icmp.ParseMessage(58, rb[:n])
+ if err != nil {
+ log.Fatal(err)
+ }
+ switch rm.Type {
+ case ipv6.ICMPTypeEchoReply:
+ log.Printf("got reflection from %v", peer)
+ default:
+ log.Printf("got %+v; want echo reply", rm)
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go
new file mode 100644
index 000000000..402a7514b
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/extension.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An Extension represents an ICMP extension.
+type Extension interface {
+ // Len returns the length of ICMP extension.
+ // Proto must be either the ICMPv4 or ICMPv6 protocol number.
+ Len(proto int) int
+
+ // Marshal returns the binary encoding of ICMP extension.
+ // Proto must be either the ICMPv4 or ICMPv6 protocol number.
+ Marshal(proto int) ([]byte, error)
+}
+
+const extensionVersion = 2
+
+func validExtensionHeader(b []byte) bool {
+ v := int(b[0]&0xf0) >> 4
+ s := binary.BigEndian.Uint16(b[2:4])
+ if s != 0 {
+ s = checksum(b)
+ }
+ if v != extensionVersion || s != 0 {
+ return false
+ }
+ return true
+}
+
+// parseExtensions parses b as a list of ICMP extensions.
+// The length attribute l must be the length attribute field from the
+// received ICMP message.
+//
+// It will return a list of ICMP extensions and an adjusted length
+// attribute that represents the length of the padded original
+// datagram field. Otherwise, it returns an error.
+func parseExtensions(b []byte, l int) ([]Extension, int, error) {
+	// A lot of non-RFC 4884 compliant implementations are still
+	// out there. For backwards compatibility, set the length
+	// attribute l to 128 when it looks inappropriate.
+ //
+ // A minimal extension at least requires 8 octets; 4 octets
+ // for an extension header, and 4 octets for a single object
+ // header.
+ //
+ // See RFC 4884 for further information.
+ if 128 > l || l+8 > len(b) {
+ l = 128
+ }
+ if l+8 > len(b) {
+ return nil, -1, errNoExtension
+ }
+ if !validExtensionHeader(b[l:]) {
+ if l == 128 {
+ return nil, -1, errNoExtension
+ }
+ l = 128
+ if !validExtensionHeader(b[l:]) {
+ return nil, -1, errNoExtension
+ }
+ }
+ var exts []Extension
+ for b = b[l+4:]; len(b) >= 4; {
+ ol := int(binary.BigEndian.Uint16(b[:2]))
+ if 4 > ol || ol > len(b) {
+ break
+ }
+ switch b[2] {
+ case classMPLSLabelStack:
+ ext, err := parseMPLSLabelStack(b[:ol])
+ if err != nil {
+ return nil, -1, err
+ }
+ exts = append(exts, ext)
+ case classInterfaceInfo:
+ ext, err := parseInterfaceInfo(b[:ol])
+ if err != nil {
+ return nil, -1, err
+ }
+ exts = append(exts, ext)
+ }
+ b = b[ol:]
+ }
+ return exts, l, nil
+}
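+
+// Illustrative sketch (not part of the upstream file; the layout is
+// hypothetical): for a message body whose original-datagram field is padded
+// to 128 bytes and followed by a 4-byte extension header plus one object,
+// parseExtensions(b, 0) falls back to l = 128, validates the extension
+// header at b[128:], and returns the parsed objects together with the
+// adjusted length attribute 128.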
diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go
new file mode 100644
index 000000000..0b3f7b9e1
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/extension_test.go
@@ -0,0 +1,259 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "net"
+ "reflect"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var marshalAndParseExtensionTests = []struct {
+ proto int
+ hdr []byte
+ obj []byte
+ exts []Extension
+}{
+ // MPLS label stack with no label
+ {
+ proto: iana.ProtocolICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x04, 0x01, 0x01,
+ },
+ exts: []Extension{
+ &MPLSLabelStack{
+ Class: classMPLSLabelStack,
+ Type: typeIncomingMPLSLabelStack,
+ },
+ },
+ },
+ // MPLS label stack with a single label
+ {
+ proto: iana.ProtocolIPv6ICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x08, 0x01, 0x01,
+ 0x03, 0xe8, 0xe9, 0xff,
+ },
+ exts: []Extension{
+ &MPLSLabelStack{
+ Class: classMPLSLabelStack,
+ Type: typeIncomingMPLSLabelStack,
+ Labels: []MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ },
+ },
+ // MPLS label stack with multiple labels
+ {
+ proto: iana.ProtocolICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x0c, 0x01, 0x01,
+ 0x03, 0xe8, 0xde, 0xfe,
+ 0x03, 0xe8, 0xe1, 0xff,
+ },
+ exts: []Extension{
+ &MPLSLabelStack{
+ Class: classMPLSLabelStack,
+ Type: typeIncomingMPLSLabelStack,
+ Labels: []MPLSLabel{
+ {
+ Label: 16013,
+ TC: 0x7,
+ S: false,
+ TTL: 254,
+ },
+ {
+ Label: 16014,
+ TC: 0,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ },
+ },
+ // Interface information with no attribute
+ {
+ proto: iana.ProtocolICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x04, 0x02, 0x00,
+ },
+ exts: []Extension{
+ &InterfaceInfo{
+ Class: classInterfaceInfo,
+ },
+ },
+ },
+ // Interface information with ifIndex and name
+ {
+ proto: iana.ProtocolICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x10, 0x02, 0x0a,
+ 0x00, 0x00, 0x00, 0x10,
+ 0x08, byte('e'), byte('n'), byte('1'),
+ byte('0'), byte('1'), 0x00, 0x00,
+ },
+ exts: []Extension{
+ &InterfaceInfo{
+ Class: classInterfaceInfo,
+ Type: 0x0a,
+ Interface: &net.Interface{
+ Index: 16,
+ Name: "en101",
+ },
+ },
+ },
+ },
+ // Interface information with ifIndex, IPAddr, name and MTU
+ {
+ proto: iana.ProtocolIPv6ICMP,
+ hdr: []byte{
+ 0x20, 0x00, 0x00, 0x00,
+ },
+ obj: []byte{
+ 0x00, 0x28, 0x02, 0x0f,
+ 0x00, 0x00, 0x00, 0x0f,
+ 0x00, 0x02, 0x00, 0x00,
+ 0xfe, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x08, byte('e'), byte('n'), byte('1'),
+ byte('0'), byte('1'), 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00,
+ },
+ exts: []Extension{
+ &InterfaceInfo{
+ Class: classInterfaceInfo,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.ParseIP("fe80::1"),
+ Zone: "en101",
+ },
+ },
+ },
+ },
+}
+
+func TestMarshalAndParseExtension(t *testing.T) {
+ for i, tt := range marshalAndParseExtensionTests {
+ for j, ext := range tt.exts {
+ var err error
+ var b []byte
+ switch ext := ext.(type) {
+ case *MPLSLabelStack:
+ b, err = ext.Marshal(tt.proto)
+ if err != nil {
+ t.Errorf("#%v/%v: %v", i, j, err)
+ continue
+ }
+ case *InterfaceInfo:
+ b, err = ext.Marshal(tt.proto)
+ if err != nil {
+ t.Errorf("#%v/%v: %v", i, j, err)
+ continue
+ }
+ }
+ if !reflect.DeepEqual(b, tt.obj) {
+ t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj)
+ continue
+ }
+ }
+
+ for j, wire := range []struct {
+ data []byte // original datagram
+ inlattr int // length of padded original datagram, a hint
+ outlattr int // length of padded original datagram, a want
+ err error
+ }{
+ {nil, 0, -1, errNoExtension},
+ {make([]byte, 127), 128, -1, errNoExtension},
+
+ {make([]byte, 128), 127, -1, errNoExtension},
+ {make([]byte, 128), 128, -1, errNoExtension},
+ {make([]byte, 128), 129, -1, errNoExtension},
+
+ {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil},
+ {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil},
+ {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil},
+
+ {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension},
+ {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil},
+ {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension},
+ } {
+ exts, l, err := parseExtensions(wire.data, wire.inlattr)
+ if err != wire.err {
+ t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err)
+ continue
+ }
+ if wire.err != nil {
+ continue
+ }
+ if l != wire.outlattr {
+ t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr)
+ }
+ if !reflect.DeepEqual(exts, tt.exts) {
+ for j, ext := range exts {
+ switch ext := ext.(type) {
+ case *MPLSLabelStack:
+ want := tt.exts[j].(*MPLSLabelStack)
+ t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want)
+ case *InterfaceInfo:
+ want := tt.exts[j].(*InterfaceInfo)
+ t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want)
+ }
+ }
+ continue
+ }
+ }
+ }
+}
+
+var parseInterfaceNameTests = []struct {
+ b []byte
+ error
+}{
+ {[]byte{0, 'e', 'n', '0'}, errInvalidExtension},
+ {[]byte{4, 'e', 'n', '0'}, nil},
+ {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension},
+ {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort},
+}
+
+func TestParseInterfaceName(t *testing.T) {
+ ifi := InterfaceInfo{Interface: &net.Interface{}}
+ for i, tt := range parseInterfaceNameTests {
+ if _, err := ifi.parseName(tt.b); err != tt.error {
+ t.Errorf("#%d: got %v; want %v", i, err, tt.error)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/helper.go b/vendor/golang.org/x/net/icmp/helper.go
new file mode 100644
index 000000000..6c4e633bc
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/helper.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+var (
+ // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
+ freebsdVersion uint32
+
+ nativeEndian binary.ByteOrder
+)
+
+func init() {
+ i := uint32(1)
+ b := (*[4]byte)(unsafe.Pointer(&i))
+ if b[0] == 1 {
+ nativeEndian = binary.LittleEndian
+ } else {
+ nativeEndian = binary.BigEndian
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go
new file mode 100644
index 000000000..398fd388f
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/helper_posix.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package icmp
+
+import (
+ "net"
+ "strconv"
+ "syscall"
+)
+
+func sockaddr(family int, address string) (syscall.Sockaddr, error) {
+ switch family {
+ case syscall.AF_INET:
+ a, err := net.ResolveIPAddr("ip4", address)
+ if err != nil {
+ return nil, err
+ }
+ if len(a.IP) == 0 {
+ a.IP = net.IPv4zero
+ }
+ if a.IP = a.IP.To4(); a.IP == nil {
+ return nil, net.InvalidAddrError("non-ipv4 address")
+ }
+ sa := &syscall.SockaddrInet4{}
+ copy(sa.Addr[:], a.IP)
+ return sa, nil
+ case syscall.AF_INET6:
+ a, err := net.ResolveIPAddr("ip6", address)
+ if err != nil {
+ return nil, err
+ }
+ if len(a.IP) == 0 {
+ a.IP = net.IPv6unspecified
+ }
+ if a.IP.Equal(net.IPv4zero) {
+ a.IP = net.IPv6unspecified
+ }
+ if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil {
+ return nil, net.InvalidAddrError("non-ipv6 address")
+ }
+ sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)}
+ copy(sa.Addr[:], a.IP)
+ return sa, nil
+ default:
+ return nil, net.InvalidAddrError("unexpected family")
+ }
+}
+
+func zoneToUint32(zone string) uint32 {
+ if zone == "" {
+ return 0
+ }
+ if ifi, err := net.InterfaceByName(zone); err == nil {
+ return uint32(ifi.Index)
+ }
+ n, err := strconv.Atoi(zone)
+ if err != nil {
+ return 0
+ }
+ return uint32(n)
+}
+
+func last(s string, b byte) int {
+ i := len(s)
+ for i--; i >= 0; i-- {
+ if s[i] == b {
+ break
+ }
+ }
+ return i
+}
diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go
new file mode 100644
index 000000000..78b5b98bf
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/interface.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "encoding/binary"
+ "net"
+ "strings"
+
+ "golang.org/x/net/internal/iana"
+)
+
+const (
+ classInterfaceInfo = 2
+
+ afiIPv4 = 1
+ afiIPv6 = 2
+)
+
+const (
+ attrMTU = 1 << iota
+ attrName
+ attrIPAddr
+ attrIfIndex
+)
+
+// An InterfaceInfo represents interface and next-hop identification.
+type InterfaceInfo struct {
+ Class int // extension object class number
+ Type int // extension object sub-type
+ Interface *net.Interface
+ Addr *net.IPAddr
+}
+
+func (ifi *InterfaceInfo) nameLen() int {
+ if len(ifi.Interface.Name) > 63 {
+ return 64
+ }
+ l := 1 + len(ifi.Interface.Name)
+ return (l + 3) &^ 3
+}
+
+func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) {
+ l = 4
+ if ifi.Interface != nil && ifi.Interface.Index > 0 {
+ attrs |= attrIfIndex
+ l += 4
+ if len(ifi.Interface.Name) > 0 {
+ attrs |= attrName
+ l += ifi.nameLen()
+ }
+ if ifi.Interface.MTU > 0 {
+ attrs |= attrMTU
+ l += 4
+ }
+ }
+ if ifi.Addr != nil {
+ switch proto {
+ case iana.ProtocolICMP:
+ if ifi.Addr.IP.To4() != nil {
+ attrs |= attrIPAddr
+ l += 4 + net.IPv4len
+ }
+ case iana.ProtocolIPv6ICMP:
+ if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
+ attrs |= attrIPAddr
+ l += 4 + net.IPv6len
+ }
+ }
+ }
+ return
+}
+
+// Len implements the Len method of Extension interface.
+func (ifi *InterfaceInfo) Len(proto int) int {
+ _, l := ifi.attrsAndLen(proto)
+ return l
+}
+
+// Marshal implements the Marshal method of Extension interface.
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) {
+ attrs, l := ifi.attrsAndLen(proto)
+ b := make([]byte, l)
+ if err := ifi.marshal(proto, b, attrs, l); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error {
+ binary.BigEndian.PutUint16(b[:2], uint16(l))
+ b[2], b[3] = classInterfaceInfo, byte(ifi.Type)
+ for b = b[4:]; len(b) > 0 && attrs != 0; {
+ switch {
+ case attrs&attrIfIndex != 0:
+ b = ifi.marshalIfIndex(proto, b)
+ attrs &^= attrIfIndex
+ case attrs&attrIPAddr != 0:
+ b = ifi.marshalIPAddr(proto, b)
+ attrs &^= attrIPAddr
+ case attrs&attrName != 0:
+ b = ifi.marshalName(proto, b)
+ attrs &^= attrName
+ case attrs&attrMTU != 0:
+ b = ifi.marshalMTU(proto, b)
+ attrs &^= attrMTU
+ }
+ }
+ return nil
+}
+
+func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte {
+ binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index))
+ return b[4:]
+}
+
+func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4]))
+ return b[4:], nil
+}
+
+func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte {
+ switch proto {
+ case iana.ProtocolICMP:
+ binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4))
+ copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4())
+ b = b[4+net.IPv4len:]
+ case iana.ProtocolIPv6ICMP:
+ binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6))
+ copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16())
+ b = b[4+net.IPv6len:]
+ }
+ return b
+}
+
+func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ afi := int(binary.BigEndian.Uint16(b[:2]))
+ b = b[4:]
+ switch afi {
+ case afiIPv4:
+ if len(b) < net.IPv4len {
+ return nil, errMessageTooShort
+ }
+ ifi.Addr.IP = make(net.IP, net.IPv4len)
+ copy(ifi.Addr.IP, b[:net.IPv4len])
+ b = b[net.IPv4len:]
+ case afiIPv6:
+ if len(b) < net.IPv6len {
+ return nil, errMessageTooShort
+ }
+ ifi.Addr.IP = make(net.IP, net.IPv6len)
+ copy(ifi.Addr.IP, b[:net.IPv6len])
+ b = b[net.IPv6len:]
+ }
+ return b, nil
+}
+
+func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte {
+ l := byte(ifi.nameLen())
+ b[0] = l
+ copy(b[1:], []byte(ifi.Interface.Name))
+ return b[l:]
+}
+
+func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) {
+ if 4 > len(b) || len(b) < int(b[0]) {
+ return nil, errMessageTooShort
+ }
+ l := int(b[0])
+ if l%4 != 0 || 4 > l || l > 64 {
+ return nil, errInvalidExtension
+ }
+ var name [63]byte
+ copy(name[:], b[1:l])
+ ifi.Interface.Name = strings.Trim(string(name[:]), "\000")
+ return b[l:], nil
+}
+
+func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte {
+ binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU))
+ return b[4:]
+}
+
+func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4]))
+ return b[4:], nil
+}
+
+func parseInterfaceInfo(b []byte) (Extension, error) {
+ ifi := &InterfaceInfo{
+ Class: int(b[2]),
+ Type: int(b[3]),
+ }
+ if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 {
+ ifi.Interface = &net.Interface{}
+ }
+ if ifi.Type&attrIPAddr != 0 {
+ ifi.Addr = &net.IPAddr{}
+ }
+ attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU)
+ for b = b[4:]; len(b) > 0 && attrs != 0; {
+ var err error
+ switch {
+ case attrs&attrIfIndex != 0:
+ b, err = ifi.parseIfIndex(b)
+ attrs &^= attrIfIndex
+ case attrs&attrIPAddr != 0:
+ b, err = ifi.parseIPAddr(b)
+ attrs &^= attrIPAddr
+ case attrs&attrName != 0:
+ b, err = ifi.parseName(b)
+ attrs &^= attrName
+ case attrs&attrMTU != 0:
+ b, err = ifi.parseMTU(b)
+ attrs &^= attrMTU
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
+ ifi.Addr.Zone = ifi.Interface.Name
+ }
+ return ifi, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go
new file mode 100644
index 000000000..729ddc97c
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/ipv4.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "encoding/binary"
+ "net"
+ "runtime"
+
+ "golang.org/x/net/ipv4"
+)
+
+// ParseIPv4Header parses b as the IPv4 header of the invoking packet,
+// which is contained in the ICMP error message.
+func ParseIPv4Header(b []byte) (*ipv4.Header, error) {
+ if len(b) < ipv4.HeaderLen {
+ return nil, errHeaderTooShort
+ }
+ hdrlen := int(b[0]&0x0f) << 2
+ if hdrlen > len(b) {
+ return nil, errBufferTooShort
+ }
+ h := &ipv4.Header{
+ Version: int(b[0] >> 4),
+ Len: hdrlen,
+ TOS: int(b[1]),
+ ID: int(binary.BigEndian.Uint16(b[4:6])),
+ FragOff: int(binary.BigEndian.Uint16(b[6:8])),
+ TTL: int(b[8]),
+ Protocol: int(b[9]),
+ Checksum: int(binary.BigEndian.Uint16(b[10:12])),
+ Src: net.IPv4(b[12], b[13], b[14], b[15]),
+ Dst: net.IPv4(b[16], b[17], b[18], b[19]),
+ }
+ switch runtime.GOOS {
+ case "darwin":
+ h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+ case "freebsd":
+ if freebsdVersion >= 1000000 {
+ h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+ } else {
+ h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+ }
+ default:
+ h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+ }
+ h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13
+ h.FragOff = h.FragOff & 0x1fff
+ if hdrlen-ipv4.HeaderLen > 0 {
+ h.Options = make([]byte, hdrlen-ipv4.HeaderLen)
+ copy(h.Options, b[ipv4.HeaderLen:])
+ }
+ return h, nil
+}
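A minimal sketch of how ParseIPv4Header from this vendored file pairs with ParseMessage: the helper name invokingHeader and the non-main package name are illustrative assumptions, not part of the upstream package.

package icmputil

import (
	"errors"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

// invokingHeader extracts the IPv4 header of the invoking packet from a
// parsed ICMPv4 destination unreachable message body.
func invokingHeader(m *icmp.Message) (*ipv4.Header, error) {
	body, ok := m.Body.(*icmp.DstUnreach)
	if !ok {
		return nil, errors.New("not a destination unreachable message")
	}
	// The original datagram field starts with the embedded IPv4 header.
	return icmp.ParseIPv4Header(body.Data)
}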
diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go
new file mode 100644
index 000000000..47cc00d07
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/ipv4_test.go
@@ -0,0 +1,82 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "encoding/binary"
+ "net"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/ipv4"
+)
+
+type ipv4HeaderTest struct {
+ wireHeaderFromKernel [ipv4.HeaderLen]byte
+ wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte
+ Header *ipv4.Header
+}
+
+var ipv4HeaderLittleEndianTest = ipv4HeaderTest{
+ // TODO(mikio): Add platform dependent wire header formats when
+ // we support new platforms.
+ wireHeaderFromKernel: [ipv4.HeaderLen]byte{
+ 0x45, 0x01, 0xbe, 0xef,
+ 0xca, 0xfe, 0x45, 0xdc,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{
+ 0x45, 0x01, 0xef, 0xbe,
+ 0xca, 0xfe, 0x45, 0xdc,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ Header: &ipv4.Header{
+ Version: ipv4.Version,
+ Len: ipv4.HeaderLen,
+ TOS: 1,
+ TotalLen: 0xbeef,
+ ID: 0xcafe,
+ Flags: ipv4.DontFragment,
+ FragOff: 1500,
+ TTL: 255,
+ Protocol: 1,
+ Checksum: 0xdead,
+ Src: net.IPv4(172, 16, 254, 254),
+ Dst: net.IPv4(192, 168, 0, 1),
+ },
+}
+
+func TestParseIPv4Header(t *testing.T) {
+ tt := &ipv4HeaderLittleEndianTest
+ if nativeEndian != binary.LittleEndian {
+ t.Skip("no test for non-little endian machine yet")
+ }
+
+ var wh []byte
+ switch runtime.GOOS {
+ case "darwin":
+ wh = tt.wireHeaderFromTradBSDKernel[:]
+ case "freebsd":
+ if freebsdVersion >= 1000000 {
+ wh = tt.wireHeaderFromKernel[:]
+ } else {
+ wh = tt.wireHeaderFromTradBSDKernel[:]
+ }
+ default:
+ wh = tt.wireHeaderFromKernel[:]
+ }
+ h, err := ParseIPv4Header(wh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(h, tt.Header) {
+ t.Fatalf("got %#v; want %#v", h, tt.Header)
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go
new file mode 100644
index 000000000..58eaa77d0
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/ipv6.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "net"
+
+ "golang.org/x/net/internal/iana"
+)
+
+const ipv6PseudoHeaderLen = 2*net.IPv6len + 8
+
+// IPv6PseudoHeader returns an IPv6 pseudo header for checksum
+// calculation.
+func IPv6PseudoHeader(src, dst net.IP) []byte {
+ b := make([]byte, ipv6PseudoHeaderLen)
+ copy(b, src.To16())
+ copy(b[net.IPv6len:], dst.To16())
+ b[len(b)-1] = byte(iana.ProtocolIPv6ICMP)
+ return b
+}
diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go
new file mode 100644
index 000000000..7fac4f965
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/listen_posix.go
@@ -0,0 +1,100 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package icmp
+
+import (
+ "net"
+ "os"
+ "runtime"
+ "syscall"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option
+
+// ListenPacket listens for incoming ICMP packets addressed to
+// address. See net.Dial for the syntax of address.
+//
+// For non-privileged datagram-oriented ICMP endpoints, network must
+// be "udp4" or "udp6". The endpoint allows to read, write a few
+// limited ICMP messages such as echo request and echo reply.
+// Currently only Darwin and Linux support this.
+//
+// Examples:
+// ListenPacket("udp4", "192.168.0.1")
+// ListenPacket("udp4", "0.0.0.0")
+// ListenPacket("udp6", "fe80::1%en0")
+// ListenPacket("udp6", "::")
+//
+// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
+// followed by a colon and an ICMP protocol number or name.
+//
+// Examples:
+// ListenPacket("ip4:icmp", "192.168.0.1")
+// ListenPacket("ip4:1", "0.0.0.0")
+// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
+// ListenPacket("ip6:58", "::")
+func ListenPacket(network, address string) (*PacketConn, error) {
+ var family, proto int
+ switch network {
+ case "udp4":
+ family, proto = syscall.AF_INET, iana.ProtocolICMP
+ case "udp6":
+ family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP
+ default:
+ i := last(network, ':')
+ switch network[:i] {
+ case "ip4":
+ proto = iana.ProtocolICMP
+ case "ip6":
+ proto = iana.ProtocolIPv6ICMP
+ }
+ }
+ var cerr error
+ var c net.PacketConn
+ switch family {
+ case syscall.AF_INET, syscall.AF_INET6:
+ s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto)
+ if err != nil {
+ return nil, os.NewSyscallError("socket", err)
+ }
+ if runtime.GOOS == "darwin" && family == syscall.AF_INET {
+ if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil {
+ syscall.Close(s)
+ return nil, os.NewSyscallError("setsockopt", err)
+ }
+ }
+ sa, err := sockaddr(family, address)
+ if err != nil {
+ syscall.Close(s)
+ return nil, err
+ }
+ if err := syscall.Bind(s, sa); err != nil {
+ syscall.Close(s)
+ return nil, os.NewSyscallError("bind", err)
+ }
+ f := os.NewFile(uintptr(s), "datagram-oriented icmp")
+ c, cerr = net.FilePacketConn(f)
+ f.Close()
+ default:
+ c, cerr = net.ListenPacket(network, address)
+ }
+ if cerr != nil {
+ return nil, cerr
+ }
+ switch proto {
+ case iana.ProtocolICMP:
+ return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil
+ case iana.ProtocolIPv6ICMP:
+ return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil
+ default:
+ return &PacketConn{c: c}, nil
+ }
+}
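A usage sketch of the non-privileged endpoint described above, combining ListenPacket with Marshal and ParseMessage; the target address 8.8.8.8 and the "udp4" network are assumptions for illustration (on Linux the net.ipv4.ping_group_range sysctl may need adjusting).

package main

import (
	"log"
	"net"
	"os"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	// Open an unprivileged datagram-oriented ICMPv4 endpoint.
	c, err := icmp.ListenPacket("udp4", "0.0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Build and marshal an echo request; the checksum is computed for us.
	wm := icmp.Message{
		Type: ipv4.ICMPTypeEcho, Code: 0,
		Body: &icmp.Echo{ID: os.Getpid() & 0xffff, Seq: 1, Data: []byte("PING")},
	}
	wb, err := wm.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("8.8.8.8")}); err != nil {
		log.Fatal(err)
	}

	// Wait briefly for the echo reply and parse it.
	rb := make([]byte, 1500)
	if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
		log.Fatal(err)
	}
	n, peer, err := c.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	rm, err := icmp.ParseMessage(1, rb[:n]) // 1 is the ICMPv4 protocol number
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %v from %v", rm.Type, peer)
}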
diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go
new file mode 100644
index 000000000..668728d17
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/listen_stub.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package icmp
+
+// ListenPacket listens for incoming ICMP packets addressed to
+// address. See net.Dial for the syntax of address.
+//
+// For non-privileged datagram-oriented ICMP endpoints, network must
+// be "udp4" or "udp6". The endpoint allows to read, write a few
+// limited ICMP messages such as echo request and echo reply.
+// Currently only Darwin and Linux support this.
+//
+// Examples:
+// ListenPacket("udp4", "192.168.0.1")
+// ListenPacket("udp4", "0.0.0.0")
+// ListenPacket("udp6", "fe80::1%en0")
+// ListenPacket("udp6", "::")
+//
+// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
+// followed by a colon and an ICMP protocol number or name.
+//
+// Examples:
+// ListenPacket("ip4:icmp", "192.168.0.1")
+// ListenPacket("ip4:1", "0.0.0.0")
+// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
+// ListenPacket("ip6:58", "::")
+func ListenPacket(network, address string) (*PacketConn, error) {
+ return nil, errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go
new file mode 100644
index 000000000..42d6df2c1
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/message.go
@@ -0,0 +1,150 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package icmp provides basic functions for the manipulation of
+// messages used in the Internet Control Message Protocols,
+// ICMPv4 and ICMPv6.
+//
+// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443.
+// Multi-part message support for ICMP is defined in RFC 4884.
+// ICMP extensions for MPLS are defined in RFC 4950.
+// ICMP extensions for interface and next-hop identification are
+// defined in RFC 5837.
+package icmp // import "golang.org/x/net/icmp"
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "syscall"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+var (
+ errMessageTooShort = errors.New("message too short")
+ errHeaderTooShort = errors.New("header too short")
+ errBufferTooShort = errors.New("buffer too short")
+ errOpNoSupport = errors.New("operation not supported")
+ errNoExtension = errors.New("no extension")
+ errInvalidExtension = errors.New("invalid extension")
+)
+
+func checksum(b []byte) uint16 {
+ csumcv := len(b) - 1 // checksum coverage
+ s := uint32(0)
+ for i := 0; i < csumcv; i += 2 {
+ s += uint32(b[i+1])<<8 | uint32(b[i])
+ }
+ if csumcv&1 == 0 {
+ s += uint32(b[csumcv])
+ }
+ s = s>>16 + s&0xffff
+ s = s + s>>16
+ return ^uint16(s)
+}
+
+// A Type represents an ICMP message type.
+type Type interface {
+ Protocol() int
+}
+
+// A Message represents an ICMP message.
+type Message struct {
+ Type Type // type, either ipv4.ICMPType or ipv6.ICMPType
+ Code int // code
+ Checksum int // checksum
+ Body MessageBody // body
+}
+
+// Marshal returns the binary encoding of the ICMP message m.
+//
+// For an ICMPv4 message, the returned message always contains the
+// calculated checksum field.
+//
+// For an ICMPv6 message, the returned message contains the calculated
+// checksum field when psh is not nil, otherwise the kernel will
+// compute the checksum field during the message transmission.
+// When psh is not nil, it must be the pseudo header for IPv6.
+func (m *Message) Marshal(psh []byte) ([]byte, error) {
+ var mtype int
+ switch typ := m.Type.(type) {
+ case ipv4.ICMPType:
+ mtype = int(typ)
+ case ipv6.ICMPType:
+ mtype = int(typ)
+ default:
+ return nil, syscall.EINVAL
+ }
+ b := []byte{byte(mtype), byte(m.Code), 0, 0}
+ if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil {
+ b = append(psh, b...)
+ }
+ if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 {
+ mb, err := m.Body.Marshal(m.Type.Protocol())
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, mb...)
+ }
+ if m.Type.Protocol() == iana.ProtocolIPv6ICMP {
+ if psh == nil { // cannot calculate checksum here
+ return b, nil
+ }
+ off, l := 2*net.IPv6len, len(b)-len(psh)
+ binary.BigEndian.PutUint32(b[off:off+4], uint32(l))
+ }
+ s := checksum(b)
+ // Place checksum back in header; using ^= avoids the
+ // assumption the checksum bytes are zero.
+ b[len(psh)+2] ^= byte(s)
+ b[len(psh)+3] ^= byte(s >> 8)
+ return b[len(psh):], nil
+}
+
+var parseFns = map[Type]func(int, []byte) (MessageBody, error){
+ ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach,
+ ipv4.ICMPTypeTimeExceeded: parseTimeExceeded,
+ ipv4.ICMPTypeParameterProblem: parseParamProb,
+
+ ipv4.ICMPTypeEcho: parseEcho,
+ ipv4.ICMPTypeEchoReply: parseEcho,
+
+ ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach,
+ ipv6.ICMPTypePacketTooBig: parsePacketTooBig,
+ ipv6.ICMPTypeTimeExceeded: parseTimeExceeded,
+ ipv6.ICMPTypeParameterProblem: parseParamProb,
+
+ ipv6.ICMPTypeEchoRequest: parseEcho,
+ ipv6.ICMPTypeEchoReply: parseEcho,
+}
+
+// ParseMessage parses b as an ICMP message.
+// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+func ParseMessage(proto int, b []byte) (*Message, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ var err error
+ m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))}
+ switch proto {
+ case iana.ProtocolICMP:
+ m.Type = ipv4.ICMPType(b[0])
+ case iana.ProtocolIPv6ICMP:
+ m.Type = ipv6.ICMPType(b[0])
+ default:
+ return nil, syscall.EINVAL
+ }
+ if fn, ok := parseFns[m.Type]; !ok {
+ m.Body, err = parseDefaultMessageBody(proto, b[4:])
+ } else {
+ m.Body, err = fn(proto, b[4:])
+ }
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
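A short sketch of the Marshal/ParseMessage round trip without touching the network; the echo ID, sequence number, and payload are illustrative values.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	// Marshal an ICMPv4 echo request; for ICMPv4 the checksum field is
	// always filled in by Marshal.
	wm := icmp.Message{
		Type: ipv4.ICMPTypeEcho, Code: 0,
		Body: &icmp.Echo{ID: 1, Seq: 1, Data: []byte("HELLO-R-U-THERE")},
	}
	wb, err := wm.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Parse the wire bytes back into a Message.
	rm, err := icmp.ParseMessage(1, wb) // 1 is the ICMPv4 protocol number
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rm.Type, rm.Code, rm.Checksum)
}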
diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go
new file mode 100644
index 000000000..5d2605f8d
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/message_test.go
@@ -0,0 +1,134 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp_test
+
+import (
+ "net"
+ "reflect"
+ "testing"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+var marshalAndParseMessageForIPv4Tests = []icmp.Message{
+ {
+ Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15,
+ Body: &icmp.DstUnreach{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv4.ICMPTypeTimeExceeded, Code: 1,
+ Body: &icmp.TimeExceeded{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv4.ICMPTypeParameterProblem, Code: 2,
+ Body: &icmp.ParamProb{
+ Pointer: 8,
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: 1, Seq: 2,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ },
+ {
+ Type: ipv4.ICMPTypePhoturis,
+ Body: &icmp.DefaultMessageBody{
+ Data: []byte{0x80, 0x40, 0x20, 0x10},
+ },
+ },
+}
+
+func TestMarshalAndParseMessageForIPv4(t *testing.T) {
+ for i, tt := range marshalAndParseMessageForIPv4Tests {
+ b, err := tt.Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Type != tt.Type || m.Code != tt.Code {
+ t.Errorf("#%v: got %v; want %v", i, m, &tt)
+ }
+ if !reflect.DeepEqual(m.Body, tt.Body) {
+ t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body)
+ }
+ }
+}
+
+var marshalAndParseMessageForIPv6Tests = []icmp.Message{
+ {
+ Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6,
+ Body: &icmp.DstUnreach{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv6.ICMPTypePacketTooBig, Code: 0,
+ Body: &icmp.PacketTooBig{
+ MTU: 1<<16 - 1,
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv6.ICMPTypeTimeExceeded, Code: 1,
+ Body: &icmp.TimeExceeded{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv6.ICMPTypeParameterProblem, Code: 2,
+ Body: &icmp.ParamProb{
+ Pointer: 8,
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ },
+ },
+ {
+ Type: ipv6.ICMPTypeEchoRequest, Code: 0,
+ Body: &icmp.Echo{
+ ID: 1, Seq: 2,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ },
+ {
+ Type: ipv6.ICMPTypeDuplicateAddressConfirmation,
+ Body: &icmp.DefaultMessageBody{
+ Data: []byte{0x80, 0x40, 0x20, 0x10},
+ },
+ },
+}
+
+func TestMarshalAndParseMessageForIPv6(t *testing.T) {
+ pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1"))
+ for i, tt := range marshalAndParseMessageForIPv6Tests {
+ for _, psh := range [][]byte{pshicmp, nil} {
+ b, err := tt.Marshal(psh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Type != tt.Type || m.Code != tt.Code {
+ t.Errorf("#%v: got %v; want %v", i, m, &tt)
+ }
+ if !reflect.DeepEqual(m.Body, tt.Body) {
+ t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body)
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go
new file mode 100644
index 000000000..2121a17be
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/messagebody.go
@@ -0,0 +1,41 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A MessageBody represents an ICMP message body.
+type MessageBody interface {
+ // Len returns the length of ICMP message body.
+ // Proto must be either the ICMPv4 or ICMPv6 protocol number.
+ Len(proto int) int
+
+ // Marshal returns the binary encoding of ICMP message body.
+ // Proto must be either the ICMPv4 or ICMPv6 protocol number.
+ Marshal(proto int) ([]byte, error)
+}
+
+// A DefaultMessageBody represents the default message body.
+type DefaultMessageBody struct {
+ Data []byte // data
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *DefaultMessageBody) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ return len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) {
+ return p.Data, nil
+}
+
+// parseDefaultMessageBody parses b as an ICMP message body.
+func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) {
+ p := &DefaultMessageBody{Data: make([]byte, len(b))}
+ copy(p.Data, b)
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go
new file mode 100644
index 000000000..c31491748
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/mpls.go
@@ -0,0 +1,77 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An MPLSLabel represents an MPLS label stack entry.
+type MPLSLabel struct {
+ Label int // label value
+ TC int // traffic class; formerly experimental use
+ S bool // bottom of stack
+ TTL int // time to live
+}
+
+const (
+ classMPLSLabelStack = 1
+ typeIncomingMPLSLabelStack = 1
+)
+
+// An MPLSLabelStack represents an MPLS label stack.
+type MPLSLabelStack struct {
+ Class int // extension object class number
+ Type int // extension object sub-type
+ Labels []MPLSLabel
+}
+
+// Len implements the Len method of Extension interface.
+func (ls *MPLSLabelStack) Len(proto int) int {
+ return 4 + (4 * len(ls.Labels))
+}
+
+// Marshal implements the Marshal method of Extension interface.
+func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) {
+ b := make([]byte, ls.Len(proto))
+ if err := ls.marshal(proto, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func (ls *MPLSLabelStack) marshal(proto int, b []byte) error {
+ l := ls.Len(proto)
+ binary.BigEndian.PutUint16(b[:2], uint16(l))
+ b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack
+ off := 4
+ for _, ll := range ls.Labels {
+ b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0)
+ b[off+2] |= byte(ll.TC << 1 & 0x0e)
+ if ll.S {
+ b[off+2] |= 0x1
+ }
+ b[off+3] = byte(ll.TTL)
+ off += 4
+ }
+ return nil
+}
+
+func parseMPLSLabelStack(b []byte) (Extension, error) {
+ ls := &MPLSLabelStack{
+ Class: int(b[2]),
+ Type: int(b[3]),
+ }
+ for b = b[4:]; len(b) >= 4; b = b[4:] {
+ ll := MPLSLabel{
+ Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4,
+ TC: int(b[2]&0x0e) >> 1,
+ TTL: int(b[3]),
+ }
+ if b[2]&0x1 != 0 {
+ ll.S = true
+ }
+ ls.Labels = append(ls.Labels, ll)
+ }
+ return ls, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go
new file mode 100644
index 000000000..f27135660
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/multipart.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "golang.org/x/net/internal/iana"
+
+// multipartMessageBodyDataLen takes b as an original datagram and
+// exts as extensions, and returns a required length for message body
+// and a required length for a padded original datagram in wire
+// format.
+func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) {
+ for _, ext := range exts {
+ bodyLen += ext.Len(proto)
+ }
+ if bodyLen > 0 {
+ dataLen = multipartMessageOrigDatagramLen(proto, b)
+ bodyLen += 4 // length of extension header
+ } else {
+ dataLen = len(b)
+ }
+ bodyLen += dataLen
+ return bodyLen, dataLen
+}
+
+// multipartMessageOrigDatagramLen takes b as an original datagram,
+// and returns a required length for a padded original datagram in wire
+// format.
+func multipartMessageOrigDatagramLen(proto int, b []byte) int {
+ roundup := func(b []byte, align int) int {
+ // According to RFC 4884, the padded original datagram
+ // field must contain at least 128 octets.
+ if len(b) < 128 {
+ return 128
+ }
+ r := len(b)
+ return (r + align - 1) & ^(align - 1)
+ }
+ switch proto {
+ case iana.ProtocolICMP:
+ return roundup(b, 4)
+ case iana.ProtocolIPv6ICMP:
+ return roundup(b, 8)
+ default:
+ return len(b)
+ }
+}
+
+// marshalMultipartMessageBody takes data as an original datagram and
+// exts as extensions, and returns a binary encoding of message body.
+// It can be used for non-multipart message bodies when exts is nil.
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) {
+ bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts)
+ b := make([]byte, 4+bodyLen)
+ copy(b[4:], data)
+ off := dataLen + 4
+ if len(exts) > 0 {
+ b[dataLen+4] = byte(extensionVersion << 4)
+ off += 4 // length of object header
+ for _, ext := range exts {
+ switch ext := ext.(type) {
+ case *MPLSLabelStack:
+ if err := ext.marshal(proto, b[off:]); err != nil {
+ return nil, err
+ }
+ off += ext.Len(proto)
+ case *InterfaceInfo:
+ attrs, l := ext.attrsAndLen(proto)
+ if err := ext.marshal(proto, b[off:], attrs, l); err != nil {
+ return nil, err
+ }
+ off += ext.Len(proto)
+ }
+ }
+ s := checksum(b[dataLen+4:])
+ b[dataLen+4+2] ^= byte(s)
+ b[dataLen+4+3] ^= byte(s >> 8)
+ switch proto {
+ case iana.ProtocolICMP:
+ b[1] = byte(dataLen / 4)
+ case iana.ProtocolIPv6ICMP:
+ b[0] = byte(dataLen / 8)
+ }
+ }
+ return b, nil
+}
+
+// parseMultipartMessageBody parses b as either a non-multipart
+// message body or a multipart message body.
+func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) {
+ var l int
+ switch proto {
+ case iana.ProtocolICMP:
+ l = 4 * int(b[1])
+ case iana.ProtocolIPv6ICMP:
+ l = 8 * int(b[0])
+ }
+ if len(b) == 4 {
+ return nil, nil, nil
+ }
+ exts, l, err := parseExtensions(b[4:], l)
+ if err != nil {
+ l = len(b) - 4
+ }
+ data := make([]byte, l)
+ copy(data, b[4:])
+ return data, exts, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go
new file mode 100644
index 000000000..966ccb8da
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/multipart_test.go
@@ -0,0 +1,442 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp_test
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "testing"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{
+ {
+ Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15,
+ Body: &icmp.DstUnreach{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{
+ Class: 1,
+ Type: 1,
+ Labels: []icmp.MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.IPv4(192, 168, 0, 1).To4(),
+ },
+ },
+ },
+ },
+ },
+ {
+ Type: ipv4.ICMPTypeTimeExceeded, Code: 1,
+ Body: &icmp.TimeExceeded{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ Extensions: []icmp.Extension{
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.IPv4(192, 168, 0, 1).To4(),
+ },
+ },
+ &icmp.MPLSLabelStack{
+ Class: 1,
+ Type: 1,
+ Labels: []icmp.MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Type: ipv4.ICMPTypeParameterProblem, Code: 2,
+ Body: &icmp.ParamProb{
+ Pointer: 8,
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{
+ Class: 1,
+ Type: 1,
+ Labels: []icmp.MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.IPv4(192, 168, 0, 1).To4(),
+ },
+ },
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x2f,
+ Interface: &net.Interface{
+ Index: 16,
+ Name: "en102",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.IPv4(192, 168, 0, 2).To4(),
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) {
+ for i, tt := range marshalAndParseMultipartMessageForIPv4Tests {
+ b, err := tt.Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b[5] != 32 {
+ t.Errorf("#%v: got %v; want 32", i, b[5])
+ }
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Type != tt.Type || m.Code != tt.Code {
+ t.Errorf("#%v: got %v; want %v", i, m, &tt)
+ }
+ switch m.Type {
+ case ipv4.ICMPTypeDestinationUnreachable:
+ got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach)
+ if !reflect.DeepEqual(got.Extensions, want.Extensions) {
+ t.Error(dumpExtensions(i, got.Extensions, want.Extensions))
+ }
+ if len(got.Data) != 128 {
+ t.Errorf("#%v: got %v; want 128", i, len(got.Data))
+ }
+ case ipv4.ICMPTypeTimeExceeded:
+ got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded)
+ if !reflect.DeepEqual(got.Extensions, want.Extensions) {
+ t.Error(dumpExtensions(i, got.Extensions, want.Extensions))
+ }
+ if len(got.Data) != 128 {
+ t.Errorf("#%v: got %v; want 128", i, len(got.Data))
+ }
+ case ipv4.ICMPTypeParameterProblem:
+ got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb)
+ if !reflect.DeepEqual(got.Extensions, want.Extensions) {
+ t.Error(dumpExtensions(i, got.Extensions, want.Extensions))
+ }
+ if len(got.Data) != 128 {
+ t.Errorf("#%v: got %v; want 128", i, len(got.Data))
+ }
+ }
+ }
+}
+
+var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{
+ {
+ Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6,
+ Body: &icmp.DstUnreach{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{
+ Class: 1,
+ Type: 1,
+ Labels: []icmp.MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.ParseIP("fe80::1"),
+ Zone: "en101",
+ },
+ },
+ },
+ },
+ },
+ {
+ Type: ipv6.ICMPTypeTimeExceeded, Code: 1,
+ Body: &icmp.TimeExceeded{
+ Data: []byte("ERROR-INVOKING-PACKET"),
+ Extensions: []icmp.Extension{
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x0f,
+ Interface: &net.Interface{
+ Index: 15,
+ Name: "en101",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.ParseIP("fe80::1"),
+ Zone: "en101",
+ },
+ },
+ &icmp.MPLSLabelStack{
+ Class: 1,
+ Type: 1,
+ Labels: []icmp.MPLSLabel{
+ {
+ Label: 16014,
+ TC: 0x4,
+ S: true,
+ TTL: 255,
+ },
+ },
+ },
+ &icmp.InterfaceInfo{
+ Class: 2,
+ Type: 0x2f,
+ Interface: &net.Interface{
+ Index: 16,
+ Name: "en102",
+ MTU: 8192,
+ },
+ Addr: &net.IPAddr{
+ IP: net.ParseIP("fe80::1"),
+ Zone: "en102",
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) {
+ pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1"))
+ for i, tt := range marshalAndParseMultipartMessageForIPv6Tests {
+ for _, psh := range [][]byte{pshicmp, nil} {
+ b, err := tt.Marshal(psh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b[4] != 16 {
+ t.Errorf("#%v: got %v; want 16", i, b[4])
+ }
+ m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Type != tt.Type || m.Code != tt.Code {
+ t.Errorf("#%v: got %v; want %v", i, m, &tt)
+ }
+ switch m.Type {
+ case ipv6.ICMPTypeDestinationUnreachable:
+ got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach)
+ if !reflect.DeepEqual(got.Extensions, want.Extensions) {
+ t.Error(dumpExtensions(i, got.Extensions, want.Extensions))
+ }
+ if len(got.Data) != 128 {
+ t.Errorf("#%v: got %v; want 128", i, len(got.Data))
+ }
+ case ipv6.ICMPTypeTimeExceeded:
+ got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded)
+ if !reflect.DeepEqual(got.Extensions, want.Extensions) {
+ t.Error(dumpExtensions(i, got.Extensions, want.Extensions))
+ }
+ if len(got.Data) != 128 {
+ t.Errorf("#%v: got %v; want 128", i, len(got.Data))
+ }
+ }
+ }
+ }
+}
+
+func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string {
+ var s string
+ for j, got := range gotExts {
+ switch got := got.(type) {
+ case *icmp.MPLSLabelStack:
+ want := wantExts[j].(*icmp.MPLSLabelStack)
+ if !reflect.DeepEqual(got, want) {
+ s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want)
+ }
+ case *icmp.InterfaceInfo:
+ want := wantExts[j].(*icmp.InterfaceInfo)
+ if !reflect.DeepEqual(got, want) {
+ s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr)
+ }
+ }
+ }
+ return s[:len(s)-1]
+}
+
+var multipartMessageBodyLenTests = []struct {
+ proto int
+ in icmp.MessageBody
+ out int
+}{
+ {
+ iana.ProtocolICMP,
+ &icmp.DstUnreach{
+ Data: make([]byte, ipv4.HeaderLen),
+ },
+ 4 + ipv4.HeaderLen, // unused and original datagram
+ },
+ {
+ iana.ProtocolICMP,
+ &icmp.TimeExceeded{
+ Data: make([]byte, ipv4.HeaderLen),
+ },
+ 4 + ipv4.HeaderLen, // unused and original datagram
+ },
+ {
+ iana.ProtocolICMP,
+ &icmp.ParamProb{
+ Data: make([]byte, ipv4.HeaderLen),
+ },
+ 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram
+ },
+
+ {
+ iana.ProtocolICMP,
+ &icmp.ParamProb{
+ Data: make([]byte, ipv4.HeaderLen),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram
+ },
+ {
+ iana.ProtocolICMP,
+ &icmp.ParamProb{
+ Data: make([]byte, 128),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram
+ },
+ {
+ iana.ProtocolICMP,
+ &icmp.ParamProb{
+ Data: make([]byte, 129),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram
+ },
+
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.DstUnreach{
+ Data: make([]byte, ipv6.HeaderLen),
+ },
+ 4 + ipv6.HeaderLen, // unused and original datagram
+ },
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.PacketTooBig{
+ Data: make([]byte, ipv6.HeaderLen),
+ },
+ 4 + ipv6.HeaderLen, // mtu and original datagram
+ },
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.TimeExceeded{
+ Data: make([]byte, ipv6.HeaderLen),
+ },
+ 4 + ipv6.HeaderLen, // unused and original datagram
+ },
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.ParamProb{
+ Data: make([]byte, ipv6.HeaderLen),
+ },
+ 4 + ipv6.HeaderLen, // pointer and original datagram
+ },
+
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.DstUnreach{
+ Data: make([]byte, 127),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram
+ },
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.DstUnreach{
+ Data: make([]byte, 128),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram
+ },
+ {
+ iana.ProtocolIPv6ICMP,
+ &icmp.DstUnreach{
+ Data: make([]byte, 129),
+ Extensions: []icmp.Extension{
+ &icmp.MPLSLabelStack{},
+ },
+ },
+ 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram
+ },
+}
+
+func TestMultipartMessageBodyLen(t *testing.T) {
+ for i, tt := range multipartMessageBodyLenTests {
+ if out := tt.in.Len(tt.proto); out != tt.out {
+ t.Errorf("#%d: got %d; want %d", i, out, tt.out)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go
new file mode 100644
index 000000000..a1c9df7bf
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/packettoobig.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// A PacketTooBig represents an ICMP packet too big message body.
+type PacketTooBig struct {
+ MTU int // maximum transmission unit of the nexthop link
+ Data []byte // data, known as original datagram field
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *PacketTooBig) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ return 4 + len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) {
+ b := make([]byte, 4+len(p.Data))
+ binary.BigEndian.PutUint32(b[:4], uint32(p.MTU))
+ copy(b[4:], p.Data)
+ return b, nil
+}
+
+// parsePacketTooBig parses b as an ICMP packet too big message body.
+func parsePacketTooBig(proto int, b []byte) (MessageBody, error) {
+ bodyLen := len(b)
+ if bodyLen < 4 {
+ return nil, errMessageTooShort
+ }
+ p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))}
+ if bodyLen > 4 {
+ p.Data = make([]byte, bodyLen-4)
+ copy(p.Data, b[4:])
+ }
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go
new file mode 100644
index 000000000..0a2548daa
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/paramprob.go
@@ -0,0 +1,63 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+ "encoding/binary"
+ "golang.org/x/net/internal/iana"
+)
+
+// A ParamProb represents an ICMP parameter problem message body.
+type ParamProb struct {
+ Pointer uintptr // offset within the data where the error was detected
+ Data []byte // data, known as original datagram field
+ Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *ParamProb) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+ return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *ParamProb) Marshal(proto int) ([]byte, error) {
+ if proto == iana.ProtocolIPv6ICMP {
+ b := make([]byte, p.Len(proto))
+ binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer))
+ copy(b[4:], p.Data)
+ return b, nil
+ }
+ b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+ if err != nil {
+ return nil, err
+ }
+ b[0] = byte(p.Pointer)
+ return b, nil
+}
+
+// parseParamProb parses b as an ICMP parameter problem message body.
+func parseParamProb(proto int, b []byte) (MessageBody, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ p := &ParamProb{}
+ if proto == iana.ProtocolIPv6ICMP {
+ p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4]))
+ p.Data = make([]byte, len(b)-4)
+ copy(p.Data, b[4:])
+ return p, nil
+ }
+ p.Pointer = uintptr(b[0])
+ var err error
+ p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go
new file mode 100644
index 000000000..3171dad11
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/ping_test.go
@@ -0,0 +1,200 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp_test
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) {
+ const host = "www.google.com"
+ ips, err := net.LookupIP(host)
+ if err != nil {
+ return nil, err
+ }
+ netaddr := func(ip net.IP) (net.Addr, error) {
+ switch c.LocalAddr().(type) {
+ case *net.UDPAddr:
+ return &net.UDPAddr{IP: ip}, nil
+ case *net.IPAddr:
+ return &net.IPAddr{IP: ip}, nil
+ default:
+ return nil, errors.New("neither UDPAddr nor IPAddr")
+ }
+ }
+ for _, ip := range ips {
+ switch protocol {
+ case iana.ProtocolICMP:
+ if ip.To4() != nil {
+ return netaddr(ip)
+ }
+ case iana.ProtocolIPv6ICMP:
+ if ip.To16() != nil && ip.To4() == nil {
+ return netaddr(ip)
+ }
+ }
+ }
+ return nil, errors.New("no A or AAAA record")
+}
+
+type pingTest struct {
+ network, address string
+ protocol int
+ mtype icmp.Type
+}
+
+var nonPrivilegedPingTests = []pingTest{
+ {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho},
+
+ {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest},
+}
+
+func TestNonPrivilegedPing(t *testing.T) {
+ if testing.Short() {
+ t.Skip("avoid external network")
+ }
+ switch runtime.GOOS {
+ case "darwin":
+ case "linux":
+ t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state")
+ default:
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ for i, tt := range nonPrivilegedPingTests {
+ if err := doPing(tt, i); err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+var privilegedPingTests = []pingTest{
+ {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho},
+
+ {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest},
+}
+
+func TestPrivilegedPing(t *testing.T) {
+ if testing.Short() {
+ t.Skip("avoid external network")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ for i, tt := range privilegedPingTests {
+ if err := doPing(tt, i); err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func doPing(tt pingTest, seq int) error {
+ c, err := icmp.ListenPacket(tt.network, tt.address)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ dst, err := googleAddr(c, tt.protocol)
+ if err != nil {
+ return err
+ }
+
+ if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP {
+ var f ipv6.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv6.ICMPTypeDestinationUnreachable)
+ f.Accept(ipv6.ICMPTypePacketTooBig)
+ f.Accept(ipv6.ICMPTypeTimeExceeded)
+ f.Accept(ipv6.ICMPTypeParameterProblem)
+ f.Accept(ipv6.ICMPTypeEchoReply)
+ if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil {
+ return err
+ }
+ }
+
+ wm := icmp.Message{
+ Type: tt.mtype, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq),
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }
+ wb, err := wm.Marshal(nil)
+ if err != nil {
+ return err
+ }
+ if n, err := c.WriteTo(wb, dst); err != nil {
+ return err
+ } else if n != len(wb) {
+ return fmt.Errorf("got %v; want %v", n, len(wb))
+ }
+
+ rb := make([]byte, 1500)
+ if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
+ return err
+ }
+ n, peer, err := c.ReadFrom(rb)
+ if err != nil {
+ return err
+ }
+ rm, err := icmp.ParseMessage(tt.protocol, rb[:n])
+ if err != nil {
+ return err
+ }
+ switch rm.Type {
+ case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply:
+ return nil
+ default:
+ return fmt.Errorf("got %+v from %v; want echo reply", rm, peer)
+ }
+}
+
+func TestConcurrentNonPrivilegedListenPacket(t *testing.T) {
+ if testing.Short() {
+ t.Skip("avoid external network")
+ }
+ switch runtime.GOOS {
+ case "darwin":
+ case "linux":
+ t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state")
+ default:
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ network, address := "udp4", "127.0.0.1"
+ if !nettest.SupportsIPv4() {
+ network, address = "udp6", "::1"
+ }
+ const N = 1000
+ var wg sync.WaitGroup
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func() {
+ defer wg.Done()
+ c, err := icmp.ListenPacket(network, address)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ c.Close()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go
new file mode 100644
index 000000000..c75f3ddaa
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "syscall"
+
+func init() {
+ freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate")
+}
diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go
new file mode 100644
index 000000000..344e15848
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/timeexceeded.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A TimeExceeded represents an ICMP time exceeded message body.
+type TimeExceeded struct {
+ Data []byte // data, known as original datagram field
+ Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *TimeExceeded) Len(proto int) int {
+ if p == nil {
+ return 0
+ }
+ l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+ return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *TimeExceeded) Marshal(proto int) ([]byte, error) {
+ return marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+}
+
+// parseTimeExceeded parses b as an ICMP time exceeded message body.
+func parseTimeExceeded(proto int, b []byte) (MessageBody, error) {
+ if len(b) < 4 {
+ return nil, errMessageTooShort
+ }
+ p := &TimeExceeded{}
+ var err error
+ p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go
new file mode 100644
index 000000000..3daa8979e
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna.go
@@ -0,0 +1,68 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package idna implements IDNA2008 (Internationalized Domain Names for
+// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
+// RFC 5894.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
+// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang".
+func ToASCII(s string) (string, error) {
+ if ascii(s) {
+ return s, nil
+ }
+ labels := strings.Split(s, ".")
+ for i, label := range labels {
+ if !ascii(label) {
+ a, err := encode(acePrefix, label)
+ if err != nil {
+ return "", err
+ }
+ labels[i] = a
+ }
+ }
+ return strings.Join(labels, "."), nil
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang".
+func ToUnicode(s string) (string, error) {
+ if !strings.Contains(s, acePrefix) {
+ return s, nil
+ }
+ labels := strings.Split(s, ".")
+ for i, label := range labels {
+ if strings.HasPrefix(label, acePrefix) {
+ u, err := decode(label[len(acePrefix):])
+ if err != nil {
+ return "", err
+ }
+ labels[i] = u
+ }
+ }
+ return strings.Join(labels, "."), nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
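A quick sketch of the ToASCII/ToUnicode pair exported by this vendored idna package; the domain value is taken from the package's own documentation examples.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// Encode a Unicode domain to its ASCII Compatible Encoding form.
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	// Decode it back to Unicode.
	u, err := idna.ToUnicode(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a, u) // xn--bcher-kva.example.com bücher.example.com
}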
diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go
new file mode 100644
index 000000000..b1bc6fa22
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+import (
+ "testing"
+)
+
+var idnaTestCases = [...]struct {
+ ascii, unicode string
+}{
+ // Labels.
+ {"books", "books"},
+ {"xn--bcher-kva", "bücher"},
+
+ // Domains.
+ {"foo--xn--bar.org", "foo--xn--bar.org"},
+ {"golang.org", "golang.org"},
+ {"example.xn--p1ai", "example.рф"},
+ {"xn--czrw28b.tw", "商業.tw"},
+ {"www.xn--mller-kva.de", "www.müller.de"},
+}
+
+func TestIDNA(t *testing.T) {
+ for _, tc := range idnaTestCases {
+ if a, err := ToASCII(tc.unicode); err != nil {
+ t.Errorf("ToASCII(%q): %v", tc.unicode, err)
+ } else if a != tc.ascii {
+ t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii)
+ }
+
+ if u, err := ToUnicode(tc.ascii); err != nil {
+ t.Errorf("ToUnicode(%q): %v", tc.ascii, err)
+ } else if u != tc.unicode {
+ t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode)
+ }
+ }
+}
+
+// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode
+// return errors.
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go
new file mode 100644
index 000000000..92e733f6a
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/punycode.go
@@ -0,0 +1,200 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// This file implements the Punycode algorithm from RFC 3492.
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "unicode/utf8"
+)
+
+// These parameter values are specified in section 5.
+//
+// All computation is done with int32s, so that overflow behavior is identical
+// regardless of whether int is 32-bit or 64-bit.
+const (
+ base int32 = 36
+ damp int32 = 700
+ initialBias int32 = 72
+ initialN int32 = 128
+ skew int32 = 38
+ tmax int32 = 26
+ tmin int32 = 1
+)
+
+// decode decodes a string as specified in section 6.2.
+func decode(encoded string) (string, error) {
+ if encoded == "" {
+ return "", nil
+ }
+ pos := 1 + strings.LastIndex(encoded, "-")
+ if pos == 1 {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ if pos == len(encoded) {
+ return encoded[:len(encoded)-1], nil
+ }
+ output := make([]rune, 0, len(encoded))
+ if pos != 0 {
+ for _, r := range encoded[:pos-1] {
+ output = append(output, r)
+ }
+ }
+ i, n, bias := int32(0), initialN, initialBias
+ for pos < len(encoded) {
+ oldI, w := i, int32(1)
+ for k := base; ; k += base {
+ if pos == len(encoded) {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ digit, ok := decodeDigit(encoded[pos])
+ if !ok {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ pos++
+ i += digit * w
+ if i < 0 {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ t := k - bias
+ if t < tmin {
+ t = tmin
+ } else if t > tmax {
+ t = tmax
+ }
+ if digit < t {
+ break
+ }
+ w *= base - t
+ if w >= math.MaxInt32/base {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ }
+ x := int32(len(output) + 1)
+ bias = adapt(i-oldI, x, oldI == 0)
+ n += i / x
+ i %= x
+ if n > utf8.MaxRune || len(output) >= 1024 {
+ return "", fmt.Errorf("idna: invalid label %q", encoded)
+ }
+ output = append(output, 0)
+ copy(output[i+1:], output[i:])
+ output[i] = n
+ i++
+ }
+ return string(output), nil
+}
+
+// encode encodes a string as specified in section 6.3 and prepends prefix to
+// the result.
+//
+// The "while h < length(input)" line in the specification becomes "for
+// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
+func encode(prefix, s string) (string, error) {
+ output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
+ copy(output, prefix)
+ delta, n, bias := int32(0), initialN, initialBias
+ b, remaining := int32(0), int32(0)
+ for _, r := range s {
+ if r < 0x80 {
+ b++
+ output = append(output, byte(r))
+ } else {
+ remaining++
+ }
+ }
+ h := b
+ if b > 0 {
+ output = append(output, '-')
+ }
+ for remaining != 0 {
+ m := int32(0x7fffffff)
+ for _, r := range s {
+ if m > r && r >= n {
+ m = r
+ }
+ }
+ delta += (m - n) * (h + 1)
+ if delta < 0 {
+ return "", fmt.Errorf("idna: invalid label %q", s)
+ }
+ n = m
+ for _, r := range s {
+ if r < n {
+ delta++
+ if delta < 0 {
+ return "", fmt.Errorf("idna: invalid label %q", s)
+ }
+ continue
+ }
+ if r > n {
+ continue
+ }
+ q := delta
+ for k := base; ; k += base {
+ t := k - bias
+ if t < tmin {
+ t = tmin
+ } else if t > tmax {
+ t = tmax
+ }
+ if q < t {
+ break
+ }
+ output = append(output, encodeDigit(t+(q-t)%(base-t)))
+ q = (q - t) / (base - t)
+ }
+ output = append(output, encodeDigit(q))
+ bias = adapt(delta, h+1, h == b)
+ delta = 0
+ h++
+ remaining--
+ }
+ delta++
+ n++
+ }
+ return string(output), nil
+}
+
+func decodeDigit(x byte) (digit int32, ok bool) {
+ switch {
+ case '0' <= x && x <= '9':
+ return int32(x - ('0' - 26)), true
+ case 'A' <= x && x <= 'Z':
+ return int32(x - 'A'), true
+ case 'a' <= x && x <= 'z':
+ return int32(x - 'a'), true
+ }
+ return 0, false
+}
+
+func encodeDigit(digit int32) byte {
+ switch {
+ case 0 <= digit && digit < 26:
+ return byte(digit + 'a')
+ case 26 <= digit && digit < 36:
+ return byte(digit + ('0' - 26))
+ }
+ panic("idna: internal error in punycode encoding")
+}
+
+// adapt is the bias adaptation function specified in section 6.1.
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+ if firstTime {
+ delta /= damp
+ } else {
+ delta /= 2
+ }
+ delta += delta / numPoints
+ k := int32(0)
+ for delta > ((base-tmin)*tmax)/2 {
+ delta /= base - tmin
+ k += base
+ }
+ return k + (base-tmin+1)*delta/(delta+skew)
+}
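
For orientation, a minimal sketch of how the two halves above fit together (not part of the vendored diff; decode and encode are unexported, so code like this would have to live inside package idna, where fmt is already imported, and the expected encoded form comes from the test table in the next file):

// punycodeRoundTrip encodes a Unicode label and decodes it back,
// checking that the coder above round-trips cleanly.
func punycodeRoundTrip() error {
	label := "bücher"
	enc, err := encode("", label) // expected: "bcher-kva"
	if err != nil {
		return err
	}
	dec, err := decode(enc)
	if err != nil {
		return err
	}
	if dec != label {
		return fmt.Errorf("round trip mismatch: %q != %q", dec, label)
	}
	return nil
}
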
diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go
new file mode 100644
index 000000000..bfec81dec
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/punycode_test.go
@@ -0,0 +1,198 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+import (
+ "strings"
+ "testing"
+)
+
+var punycodeTestCases = [...]struct {
+ s, encoded string
+}{
+ {"", ""},
+ {"-", "--"},
+ {"-a", "-a-"},
+ {"-a-", "-a--"},
+ {"a", "a-"},
+ {"a-", "a--"},
+ {"a-b", "a-b-"},
+ {"books", "books-"},
+ {"bücher", "bcher-kva"},
+ {"Hello世界", "Hello-ck1hg65u"},
+ {"ü", "tda"},
+ {"üý", "tdac"},
+
+ // The test cases below come from RFC 3492 section 7.1 with Errata 3026.
+ {
+ // (A) Arabic (Egyptian).
+ "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
+ "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
+ "egbpdaj6bu4bxfgehfvwxn",
+ },
+ {
+ // (B) Chinese (simplified).
+ "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
+ "ihqwcrb4cv8a8dqg056pqjye",
+ },
+ {
+ // (C) Chinese (traditional).
+ "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
+ "ihqwctvzc91f659drss3x8bo0yb",
+ },
+ {
+ // (D) Czech.
+ "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
+ "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
+ "\u0065\u0073\u006B\u0079",
+ "Proprostnemluvesky-uyb24dma41a",
+ },
+ {
+ // (E) Hebrew.
+ "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" +
+ "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" +
+ "\u05D1\u05E8\u05D9\u05EA",
+ "4dbcagdahymbxekheh6e0a7fei0b",
+ },
+ {
+ // (F) Hindi (Devanagari).
+ "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" +
+ "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" +
+ "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" +
+ "\u0939\u0948\u0902",
+ "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd",
+ },
+ {
+ // (G) Japanese (kanji and hiragana).
+ "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" +
+ "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
+ "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa",
+ },
+ {
+ // (H) Korean (Hangul syllables).
+ "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" +
+ "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" +
+ "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
+ "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" +
+ "psd879ccm6fea98c",
+ },
+ {
+ // (I) Russian (Cyrillic).
+ "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" +
+ "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" +
+ "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" +
+ "\u0438",
+ "b1abfaaepdrnnbgefbadotcwatmq2g4l",
+ },
+ {
+ // (J) Spanish.
+ "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" +
+ "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" +
+ "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" +
+ "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" +
+ "\u0061\u00F1\u006F\u006C",
+ "PorqunopuedensimplementehablarenEspaol-fmd56a",
+ },
+ {
+ // (K) Vietnamese.
+ "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" +
+ "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" +
+ "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" +
+ "\u0056\u0069\u1EC7\u0074",
+ "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g",
+ },
+ {
+ // (L) 3<nen>B<gumi><kinpachi><sensei>.
+ "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
+ "3B-ww4c5e180e575a65lsy2b",
+ },
+ {
+ // (M) <amuro><namie>-with-SUPER-MONKEYS.
+ "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" +
+ "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" +
+ "\u004F\u004E\u004B\u0045\u0059\u0053",
+ "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n",
+ },
+ {
+ // (N) Hello-Another-Way-<sorezore><no><basho>.
+ "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" +
+ "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" +
+ "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
+ "Hello-Another-Way--fc4qua05auwb3674vfr0b",
+ },
+ {
+ // (O) <hitotsu><yane><no><shita>2.
+ "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
+ "2-u9tlzr9756bt3uc0v",
+ },
+ {
+ // (P) Maji<de>Koi<suru>5<byou><mae>
+ "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" +
+ "\u308B\u0035\u79D2\u524D",
+ "MajiKoi5-783gue6qz075azm5e",
+ },
+ {
+ // (Q) <pafii>de<runba>
+ "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
+ "de-jg4avhby1noc0d",
+ },
+ {
+ // (R) <sono><supiido><de>
+ "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
+ "d9juau41awczczp",
+ },
+ {
+ // (S) -> $1.00 <-
+ "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" +
+ "\u003C\u002D",
+ "-> $1.00 <--",
+ },
+}
+
+func TestPunycode(t *testing.T) {
+ for _, tc := range punycodeTestCases {
+ if got, err := decode(tc.encoded); err != nil {
+ t.Errorf("decode(%q): %v", tc.encoded, err)
+ } else if got != tc.s {
+ t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s)
+ }
+
+ if got, err := encode("", tc.s); err != nil {
+ t.Errorf(`encode("", %q): %v`, tc.s, err)
+ } else if got != tc.encoded {
+ t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded)
+ }
+ }
+}
+
+var punycodeErrorTestCases = [...]string{
+ "decode -", // A sole '-' is invalid.
+ "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z].
+ "decode foo#bar", // '#' is not in [0-9A-Za-z].
+ "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z].
+ "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated.
+ "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF.
+ "decode 9999999999a", // "9999999999a" overflows the int32 calculation.
+
+ "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow.
+}
+
+func TestPunycodeErrors(t *testing.T) {
+ for _, tc := range punycodeErrorTestCases {
+ var err error
+ switch {
+ case strings.HasPrefix(tc, "decode "):
+ _, err = decode(tc[7:])
+ case strings.HasPrefix(tc, "encode "):
+ _, err = encode("", tc[7:])
+ }
+ if err == nil {
+ if len(tc) > 256 {
+ tc = tc[:100] + "..." + tc[len(tc)-100:]
+ }
+ t.Errorf("no error for %s", tc)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go
new file mode 100644
index 000000000..3438a27c8
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/iana/const.go
@@ -0,0 +1,180 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
+package iana // import "golang.org/x/net/internal/iana"
+
+// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25
+const (
+ DiffServCS0 = 0x0 // CS0
+ DiffServCS1 = 0x20 // CS1
+ DiffServCS2 = 0x40 // CS2
+ DiffServCS3 = 0x60 // CS3
+ DiffServCS4 = 0x80 // CS4
+ DiffServCS5 = 0xa0 // CS5
+ DiffServCS6 = 0xc0 // CS6
+ DiffServCS7 = 0xe0 // CS7
+ DiffServAF11 = 0x28 // AF11
+ DiffServAF12 = 0x30 // AF12
+ DiffServAF13 = 0x38 // AF13
+ DiffServAF21 = 0x48 // AF21
+ DiffServAF22 = 0x50 // AF22
+ DiffServAF23 = 0x58 // AF23
+ DiffServAF31 = 0x68 // AF31
+ DiffServAF32 = 0x70 // AF32
+ DiffServAF33 = 0x78 // AF33
+ DiffServAF41 = 0x88 // AF41
+ DiffServAF42 = 0x90 // AF42
+ DiffServAF43 = 0x98 // AF43
+ DiffServEFPHB = 0xb8 // EF PHB
+ DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
+)
+
+// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06
+const (
+ NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport)
+ ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1))
+ ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0))
+ CongestionExperienced = 0x3 // CE (Congestion Experienced)
+)
+
+// Protocol Numbers, Updated: 2015-10-06
+const (
+ ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number
+ ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option
+ ProtocolICMP = 1 // Internet Control Message
+ ProtocolIGMP = 2 // Internet Group Management
+ ProtocolGGP = 3 // Gateway-to-Gateway
+ ProtocolIPv4 = 4 // IPv4 encapsulation
+ ProtocolST = 5 // Stream
+ ProtocolTCP = 6 // Transmission Control
+ ProtocolCBT = 7 // CBT
+ ProtocolEGP = 8 // Exterior Gateway Protocol
+ ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
+ ProtocolBBNRCCMON = 10 // BBN RCC Monitoring
+ ProtocolNVPII = 11 // Network Voice Protocol
+ ProtocolPUP = 12 // PUP
+ ProtocolEMCON = 14 // EMCON
+ ProtocolXNET = 15 // Cross Net Debugger
+ ProtocolCHAOS = 16 // Chaos
+ ProtocolUDP = 17 // User Datagram
+ ProtocolMUX = 18 // Multiplexing
+ ProtocolDCNMEAS = 19 // DCN Measurement Subsystems
+ ProtocolHMP = 20 // Host Monitoring
+ ProtocolPRM = 21 // Packet Radio Measurement
+ ProtocolXNSIDP = 22 // XEROX NS IDP
+ ProtocolTRUNK1 = 23 // Trunk-1
+ ProtocolTRUNK2 = 24 // Trunk-2
+ ProtocolLEAF1 = 25 // Leaf-1
+ ProtocolLEAF2 = 26 // Leaf-2
+ ProtocolRDP = 27 // Reliable Data Protocol
+ ProtocolIRTP = 28 // Internet Reliable Transaction
+ ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4
+ ProtocolNETBLT = 30 // Bulk Data Transfer Protocol
+ ProtocolMFENSP = 31 // MFE Network Services Protocol
+ ProtocolMERITINP = 32 // MERIT Internodal Protocol
+ ProtocolDCCP = 33 // Datagram Congestion Control Protocol
+ Protocol3PC = 34 // Third Party Connect Protocol
+ ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol
+ ProtocolXTP = 36 // XTP
+ ProtocolDDP = 37 // Datagram Delivery Protocol
+ ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto
+ ProtocolTPPP = 39 // TP++ Transport Protocol
+ ProtocolIL = 40 // IL Transport Protocol
+ ProtocolIPv6 = 41 // IPv6 encapsulation
+ ProtocolSDRP = 42 // Source Demand Routing Protocol
+ ProtocolIPv6Route = 43 // Routing Header for IPv6
+ ProtocolIPv6Frag = 44 // Fragment Header for IPv6
+ ProtocolIDRP = 45 // Inter-Domain Routing Protocol
+ ProtocolRSVP = 46 // Reservation Protocol
+ ProtocolGRE = 47 // Generic Routing Encapsulation
+ ProtocolDSR = 48 // Dynamic Source Routing Protocol
+ ProtocolBNA = 49 // BNA
+ ProtocolESP = 50 // Encap Security Payload
+ ProtocolAH = 51 // Authentication Header
+ ProtocolINLSP = 52 // Integrated Net Layer Security TUBA
+ ProtocolNARP = 54 // NBMA Address Resolution Protocol
+ ProtocolMOBILE = 55 // IP Mobility
+ ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management
+ ProtocolSKIP = 57 // SKIP
+ ProtocolIPv6ICMP = 58 // ICMP for IPv6
+ ProtocolIPv6NoNxt = 59 // No Next Header for IPv6
+ ProtocolIPv6Opts = 60 // Destination Options for IPv6
+ ProtocolCFTP = 62 // CFTP
+ ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK
+ ProtocolKRYPTOLAN = 65 // Kryptolan
+ ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol
+ ProtocolIPPC = 67 // Internet Pluribus Packet Core
+ ProtocolSATMON = 69 // SATNET Monitoring
+ ProtocolVISA = 70 // VISA Protocol
+ ProtocolIPCV = 71 // Internet Packet Core Utility
+ ProtocolCPNX = 72 // Computer Protocol Network Executive
+ ProtocolCPHB = 73 // Computer Protocol Heart Beat
+ ProtocolWSN = 74 // Wang Span Network
+ ProtocolPVP = 75 // Packet Video Protocol
+ ProtocolBRSATMON = 76 // Backroom SATNET Monitoring
+ ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary
+ ProtocolWBMON = 78 // WIDEBAND Monitoring
+ ProtocolWBEXPAK = 79 // WIDEBAND EXPAK
+ ProtocolISOIP = 80 // ISO Internet Protocol
+ ProtocolVMTP = 81 // VMTP
+ ProtocolSECUREVMTP = 82 // SECURE-VMTP
+ ProtocolVINES = 83 // VINES
+ ProtocolTTP = 84 // Transaction Transport Protocol
+ ProtocolIPTM = 84 // Internet Protocol Traffic Manager
+ ProtocolNSFNETIGP = 85 // NSFNET-IGP
+ ProtocolDGP = 86 // Dissimilar Gateway Protocol
+ ProtocolTCF = 87 // TCF
+ ProtocolEIGRP = 88 // EIGRP
+ ProtocolOSPFIGP = 89 // OSPFIGP
+ ProtocolSpriteRPC = 90 // Sprite RPC Protocol
+ ProtocolLARP = 91 // Locus Address Resolution Protocol
+ ProtocolMTP = 92 // Multicast Transport Protocol
+ ProtocolAX25 = 93 // AX.25 Frames
+ ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol
+ ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro.
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation
+ ProtocolENCAP = 98 // Encapsulation Header
+ ProtocolGMTP = 100 // GMTP
+ ProtocolIFMP = 101 // Ipsilon Flow Management Protocol
+ ProtocolPNNI = 102 // PNNI over IP
+ ProtocolPIM = 103 // Protocol Independent Multicast
+ ProtocolARIS = 104 // ARIS
+ ProtocolSCPS = 105 // SCPS
+ ProtocolQNX = 106 // QNX
+ ProtocolAN = 107 // Active Networks
+ ProtocolIPComp = 108 // IP Payload Compression Protocol
+ ProtocolSNP = 109 // Sitara Networks Protocol
+ ProtocolCompaqPeer = 110 // Compaq Peer Protocol
+ ProtocolIPXinIP = 111 // IPX in IP
+ ProtocolVRRP = 112 // Virtual Router Redundancy Protocol
+ ProtocolPGM = 113 // PGM Reliable Transport Protocol
+ ProtocolL2TP = 115 // Layer Two Tunneling Protocol
+ ProtocolDDX = 116 // D-II Data Exchange (DDX)
+ ProtocolIATP = 117 // Interactive Agent Transfer Protocol
+ ProtocolSTP = 118 // Schedule Transfer Protocol
+ ProtocolSRP = 119 // SpectraLink Radio Protocol
+ ProtocolUTI = 120 // UTI
+ ProtocolSMP = 121 // Simple Message Protocol
+ ProtocolPTP = 123 // Performance Transparency Protocol
+ ProtocolISIS = 124 // ISIS over IPv4
+ ProtocolFIRE = 125 // FIRE
+ ProtocolCRTP = 126 // Combat Radio Transport Protocol
+ ProtocolCRUDP = 127 // Combat Radio User Datagram
+ ProtocolSSCOPMCE = 128 // SSCOPMCE
+ ProtocolIPLT = 129 // IPLT
+ ProtocolSPS = 130 // Secure Packet Shield
+ ProtocolPIPE = 131 // Private IP Encapsulation within IP
+ ProtocolSCTP = 132 // Stream Control Transmission Protocol
+ ProtocolFC = 133 // Fibre Channel
+ ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE
+ ProtocolMobilityHeader = 135 // Mobility Header
+ ProtocolUDPLite = 136 // UDPLite
+ ProtocolMPLSinIP = 137 // MPLS-in-IP
+ ProtocolMANET = 138 // MANET Protocols
+ ProtocolHIP = 139 // Host Identity Protocol
+ ProtocolShim6 = 140 // Shim6 Protocol
+ ProtocolWESP = 141 // Wrapped Encapsulating Security Payload
+ ProtocolROHC = 142 // Robust Header Compression
+ ProtocolReserved = 255 // Reserved
+)
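
For orientation, a minimal sketch of how the generated constants are consumed (not part of the vendored diff; the package is internal to golang.org/x/net, so only code inside that module can import it):

package main

import (
	"fmt"

	"golang.org/x/net/internal/iana"
)

func main() {
	// The generated constants map IANA protocol names to their numbers.
	fmt.Println(iana.ProtocolTCP)  // 6
	fmt.Println(iana.ProtocolUDP)  // 17
	fmt.Println(iana.ProtocolICMP) // 1
}
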
diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go
new file mode 100644
index 000000000..2d8c07ca1
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/iana/gen.go
@@ -0,0 +1,293 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates internet protocol constants and tables by
+// reading IANA protocol registries.
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+)
+
+var registries = []struct {
+ url string
+ parse func(io.Writer, io.Reader) error
+}{
+ {
+ "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
+ parseDSCPRegistry,
+ },
+ {
+ "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
+ parseTOSTCByte,
+ },
+ {
+ "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
+ parseProtocolNumbers,
+ },
+}
+
+func main() {
+ var bb bytes.Buffer
+ fmt.Fprintf(&bb, "// go generate gen.go\n")
+ fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+ fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
+ fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
+ for _, r := range registries {
+ resp, err := http.Get(r.url)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
+ os.Exit(1)
+ }
+ if err := r.parse(&bb, resp.Body); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Fprintf(&bb, "\n")
+ }
+ b, err := format.Source(bb.Bytes())
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func parseDSCPRegistry(w io.Writer, r io.Reader) error {
+ dec := xml.NewDecoder(r)
+ var dr dscpRegistry
+ if err := dec.Decode(&dr); err != nil {
+ return err
+ }
+ drs := dr.escape()
+ fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
+ fmt.Fprintf(w, "const (\n")
+ for _, dr := range drs {
+ fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value)
+ fmt.Fprintf(w, "// %s\n", dr.OrigName)
+ }
+ fmt.Fprintf(w, ")\n")
+ return nil
+}
+
+type dscpRegistry struct {
+ XMLName xml.Name `xml:"registry"`
+ Title string `xml:"title"`
+ Updated string `xml:"updated"`
+ Note string `xml:"note"`
+ RegTitle string `xml:"registry>title"`
+ PoolRecords []struct {
+ Name string `xml:"name"`
+ Space string `xml:"space"`
+ } `xml:"registry>record"`
+ Records []struct {
+ Name string `xml:"name"`
+ Space string `xml:"space"`
+ } `xml:"registry>registry>record"`
+}
+
+type canonDSCPRecord struct {
+ OrigName string
+ Name string
+ Value int
+}
+
+func (drr *dscpRegistry) escape() []canonDSCPRecord {
+ drs := make([]canonDSCPRecord, len(drr.Records))
+ sr := strings.NewReplacer(
+ "+", "",
+ "-", "",
+ "/", "",
+ ".", "",
+ " ", "",
+ )
+ for i, dr := range drr.Records {
+ s := strings.TrimSpace(dr.Name)
+ drs[i].OrigName = s
+ drs[i].Name = sr.Replace(s)
+ n, err := strconv.ParseUint(dr.Space, 2, 8)
+ if err != nil {
+ continue
+ }
+ drs[i].Value = int(n) << 2
+ }
+ return drs
+}
+
+func parseTOSTCByte(w io.Writer, r io.Reader) error {
+ dec := xml.NewDecoder(r)
+ var ttb tosTCByte
+ if err := dec.Decode(&ttb); err != nil {
+ return err
+ }
+ trs := ttb.escape()
+ fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated)
+ fmt.Fprintf(w, "const (\n")
+ for _, tr := range trs {
+ fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value)
+ fmt.Fprintf(w, "// %s\n", tr.OrigKeyword)
+ }
+ fmt.Fprintf(w, ")\n")
+ return nil
+}
+
+type tosTCByte struct {
+ XMLName xml.Name `xml:"registry"`
+ Title string `xml:"title"`
+ Updated string `xml:"updated"`
+ Note string `xml:"note"`
+ RegTitle string `xml:"registry>title"`
+ Records []struct {
+ Binary string `xml:"binary"`
+ Keyword string `xml:"keyword"`
+ } `xml:"registry>record"`
+}
+
+type canonTOSTCByteRecord struct {
+ OrigKeyword string
+ Keyword string
+ Value int
+}
+
+func (ttb *tosTCByte) escape() []canonTOSTCByteRecord {
+ trs := make([]canonTOSTCByteRecord, len(ttb.Records))
+ sr := strings.NewReplacer(
+ "Capable", "",
+ "(", "",
+ ")", "",
+ "+", "",
+ "-", "",
+ "/", "",
+ ".", "",
+ " ", "",
+ )
+ for i, tr := range ttb.Records {
+ s := strings.TrimSpace(tr.Keyword)
+ trs[i].OrigKeyword = s
+ ss := strings.Split(s, " ")
+ if len(ss) > 1 {
+ trs[i].Keyword = strings.Join(ss[1:], " ")
+ } else {
+ trs[i].Keyword = ss[0]
+ }
+ trs[i].Keyword = sr.Replace(trs[i].Keyword)
+ n, err := strconv.ParseUint(tr.Binary, 2, 8)
+ if err != nil {
+ continue
+ }
+ trs[i].Value = int(n)
+ }
+ return trs
+}
+
+func parseProtocolNumbers(w io.Writer, r io.Reader) error {
+ dec := xml.NewDecoder(r)
+ var pn protocolNumbers
+ if err := dec.Decode(&pn); err != nil {
+ return err
+ }
+ prs := pn.escape()
+ prs = append([]canonProtocolRecord{{
+ Name: "IP",
+ Descr: "IPv4 encapsulation, pseudo protocol number",
+ Value: 0,
+ }}, prs...)
+ fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
+ fmt.Fprintf(w, "const (\n")
+ for _, pr := range prs {
+ if pr.Name == "" {
+ continue
+ }
+ fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
+ s := pr.Descr
+ if s == "" {
+ s = pr.OrigName
+ }
+ fmt.Fprintf(w, "// %s\n", s)
+ }
+ fmt.Fprintf(w, ")\n")
+ return nil
+}
+
+type protocolNumbers struct {
+ XMLName xml.Name `xml:"registry"`
+ Title string `xml:"title"`
+ Updated string `xml:"updated"`
+ RegTitle string `xml:"registry>title"`
+ Note string `xml:"registry>note"`
+ Records []struct {
+ Value string `xml:"value"`
+ Name string `xml:"name"`
+ Descr string `xml:"description"`
+ } `xml:"registry>record"`
+}
+
+type canonProtocolRecord struct {
+ OrigName string
+ Name string
+ Descr string
+ Value int
+}
+
+func (pn *protocolNumbers) escape() []canonProtocolRecord {
+ prs := make([]canonProtocolRecord, len(pn.Records))
+ sr := strings.NewReplacer(
+ "-in-", "in",
+ "-within-", "within",
+ "-over-", "over",
+ "+", "P",
+ "-", "",
+ "/", "",
+ ".", "",
+ " ", "",
+ )
+ for i, pr := range pn.Records {
+ if strings.Contains(pr.Name, "Deprecated") ||
+ strings.Contains(pr.Name, "deprecated") {
+ continue
+ }
+ prs[i].OrigName = pr.Name
+ s := strings.TrimSpace(pr.Name)
+ switch pr.Name {
+ case "ISIS over IPv4":
+ prs[i].Name = "ISIS"
+ case "manet":
+ prs[i].Name = "MANET"
+ default:
+ prs[i].Name = sr.Replace(s)
+ }
+ ss := strings.Split(pr.Descr, "\n")
+ for i := range ss {
+ ss[i] = strings.TrimSpace(ss[i])
+ }
+ if len(ss) > 1 {
+ prs[i].Descr = strings.Join(ss, " ")
+ } else {
+ prs[i].Descr = ss[0]
+ }
+ prs[i].Value, _ = strconv.Atoi(pr.Value)
+ }
+ return prs
+}
diff --git a/vendor/golang.org/x/net/internal/netreflect/socket.go b/vendor/golang.org/x/net/internal/netreflect/socket.go
new file mode 100644
index 000000000..e82e51c44
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/netreflect/socket.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netreflect implements run-time reflection for the
+// facilities of the net package.
+package netreflect
+
+import (
+ "errors"
+ "net"
+)
+
+var (
+ errInvalidType = errors.New("invalid type")
+ errOpNoSupport = errors.New("operation not supported")
+)
+
+// SocketOf returns the socket descriptor of c.
+func SocketOf(c net.Conn) (uintptr, error) {
+ switch c.(type) {
+ case *net.TCPConn, *net.UDPConn, *net.IPConn, *net.UnixConn:
+ return socketOf(c)
+ default:
+ return 0, errInvalidType
+ }
+}
+
+// PacketSocketOf returns the socket descriptor of c.
+func PacketSocketOf(c net.PacketConn) (uintptr, error) {
+ switch c.(type) {
+ case *net.UDPConn, *net.IPConn, *net.UnixConn:
+ return socketOf(c.(net.Conn))
+ default:
+ return 0, errInvalidType
+ }
+}
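
For orientation, a minimal sketch of recovering a socket descriptor with SocketOf (not part of the vendored diff; the package is internal to golang.org/x/net, and the local listener here exists only to have a live *net.TCPConn to inspect):

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/internal/netreflect"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer ln.Close()

	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	// SocketOf reflects into the standard library's netFD to pull out
	// the underlying socket descriptor of the connection.
	fd, err := netreflect.SocketOf(c)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("socket descriptor:", fd)
}
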
diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_posix.go b/vendor/golang.org/x/net/internal/netreflect/socket_posix.go
new file mode 100644
index 000000000..df475a2b2
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/netreflect/socket_posix.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package netreflect
+
+import (
+ "net"
+ "reflect"
+ "runtime"
+)
+
+func socketOf(c net.Conn) (uintptr, error) {
+ v := reflect.ValueOf(c)
+ switch e := v.Elem(); e.Kind() {
+ case reflect.Struct:
+ fd := e.FieldByName("conn").FieldByName("fd")
+ switch e := fd.Elem(); e.Kind() {
+ case reflect.Struct:
+ sysfd := e.FieldByName("sysfd")
+ if runtime.GOOS == "windows" {
+ return uintptr(sysfd.Uint()), nil
+ }
+ return uintptr(sysfd.Int()), nil
+ }
+ }
+ return 0, errInvalidType
+}
diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_stub.go b/vendor/golang.org/x/net/internal/netreflect/socket_stub.go
new file mode 100644
index 000000000..85adb4b7f
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/netreflect/socket_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package netreflect
+
+import "net"
+
+func socketOf(c net.Conn) (uintptr, error) { return 0, errOpNoSupport }
diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_test.go b/vendor/golang.org/x/net/internal/netreflect/socket_test.go
new file mode 100644
index 000000000..305665ac1
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/netreflect/socket_test.go
@@ -0,0 +1,123 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package netreflect_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+func localPath() string {
+ f, err := ioutil.TempFile("", "netreflect")
+ if err != nil {
+ panic(err)
+ }
+ path := f.Name()
+ f.Close()
+ os.Remove(path)
+ return path
+}
+
+func newLocalListener(network string) (net.Listener, error) {
+ switch network {
+ case "tcp":
+ if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil {
+ return ln, nil
+ }
+ return net.Listen("tcp6", "[::1]:0")
+ case "tcp4":
+ return net.Listen("tcp4", "127.0.0.1:0")
+ case "tcp6":
+ return net.Listen("tcp6", "[::1]:0")
+ case "unix", "unixpacket":
+ return net.Listen(network, localPath())
+ }
+ return nil, fmt.Errorf("%s is not supported", network)
+}
+
+func newLocalPacketListener(network string) (net.PacketConn, error) {
+ switch network {
+ case "udp":
+ if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil {
+ return c, nil
+ }
+ return net.ListenPacket("udp6", "[::1]:0")
+ case "udp4":
+ return net.ListenPacket("udp4", "127.0.0.1:0")
+ case "udp6":
+ return net.ListenPacket("udp6", "[::1]:0")
+ case "unixgram":
+ return net.ListenPacket(network, localPath())
+ }
+ return nil, fmt.Errorf("%s is not supported", network)
+}
+
+func TestSocketOf(t *testing.T) {
+ for _, network := range []string{"tcp", "unix", "unixpacket"} {
+ switch runtime.GOOS {
+ case "darwin":
+ if network == "unixpacket" {
+ continue
+ }
+ case "nacl", "plan9":
+ continue
+ case "windows":
+ if network == "unix" || network == "unixpacket" {
+ continue
+ }
+ }
+ ln, err := newLocalListener(network)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ defer func() {
+ path := ln.Addr().String()
+ ln.Close()
+ if network == "unix" || network == "unixpacket" {
+ os.Remove(path)
+ }
+ }()
+ c, err := net.Dial(ln.Addr().Network(), ln.Addr().String())
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ defer c.Close()
+ if _, err := netreflect.SocketOf(c); err != nil {
+ t.Error(err)
+ continue
+ }
+ }
+}
+
+func TestPacketSocketOf(t *testing.T) {
+ for _, network := range []string{"udp", "unixgram"} {
+ switch runtime.GOOS {
+ case "nacl", "plan9":
+ continue
+ case "windows":
+ if network == "unixgram" {
+ continue
+ }
+ }
+ c, err := newLocalPacketListener(network)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ defer c.Close()
+ if _, err := netreflect.PacketSocketOf(c); err != nil {
+ t.Error(err)
+ continue
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go
new file mode 100644
index 000000000..b2308a0e8
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package nettest
+
+import (
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func supportsIPv6MulticastDeliveryOnLoopback() bool {
+ switch runtime.GOOS {
+ case "freebsd":
+ // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065.
+ // Even after the fix, it looks like the latest
+ // kernels don't deliver link-local scoped multicast
+ // packets correctly.
+ return false
+ case "darwin":
+ // See http://support.apple.com/kb/HT1633.
+ s, err := syscall.Sysctl("kern.osrelease")
+ if err != nil {
+ return false
+ }
+ ss := strings.Split(s, ".")
+ if len(ss) == 0 {
+ return false
+ }
+ // OS X 10.9 (Darwin 13) or above seems to do the
+ // right thing: it preserves the packet header, which
+ // is needed for the pseudo-header checksum calculation
+ // on the loopback multicast delivery path.
+ // If not, you will probably see a slow-acting kernel
+ // crash caused by lazy mbuf corruption.
+ // See ip6_mloopback in netinet6/ip6_output.c.
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 13 {
+ return false
+ }
+ return true
+ default:
+ return true
+ }
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go
new file mode 100644
index 000000000..a42b80709
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux solaris
+
+package nettest
+
+func supportsIPv6MulticastDeliveryOnLoopback() bool {
+ return true
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go
new file mode 100644
index 000000000..963ed9965
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_posix.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package nettest
+
+import (
+ "os"
+ "syscall"
+)
+
+func protocolNotSupported(err error) bool {
+ switch err := err.(type) {
+ case syscall.Errno:
+ switch err {
+ case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT:
+ return true
+ }
+ case *os.SyscallError:
+ switch err := err.Err.(type) {
+ case syscall.Errno:
+ switch err {
+ case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT:
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go
new file mode 100644
index 000000000..22d493585
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_stub.go
@@ -0,0 +1,28 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package nettest
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func maxOpenFiles() int {
+ return defaultMaxOpenFiles
+}
+
+func supportsRawIPSocket() (string, bool) {
+ return fmt.Sprintf("not supported on %s", runtime.GOOS), false
+}
+
+func supportsIPv6MulticastDeliveryOnLoopback() bool {
+ return false
+}
+
+func protocolNotSupported(err error) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go
new file mode 100644
index 000000000..ed13e448b
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package nettest
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "syscall"
+)
+
+func maxOpenFiles() int {
+ var rlim syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
+ return defaultMaxOpenFiles
+ }
+ return int(rlim.Cur)
+}
+
+func supportsRawIPSocket() (string, bool) {
+ if os.Getuid() != 0 {
+ return fmt.Sprintf("must be root on %s", runtime.GOOS), false
+ }
+ return "", true
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go
new file mode 100644
index 000000000..b0a6a30c4
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/helper_windows.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nettest
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+func maxOpenFiles() int {
+ return 4 * defaultMaxOpenFiles /* actually it's 16581375 */
+}
+
+func supportsRawIPSocket() (string, bool) {
+ // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx:
+ // Note: To use a socket of type SOCK_RAW requires administrative privileges.
+ // Users running Winsock applications that use raw sockets must be a member of
+ // the Administrators group on the local computer, otherwise raw socket calls
+ // will fail with an error code of WSAEACCES. On Windows Vista and later, access
+ // for raw sockets is enforced at socket creation. In earlier versions of Windows,
+ // access for raw sockets is enforced during other socket operations.
+ s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0)
+ if err == syscall.WSAEACCES {
+ return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false
+ }
+ if err != nil {
+ return err.Error(), false
+ }
+ syscall.Closesocket(s)
+ return "", true
+}
+
+func supportsIPv6MulticastDeliveryOnLoopback() bool {
+ return true
+}
diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go
new file mode 100644
index 000000000..53ae13a98
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/interface.go
@@ -0,0 +1,94 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nettest
+
+import "net"
+
+// IsMulticastCapable reports whether ifi is an IP multicast-capable
+// network interface. Network must be "ip", "ip4" or "ip6".
+func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) {
+ switch network {
+ case "ip", "ip4", "ip6":
+ default:
+ return nil, false
+ }
+ if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 {
+ return nil, false
+ }
+ return hasRoutableIP(network, ifi)
+}
+
+// RoutedInterface returns a network interface that can route IP
+// traffic and satisfies flags. It returns nil when an appropriate
+// network interface is not found. Network must be "ip", "ip4" or
+// "ip6".
+func RoutedInterface(network string, flags net.Flags) *net.Interface {
+ switch network {
+ case "ip", "ip4", "ip6":
+ default:
+ return nil
+ }
+ ift, err := net.Interfaces()
+ if err != nil {
+ return nil
+ }
+ for _, ifi := range ift {
+ if ifi.Flags&flags != flags {
+ continue
+ }
+ if _, ok := hasRoutableIP(network, &ifi); !ok {
+ continue
+ }
+ return &ifi
+ }
+ return nil
+}
+
+func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) {
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return nil, false
+ }
+ for _, ifa := range ifat {
+ switch ifa := ifa.(type) {
+ case *net.IPAddr:
+ if ip := routableIP(network, ifa.IP); ip != nil {
+ return ip, true
+ }
+ case *net.IPNet:
+ if ip := routableIP(network, ifa.IP); ip != nil {
+ return ip, true
+ }
+ }
+ }
+ return nil, false
+}
+
+func routableIP(network string, ip net.IP) net.IP {
+ if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() {
+ return nil
+ }
+ switch network {
+ case "ip4":
+ if ip := ip.To4(); ip != nil {
+ return ip
+ }
+ case "ip6":
+ if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation
+ return nil
+ }
+ if ip := ip.To16(); ip != nil && ip.To4() == nil {
+ return ip
+ }
+ default:
+ if ip := ip.To4(); ip != nil {
+ return ip
+ }
+ if ip := ip.To16(); ip != nil {
+ return ip
+ }
+ }
+ return nil
+}
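
For orientation, a minimal sketch of using the interface helpers above to pick a multicast-capable interface (not part of the vendored diff; the package is internal to golang.org/x/net):

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/internal/nettest"
)

func main() {
	// Find an up, multicast-capable interface with a routable IPv4 address.
	ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast)
	if ifi == nil {
		fmt.Println("no suitable interface")
		return
	}
	ip, ok := nettest.IsMulticastCapable("ip4", ifi)
	fmt.Println(ifi.Name, ip, ok)
}
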
diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go
new file mode 100644
index 000000000..bb34aec0b
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nettest
+
+const defaultMaxOpenFiles = 256
+
+// MaxOpenFiles returns the maximum number of open files for the
+// caller's process.
+func MaxOpenFiles() int { return maxOpenFiles() }
diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go
new file mode 100644
index 000000000..86de2773d
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/nettest/stack.go
@@ -0,0 +1,49 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nettest provides utilities for IP testing.
+package nettest // import "golang.org/x/net/internal/nettest"
+
+import "net"
+
+// SupportsIPv4 reports whether the platform supports IPv4 networking
+// functionality.
+func SupportsIPv4() bool {
+ ln, err := net.Listen("tcp4", "127.0.0.1:0")
+ if err != nil {
+ return false
+ }
+ ln.Close()
+ return true
+}
+
+// SupportsIPv6 reports whether the platform supports IPv6 networking
+// functionality.
+func SupportsIPv6() bool {
+ ln, err := net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ return false
+ }
+ ln.Close()
+ return true
+}
+
+// SupportsRawIPSocket reports whether the platform supports raw IP
+// sockets.
+func SupportsRawIPSocket() (string, bool) {
+ return supportsRawIPSocket()
+}
+
+// SupportsIPv6MulticastDeliveryOnLoopback reports whether the
+// platform supports IPv6 multicast packet delivery on the software
+// loopback interface.
+func SupportsIPv6MulticastDeliveryOnLoopback() bool {
+ return supportsIPv6MulticastDeliveryOnLoopback()
+}
+
+// ProtocolNotSupported reports whether err is a protocol not
+// supported error.
+func ProtocolNotSupported(err error) bool {
+ return protocolNotSupported(err)
+}
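
For orientation, a minimal sketch of how these capability probes are typically used to gate tests (not part of the vendored diff; the package and test names here are illustrative only):

package mypkg_test

import (
	"testing"

	"golang.org/x/net/internal/nettest"
)

func TestIPv6Feature(t *testing.T) {
	// Skip rather than fail when the host cannot exercise the code path.
	if !nettest.SupportsIPv6() {
		t.Skip("IPv6 is not supported on this host")
	}
	if msg, ok := nettest.SupportsRawIPSocket(); !ok {
		t.Skip(msg)
	}
	// ... exercise the IPv6 / raw-socket code path here ...
}
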
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..1119f3448
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// Keeps a sequence of levels. Each level is responsible for storing data at
+// a given resolution. For example, the first level stores data at a one
+// minute resolution while the second level stores data at a one hour
+// resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series, creating one level per supplied resolution.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+ if level < 0 || level > len(ts.levels) {
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index -= 1
+ }
+ return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+ for _, l := range ts.levels {
+ for i := 0; i < ts.numBuckets; i++ {
+ l.buckets[i].Multiply(factor)
+ }
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If the start or finish times don't fall on bucket boundaries of the
+// same level, the returned values are approximate.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns the specified number of values in a slice covering
+// the most recent time period of length delta.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract fills results with the specified number of observations drawn
+// from the given level over the given time range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := dstStart.Sub(srcStart) / srcInterval
+ srcIndex += int(advance)
+ srcStart = srcStart.Add(advance * srcInterval)
+ }
+
+ // The i'th value is computed as shown below.
+ // interval = (finish - start) / num
+ // i'th value = sum of observation in range
+ // [ start + i * interval,
+ // start + (i + 1) * interval )
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
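
For orientation, a minimal sketch of recording and querying observations with the exported TimeSeries type above (not part of the vendored diff; the package is internal to golang.org/x/net):

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	// Record three observations of value 1 at the current time.
	for i := 0; i < 3; i++ {
		v := timeseries.Float(1)
		ts.Add(&v)
	}

	// Sum of everything recorded so far.
	fmt.Println(ts.Total())

	// Sum over the last minute (approximate at bucket boundaries).
	fmt.Println(ts.Recent(time.Minute))
}
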
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go
new file mode 100644
index 000000000..66325a912
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go
@@ -0,0 +1,170 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package timeseries
+
+import (
+ "math"
+ "testing"
+ "time"
+)
+
+func isNear(x *Float, y float64, tolerance float64) bool {
+ return math.Abs(x.Value()-y) < tolerance
+}
+
+func isApproximate(x *Float, y float64) bool {
+ return isNear(x, y, 1e-2)
+}
+
+func checkApproximate(t *testing.T, o Observable, y float64) {
+ x := o.(*Float)
+ if !isApproximate(x, y) {
+ t.Errorf("Wanted %g, got %g", y, x.Value())
+ }
+}
+
+func checkNear(t *testing.T, o Observable, y, tolerance float64) {
+ x := o.(*Float)
+ if !isNear(x, y, tolerance) {
+ t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value())
+ }
+}
+
+var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC)
+
+func tu(s int64) time.Time {
+ return baseTime.Add(time.Duration(s) * time.Second)
+}
+
+func tu2(s int64, ns int64) time.Time {
+ return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond)
+}
+
+func TestBasicTimeSeries(t *testing.T) {
+ ts := NewTimeSeries(NewFloat)
+ fo := new(Float)
+ *fo = Float(10)
+ ts.AddWithTime(fo, tu(1))
+ ts.AddWithTime(fo, tu(1))
+ ts.AddWithTime(fo, tu(1))
+ ts.AddWithTime(fo, tu(1))
+ checkApproximate(t, ts.Range(tu(0), tu(1)), 40)
+ checkApproximate(t, ts.Total(), 40)
+ ts.AddWithTime(fo, tu(3))
+ ts.AddWithTime(fo, tu(3))
+ ts.AddWithTime(fo, tu(3))
+ checkApproximate(t, ts.Range(tu(0), tu(2)), 40)
+ checkApproximate(t, ts.Range(tu(2), tu(4)), 30)
+ checkApproximate(t, ts.Total(), 70)
+ ts.AddWithTime(fo, tu(1))
+ ts.AddWithTime(fo, tu(1))
+ checkApproximate(t, ts.Range(tu(0), tu(2)), 60)
+ checkApproximate(t, ts.Range(tu(2), tu(4)), 30)
+ checkApproximate(t, ts.Total(), 90)
+ *fo = Float(100)
+ ts.AddWithTime(fo, tu(100))
+ checkApproximate(t, ts.Range(tu(99), tu(100)), 100)
+ checkApproximate(t, ts.Range(tu(0), tu(4)), 36)
+ checkApproximate(t, ts.Total(), 190)
+ *fo = Float(10)
+ ts.AddWithTime(fo, tu(1))
+ ts.AddWithTime(fo, tu(1))
+ checkApproximate(t, ts.Range(tu(0), tu(4)), 44)
+ checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100)
+ checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100)
+ checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100)
+ checkApproximate(t, ts.Total(), 210)
+
+ for i, l := range ts.ComputeRange(tu(36), tu(100), 64) {
+ if i == 63 {
+ checkApproximate(t, l, 100)
+ } else {
+ checkApproximate(t, l, 0)
+ }
+ }
+
+ checkApproximate(t, ts.Range(tu(0), tu(100)), 210)
+ checkApproximate(t, ts.Range(tu(10), tu(100)), 100)
+
+ for i, l := range ts.ComputeRange(tu(0), tu(100), 100) {
+ if i < 10 {
+ checkApproximate(t, l, 11)
+ } else if i >= 90 {
+ checkApproximate(t, l, 10)
+ } else {
+ checkApproximate(t, l, 0)
+ }
+ }
+}
+
+func TestFloat(t *testing.T) {
+ f := Float(1)
+ if g, w := f.String(), "1"; g != w {
+ t.Errorf("Float(1).String = %q; want %q", g, w)
+ }
+ f2 := Float(2)
+ var o Observable = &f2
+ f.Add(o)
+ if g, w := f.Value(), 3.0; g != w {
+ t.Errorf("Float post-add = %v; want %v", g, w)
+ }
+ f.Multiply(2)
+ if g, w := f.Value(), 6.0; g != w {
+ t.Errorf("Float post-multiply = %v; want %v", g, w)
+ }
+ f.Clear()
+ if g, w := f.Value(), 0.0; g != w {
+ t.Errorf("Float post-clear = %v; want %v", g, w)
+ }
+ f.CopyFrom(&f2)
+ if g, w := f.Value(), 2.0; g != w {
+ t.Errorf("Float post-CopyFrom = %v; want %v", g, w)
+ }
+}
+
+type mockClock struct {
+ time time.Time
+}
+
+func (m *mockClock) Time() time.Time { return m.time }
+func (m *mockClock) Set(t time.Time) { m.time = t }
+
+const buckets = 6
+
+var testResolutions = []time.Duration{
+ 10 * time.Second, // level holds one minute of observations
+ 100 * time.Second, // level holds ten minutes of observations
+ 10 * time.Minute, // level holds one hour of observations
+}
+
+// TestTimeSeries uses a small number of buckets to force a higher
+// error rate on approximations from the timeseries.
+type TestTimeSeries struct {
+ timeSeries
+}
+
+func TestExpectedErrorRate(t *testing.T) {
+ ts := new(TestTimeSeries)
+ fake := new(mockClock)
+ fake.Set(time.Now())
+ ts.timeSeries.init(testResolutions, NewFloat, buckets, fake)
+ for i := 1; i <= 61*61; i++ {
+ fake.Set(fake.Time().Add(1 * time.Second))
+ ob := Float(1)
+ ts.AddWithTime(&ob, fake.Time())
+
+ // The results should be accurate within one missing bucket (1/6) of the observations recorded.
+ checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10)
+ checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100)
+ checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600)
+ }
+}
+
+func min(a, b float64) float64 {
+ if a < b {
+ return a
+ }
+ return b
+}
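A minimal usage sketch for this internal timeseries package, restricted to the calls exercised by the test above (NewTimeSeries, AddWithTime, Range, Total); since the package is internal to golang.org/x/net, the sketch is assumed to live in the same package and the values are illustrative:

	func exampleTimeSeriesUsage() {
		start := time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC)
		ts := NewTimeSeries(NewFloat)

		// Record two observations of value 5, one second apart.
		v := Float(5)
		ts.AddWithTime(&v, start.Add(1*time.Second))
		ts.AddWithTime(&v, start.Add(2*time.Second))

		// Range and Total return an Observable, which is a *Float for a
		// series created with NewFloat.
		window := ts.Range(start, start.Add(10*time.Second)).(*Float).Value()
		total := ts.Total().(*Float).Value()
		_, _ = window, total // both are approximately 10 here
	}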
diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go
new file mode 100644
index 000000000..b44da9054
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/bpf_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv4"
+)
+
+func TestBPF(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ l, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ p := ipv4.NewPacketConn(l)
+
+ // This filter accepts UDP packets whose first payload byte is
+ // even.
+ prog, err := bpf.Assemble([]bpf.Instruction{
+ // Load the first byte of the payload (skipping UDP header).
+ bpf.LoadAbsolute{Off: 8, Size: 1},
+ // Select LSB of the byte.
+ bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1},
+ // Byte is even?
+ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1},
+ // Accept.
+ bpf.RetConstant{Val: 4096},
+ // Ignore.
+ bpf.RetConstant{Val: 0},
+ })
+ if err != nil {
+ t.Fatalf("compiling BPF: %s", err)
+ }
+
+ if err = p.SetBPF(prog); err != nil {
+ t.Fatalf("attaching filter to Conn: %s", err)
+ }
+
+ s, err := net.Dial("udp4", l.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+ go func() {
+ for i := byte(0); i < 10; i++ {
+ s.Write([]byte{i})
+ }
+ }()
+
+ l.SetDeadline(time.Now().Add(2 * time.Second))
+ seen := make([]bool, 5)
+ for {
+ var b [512]byte
+ n, _, err := l.ReadFrom(b[:])
+ if err != nil {
+ t.Fatalf("reading from listener: %s", err)
+ }
+ if n != 1 {
+ t.Fatalf("unexpected packet length, want 1, got %d", n)
+ }
+ if b[0] >= 10 {
+ t.Fatalf("unexpected byte, want 0-9, got %d", b[0])
+ }
+ if b[0]%2 != 0 {
+ t.Fatalf("got odd byte %d, wanted only even bytes", b[0])
+ }
+ seen[b[0]/2] = true
+
+ seenAll := true
+ for _, v := range seen {
+ if !v {
+ seenAll = false
+ break
+ }
+ }
+ if seenAll {
+ break
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/bpfopt_linux.go b/vendor/golang.org/x/net/ipv4/bpfopt_linux.go
new file mode 100644
index 000000000..e9bbda96e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/bpfopt_linux.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/internal/netreflect"
+)
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ prog := sysSockFProg{
+ Len: uint16(len(filter)),
+ Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])),
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog))))
+}
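A condensed sketch of the Linux-only call sequence for SetBPF above, mirroring the filter assembled in bpf_test.go earlier in this diff; the listen address and the filter contents are purely illustrative:

	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		// error handling
	}
	defer c.Close()
	// Accept only datagrams whose first payload byte is even; drop the rest.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 8, Size: 1},                     // first payload byte after the UDP header
		bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1},           // keep the least significant bit
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, // even?
		bpf.RetConstant{Val: 4096},                            // accept
		bpf.RetConstant{Val: 0},                               // drop
	})
	if err != nil {
		// error handling
	}
	if err := ipv4.NewPacketConn(c).SetBPF(prog); err != nil {
		// error handling
	}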
diff --git a/vendor/golang.org/x/net/ipv4/bpfopt_stub.go b/vendor/golang.org/x/net/ipv4/bpfopt_stub.go
new file mode 100644
index 000000000..c4a8481f0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/bpfopt_stub.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv4
+
+import "golang.org/x/net/bpf"
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go
new file mode 100644
index 000000000..8cadfd7f3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control.go
@@ -0,0 +1,70 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "fmt"
+ "net"
+ "sync"
+)
+
+type rawOpt struct {
+ sync.RWMutex
+ cflags ControlFlags
+}
+
+func (c *rawOpt) set(f ControlFlags) { c.cflags |= f }
+func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f }
+func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }
+
+type ControlFlags uint
+
+const (
+ FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet
+ FlagSrc // pass the source address on the received packet
+ FlagDst // pass the destination address on the received packet
+ FlagInterface // pass the interface index on the received packet
+)
+
+// A ControlMessage represents per-packet IP-level socket options.
+type ControlMessage struct {
+	// Receiving socket options: SetControlMessage allows the
+	// options to be received from the protocol stack via the
+	// ReadFrom method of PacketConn or RawConn.
+	//
+	// Specifying socket options: passing a ControlMessage to the
+	// WriteTo method of PacketConn or RawConn allows the options
+	// to be sent to the protocol stack.
+ //
+ TTL int // time-to-live, receiving only
+ Src net.IP // source address, specifying only
+ Dst net.IP // destination address, receiving only
+ IfIndex int // interface index, must be 1 <= value when specifying
+}
+
+func (cm *ControlMessage) String() string {
+ if cm == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex)
+}
+
+// Ancillary data socket options
+const (
+ ctlTTL = iota // header field
+ ctlSrc // header field
+ ctlDst // header field
+ ctlInterface // inbound or outbound interface
+ ctlPacketInfo // inbound or outbound packet path
+ ctlMax
+)
+
+// A ctlOpt represents a binding for ancillary data socket option.
+type ctlOpt struct {
+ name int // option name, must be equal or greater than 1
+ length int // option length
+ marshal func([]byte, *ControlMessage) []byte
+ parse func(*ControlMessage, []byte)
+}
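A minimal sketch of the receive and send directions described by the ControlMessage fields above; it assumes an ipv4.PacketConn p wrapping a UDP socket and an interface en0 obtained from net.InterfaceByName, both illustrative:

	// Receiving: ask the kernel to report the TTL and destination address,
	// then read them back per packet through the returned ControlMessage.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst, true); err != nil {
		// error handling
	}
	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		// error handling
	}
	fmt.Printf("%d bytes from %v: ttl=%d dst=%v\n", n, src, cm.TTL, cm.Dst)

	// Specifying: pass a ControlMessage to WriteTo to pin the outgoing
	// interface (and, where supported, the source address) for one packet.
	wcm := &ipv4.ControlMessage{IfIndex: en0.Index}
	if _, err := p.WriteTo(b[:n], wcm, src); err != nil {
		// error handling
	}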
diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go
new file mode 100644
index 000000000..33d8bc8b3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control_bsd.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func marshalDst(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIP
+ m.Type = sysIP_RECVDSTADDR
+ m.SetLen(syscall.CmsgLen(net.IPv4len))
+ return b[syscall.CmsgSpace(net.IPv4len):]
+}
+
+func parseDst(cm *ControlMessage, b []byte) {
+ cm.Dst = b[:net.IPv4len]
+}
+
+func marshalInterface(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIP
+ m.Type = sysIP_RECVIF
+ m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink))
+ return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):]
+}
+
+func parseInterface(cm *ControlMessage, b []byte) {
+ sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0]))
+ cm.IfIndex = int(sadl.Index)
+}
diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go
new file mode 100644
index 000000000..444782f39
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin linux
+
+package ipv4
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func marshalPacketInfo(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIP
+ m.Type = sysIP_PKTINFO
+ m.SetLen(syscall.CmsgLen(sysSizeofInetPktinfo))
+ if cm != nil {
+ pi := (*sysInetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+ if ip := cm.Src.To4(); ip != nil {
+ copy(pi.Spec_dst[:], ip)
+ }
+ if cm.IfIndex > 0 {
+ pi.setIfindex(cm.IfIndex)
+ }
+ }
+ return b[syscall.CmsgSpace(sysSizeofInetPktinfo):]
+}
+
+func parsePacketInfo(cm *ControlMessage, b []byte) {
+ pi := (*sysInetPktinfo)(unsafe.Pointer(&b[0]))
+ cm.IfIndex = int(pi.Ifindex)
+ cm.Dst = pi.Addr[:]
+}
diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go
new file mode 100644
index 000000000..5f5a1bd0d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv4
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ return errOpNoSupport
+}
+
+func newControlMessage(opt *rawOpt) []byte {
+ return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ return nil, errOpNoSupport
+}
+
+func marshalControlMessage(cm *ControlMessage) []byte {
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go
new file mode 100644
index 000000000..6b6682d65
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control_unix.go
@@ -0,0 +1,164 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package ipv4
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ opt.Lock()
+ defer opt.Unlock()
+ if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagTTL)
+ } else {
+ opt.clear(FlagTTL)
+ }
+ }
+ if sockOpts[ssoPacketInfo].name > 0 {
+ if cf&(FlagSrc|FlagDst|FlagInterface) != 0 {
+ if err := setInt(s, &sockOpts[ssoPacketInfo], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(cf & (FlagSrc | FlagDst | FlagInterface))
+ } else {
+ opt.clear(cf & (FlagSrc | FlagDst | FlagInterface))
+ }
+ }
+ } else {
+ if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceiveDst], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagDst)
+ } else {
+ opt.clear(FlagDst)
+ }
+ }
+ if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagInterface)
+ } else {
+ opt.clear(FlagInterface)
+ }
+ }
+ }
+ return nil
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+ opt.RLock()
+ var l int
+ if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlTTL].length)
+ }
+ if ctlOpts[ctlPacketInfo].name > 0 {
+ if opt.isset(FlagSrc | FlagDst | FlagInterface) {
+ l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+ }
+ } else {
+ if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlDst].length)
+ }
+ if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlInterface].length)
+ }
+ }
+ if l > 0 {
+ oob = make([]byte, l)
+ b := oob
+ if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 {
+ b = ctlOpts[ctlTTL].marshal(b, nil)
+ }
+ if ctlOpts[ctlPacketInfo].name > 0 {
+ if opt.isset(FlagSrc | FlagDst | FlagInterface) {
+ b = ctlOpts[ctlPacketInfo].marshal(b, nil)
+ }
+ } else {
+ if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 {
+ b = ctlOpts[ctlDst].marshal(b, nil)
+ }
+ if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 {
+ b = ctlOpts[ctlInterface].marshal(b, nil)
+ }
+ }
+ }
+ opt.RUnlock()
+ return
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ if len(b) == 0 {
+ return nil, nil
+ }
+ cmsgs, err := syscall.ParseSocketControlMessage(b)
+ if err != nil {
+ return nil, os.NewSyscallError("parse socket control message", err)
+ }
+ cm := &ControlMessage{}
+ for _, m := range cmsgs {
+ if m.Header.Level != iana.ProtocolIP {
+ continue
+ }
+ switch int(m.Header.Type) {
+ case ctlOpts[ctlTTL].name:
+ ctlOpts[ctlTTL].parse(cm, m.Data[:])
+ case ctlOpts[ctlDst].name:
+ ctlOpts[ctlDst].parse(cm, m.Data[:])
+ case ctlOpts[ctlInterface].name:
+ ctlOpts[ctlInterface].parse(cm, m.Data[:])
+ case ctlOpts[ctlPacketInfo].name:
+ ctlOpts[ctlPacketInfo].parse(cm, m.Data[:])
+ }
+ }
+ return cm, nil
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+ if cm == nil {
+ return nil
+ }
+ var l int
+ pktinfo := false
+ if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) {
+ pktinfo = true
+ l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+ }
+ if l > 0 {
+ oob = make([]byte, l)
+ b := oob
+ if pktinfo {
+ b = ctlOpts[ctlPacketInfo].marshal(b, cm)
+ }
+ }
+ return
+}
+
+func marshalTTL(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIP
+ m.Type = sysIP_RECVTTL
+ m.SetLen(syscall.CmsgLen(1))
+ return b[syscall.CmsgSpace(1):]
+}
+
+func parseTTL(cm *ControlMessage, b []byte) {
+ cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0])))
+}
diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go
new file mode 100644
index 000000000..49a113b58
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/control_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "syscall"
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ // TODO(mikio): implement this
+ return syscall.EWINDOWS
+}
+
+func newControlMessage(opt *rawOpt) []byte {
+ // TODO(mikio): implement this
+ return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ // TODO(mikio): implement this
+ return nil, syscall.EWINDOWS
+}
+
+func marshalControlMessage(cm *ControlMessage) []byte {
+ // TODO(mikio): implement this
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go
new file mode 100644
index 000000000..731d56a71
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go
@@ -0,0 +1,77 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_STRIPHDR = C.IP_STRIPHDR
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_BOUND_IF = C.IP_BOUND_IF
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+ sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sysSockaddrStorage C.struct_sockaddr_storage
+
+type sysSockaddrInet C.struct_sockaddr_in
+
+type sysInetPktinfo C.struct_in_pktinfo
+
+type sysIPMreq C.struct_ip_mreq
+
+type sysIPMreqn C.struct_ip_mreqn
+
+type sysIPMreqSource C.struct_ip_mreq_source
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
new file mode 100644
index 000000000..08e3b855d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type sysIPMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go
new file mode 100644
index 000000000..f12ca327b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_SENDSRCADDR = C.IP_SENDSRCADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_ONESBCAST = C.IP_ONESBCAST
+ sysIP_BINDANY = C.IP_BINDANY
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_MINTTL = C.IP_MINTTL
+ sysIP_DONTFRAG = C.IP_DONTFRAG
+ sysIP_RECVTOS = C.IP_RECVTOS
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+ sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sysSockaddrStorage C.struct_sockaddr_storage
+
+type sysSockaddrInet C.struct_sockaddr_in
+
+type sysIPMreq C.struct_ip_mreq
+
+type sysIPMreqn C.struct_ip_mreqn
+
+type sysIPMreqSource C.struct_ip_mreq_source
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go
new file mode 100644
index 000000000..c4042eb60
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_linux.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <time.h>
+
+#include <linux/errqueue.h>
+#include <linux/icmp.h>
+#include <linux/in.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_PKTOPTIONS = C.IP_PKTOPTIONS
+ sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER
+ sysIP_RECVERR = C.IP_RECVERR
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_RECVTOS = C.IP_RECVTOS
+ sysIP_MTU = C.IP_MTU
+ sysIP_FREEBIND = C.IP_FREEBIND
+ sysIP_TRANSPARENT = C.IP_TRANSPARENT
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR
+ sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR
+ sysIP_MINTTL = C.IP_MINTTL
+ sysIP_NODEFRAG = C.IP_NODEFRAG
+ sysIP_UNICAST_IF = C.IP_UNICAST_IF
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_MSFILTER = C.IP_MSFILTER
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+ sysMCAST_MSFILTER = C.MCAST_MSFILTER
+ sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL
+
+ //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT
+ //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT
+ //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO
+ //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE
+ //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE
+ //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT
+
+ sysICMP_FILTER = C.ICMP_FILTER
+
+ sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE
+ sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL
+ sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP
+ sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6
+ sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS
+ sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING
+
+ sysSOL_SOCKET = C.SOL_SOCKET
+ sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+ sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+ sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+ sysSizeofSockExtendedErr = C.sizeof_struct_sock_extended_err
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+ sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+ sysSizeofICMPFilter = C.sizeof_struct_icmp_filter
+)
+
+type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sysSockaddrInet C.struct_sockaddr_in
+
+type sysInetPktinfo C.struct_in_pktinfo
+
+type sysSockExtendedErr C.struct_sock_extended_err
+
+type sysIPMreq C.struct_ip_mreq
+
+type sysIPMreqn C.struct_ip_mreqn
+
+type sysIPMreqSource C.struct_ip_mreq_source
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
+
+type sysICMPFilter C.struct_icmp_filter
+
+type sysSockFProg C.struct_sock_fprog
+
+type sysSockFilter C.struct_sock_filter
diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go
new file mode 100644
index 000000000..8642354f4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type sysIPMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go
new file mode 100644
index 000000000..8642354f4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type sysIPMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go
new file mode 100644
index 000000000..bb74afa49
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go
@@ -0,0 +1,57 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVSLLA = C.IP_RECVSLLA
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_NEXTHOP = C.IP_NEXTHOP
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+ sysIP_DONTFRAG = C.IP_DONTFRAG
+ sysIP_BOUND_IF = C.IP_BOUND_IF
+ sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC
+ sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL
+ sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+
+ sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+
+ sysSizeofIPMreq = C.sizeof_struct_ip_mreq
+ sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+)
+
+type sysInetPktinfo C.struct_in_pktinfo
+
+type sysIPMreq C.struct_ip_mreq
+
+type sysIPMreqSource C.struct_ip_mreq_source
diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go
new file mode 100644
index 000000000..40b5e1cdc
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go
@@ -0,0 +1,253 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// MulticastTTL returns the time-to-live field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastTTL() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoMulticastTTL])
+}
+
+// SetMulticastTTL sets the time-to-live field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastTTL(ttl int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoMulticastTTL], ttl)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return nil, err
+ }
+ return getInterface(s, &sockOpts[ssoMulticastInterface])
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInterface(s, &sockOpts[ssoMulticastInterface], ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+ if !c.ok() {
+ return false, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return false, err
+ }
+ on, err := getInt(s, &sockOpts[ssoMulticastLoopback])
+ if err != nil {
+ return false, err
+ }
+ return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoMulticastLoopback], boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default, all sources that can cast data to the group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on the platform and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return setGroup(s, &sockOpts[ssoJoinGroup], ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is an any-source group or a
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return setGroup(s, &sockOpts[ssoLeaveGroup], ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on the platform and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP4(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoJoinSourceGroup], ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP4(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src)
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the any-source groups already joined via JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP4(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoBlockSourceGroup], ifi, grp, src)
+}
+
+// IncludeSourceSpecificGroup re-includes, on the interface ifi, a
+// source-specific group previously excluded by ExcludeSourceSpecificGroup.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP4(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src)
+}
+
+// ICMPFilter returns an ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return nil, err
+ }
+ return getICMPFilter(s, &sockOpts[ssoICMPFilter])
+}
+
+// SetICMPFilter deploys the ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setICMPFilter(s, &sockOpts[ssoICMPFilter], f)
+}
diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go
new file mode 100644
index 000000000..b74df6931
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go
@@ -0,0 +1,106 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv4
+
+import "net"
+
+// MulticastTTL returns the time-to-live field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastTTL() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetMulticastTTL sets the time-to-live field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastTTL(ttl int) error {
+ return errOpNoSupport
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+ return nil, errOpNoSupport
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+ return errOpNoSupport
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+ return false, errOpNoSupport
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and send back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+ return errOpNoSupport
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default, all sources that can cast data to the group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on the platform and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+ return errOpNoSupport
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is an any-source group or a
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+ return errOpNoSupport
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on the platform and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the any-source groups already joined via JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// IncludeSourceSpecificGroup re-includes, on the interface ifi, a
+// source-specific group previously excluded by ExcludeSourceSpecificGroup.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// ICMPFilter returns an ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+ return nil, errOpNoSupport
+}
+
+// SetICMPFilter deploys the ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go
new file mode 100644
index 000000000..d58a4c9e6
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/doc.go
@@ -0,0 +1,242 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ipv4 implements IP-level socket options for the Internet
+// Protocol version 4.
+//
+// The package provides IP-level socket options that allow
+// manipulation of IPv4 facilities.
+//
+// The IPv4 protocol and basic host requirements for IPv4 are defined
+// in RFC 791 and RFC 1122.
+// Host extensions for multicasting and socket interface extensions
+// for multicast source filters are defined in RFC 1112 and RFC 3678.
+// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC
+// 3376.
+// Source-specific multicast is defined in RFC 4607.
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv4 transport. When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, ipv4.Conn is used to set the type-of-service field on
+// the IPv4 header for each packet.
+//
+// ln, err := net.Listen("tcp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer ln.Close()
+// for {
+// c, err := ln.Accept()
+// if err != nil {
+// // error handling
+// }
+// go func(c net.Conn) {
+// defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil {
+// // error handling
+// }
+// if _, err := c.Write(data); err != nil {
+// // error handling
+// }
+// }(c)
+// }
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv4 transport. A few network facilities must be prepared before
+// you begin multicasting, at a minimum joining network interfaces and
+// multicast groups.
+//
+// en0, err := net.InterfaceByName("en0")
+// if err != nil {
+// // error handling
+// }
+// en1, err := net.InterfaceByIndex(911)
+// if err != nil {
+// // error handling
+// }
+// group := net.IPv4(224, 0, 0, 250)
+//
+// First, an application listens to an appropriate address with an
+// appropriate service port.
+//
+// c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+//
+// Second, the application joins multicast groups and starts listening
+// to the groups on the specified network interfaces. Note that the
+// service port for the transport layer protocol does not matter with
+// this operation as joining groups affects only network and link layer
+// protocols, such as IPv4 and Ethernet.
+//
+// p := ipv4.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+//
+// The application might want per-packet control messages exchanged
+// with the protocol stack within the kernel. When the application
+// needs a destination address on an incoming packet,
+// SetControlMessage of ipv4.PacketConn is used to enable control
+// message transmissions.
+//
+// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
+// // error handling
+// }
+//
+// The application could identify whether the received packets are
+// of interest by using the control message that contains the
+// destination address of the received packet.
+//
+// b := make([]byte, 1500)
+// for {
+// n, cm, src, err := p.ReadFrom(b)
+// if err != nil {
+// // error handling
+// }
+// if cm.Dst.IsMulticast() {
+// if cm.Dst.Equal(group) {
+// // joined group, do something
+// } else {
+// // unknown group, discard
+// continue
+// }
+// }
+//
+// The application can also send both unicast and multicast packets.
+//
+// p.SetTOS(0x0)
+// p.SetTTL(16)
+// if _, err := p.WriteTo(data, nil, src); err != nil {
+// // error handling
+// }
+// dst := &net.UDPAddr{IP: group, Port: 1024}
+// for _, ifi := range []*net.Interface{en0, en1} {
+// if err := p.SetMulticastInterface(ifi); err != nil {
+// // error handling
+// }
+// p.SetMulticastTTL(2)
+// if _, err := p.WriteTo(data, nil, dst); err != nil {
+// // error handling
+// }
+// }
+// }
+//
+//
+// More multicasting
+//
+// An application that uses PacketConn or RawConn may join multiple
+// multicast groups. For example, a UDP listener with port 1024 might
+// join two different groups across two different network
+// interfaces by using:
+//
+// c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+// p := ipv4.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+// // error handling
+// }
+//
+// It is possible for multiple UDP listeners that listen on the same
+// UDP port to join the same multicast group. The net package will
+// provide a socket that listens to a wildcard address with a reusable
+// UDP port when an appropriate multicast address prefix is passed to
+// net.ListenPacket or net.ListenUDP.
+//
+// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c1.Close()
+// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c2.Close()
+// p1 := ipv4.NewPacketConn(c1)
+// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// p2 := ipv4.NewPacketConn(c2)
+// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+//
+// Also it is possible for the application to leave or rejoin a
+// multicast group on the network interface.
+//
+// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil {
+// // error handling
+// }
+//
+//
+// Source-specific multicasting
+//
+// An application that uses PacketConn or RawConn on an IGMPv3-supported
+// platform is able to join source-specific multicast groups.
+// The application may use JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup for the operation known as "include" mode,
+//
+// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}
+//	ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
+// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+//
+// or JoinGroup, ExcludeSourceSpecificGroup,
+// IncludeSourceSpecificGroup and LeaveGroup for the operation known
+// as "exclude" mode.
+//
+// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)}
+// if err := p.JoinGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+//
+// Note that what happens when an application running on a platform
+// without IGMPv3 support uses JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup depends on the platform implementation.
+// In general the platform tries to fall back to conversations using
+// IGMPv1 or IGMPv2 and starts listening to multicast traffic.
+// In the fallback case, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup may return an error.
+package ipv4 // import "golang.org/x/net/ipv4"
diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go
new file mode 100644
index 000000000..a8ca2ff67
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/endpoint.go
@@ -0,0 +1,189 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// A Conn represents a network endpoint that uses the IPv4 transport.
+// It is used to control basic IP-level socket options such as TOS and
+// TTL.
+type Conn struct {
+ genericOpt
+}
+
+type genericOpt struct {
+ net.Conn
+}
+
+func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// NewConn returns a new Conn.
+func NewConn(c net.Conn) *Conn {
+ return &Conn{
+ genericOpt: genericOpt{Conn: c},
+ }
+}
+
+// A PacketConn represents a packet network endpoint that uses the
+// IPv4 transport. It is used to control several IP-level socket
+// options including multicasting. It also provides datagram based
+// network I/O methods specific to the IPv4 and higher layer protocols
+// such as UDP.
+type PacketConn struct {
+ genericOpt
+ dgramOpt
+ payloadHandler
+}
+
+type dgramOpt struct {
+ net.PacketConn
+}
+
+func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil }
+
+// SetControlMessage sets the per packet IP-level socket options.
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setControlMessage(s, &c.payloadHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.Close()
+}
+
+// NewPacketConn returns a new PacketConn using c as its underlying
+// transport.
+func NewPacketConn(c net.PacketConn) *PacketConn {
+ p := &PacketConn{
+ genericOpt: genericOpt{Conn: c.(net.Conn)},
+ dgramOpt: dgramOpt{PacketConn: c},
+ payloadHandler: payloadHandler{PacketConn: c},
+ }
+ if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 {
+ if s, err := netreflect.PacketSocketOf(c); err == nil {
+ setInt(s, &sockOpts[ssoStripHeader], boolint(true))
+ }
+ }
+ return p
+}
+
+// A RawConn represents a packet network endpoint that uses the IPv4
+// transport. It is used to control several IP-level socket options
+// including IPv4 header manipulation. It also provides datagram
+// based network I/O methods specific to the IPv4 and higher layer
+// protocols that handle IPv4 datagrams directly, such as OSPF and GRE.
+type RawConn struct {
+ genericOpt
+ dgramOpt
+ packetHandler
+}
+
+// SetControlMessage sets the per packet IP-level socket options.
+func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error {
+ if !c.packetHandler.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setControlMessage(s, &c.packetHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *RawConn) SetDeadline(t time.Time) error {
+ if !c.packetHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.packetHandler.c.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *RawConn) SetReadDeadline(t time.Time) error {
+ if !c.packetHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.packetHandler.c.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *RawConn) SetWriteDeadline(t time.Time) error {
+ if !c.packetHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.packetHandler.c.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *RawConn) Close() error {
+ if !c.packetHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.packetHandler.c.Close()
+}
+
+// NewRawConn returns a new RawConn using c as its underlying
+// transport.
+func NewRawConn(c net.PacketConn) (*RawConn, error) {
+ r := &RawConn{
+ genericOpt: genericOpt{Conn: c.(net.Conn)},
+ dgramOpt: dgramOpt{PacketConn: c},
+ packetHandler: packetHandler{c: c.(*net.IPConn)},
+ }
+ s, err := netreflect.PacketSocketOf(c)
+ if err != nil {
+ return nil, err
+ }
+ if err := setInt(s, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go
new file mode 100644
index 000000000..4f5e2f312
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/example_test.go
@@ -0,0 +1,224 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "runtime"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/ipv4"
+)
+
+func ExampleConn_markingTCP() {
+ ln, err := net.Listen("tcp", "0.0.0.0:1024")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer ln.Close()
+
+ for {
+ c, err := ln.Accept()
+ if err != nil {
+ log.Fatal(err)
+ }
+ go func(c net.Conn) {
+ defer c.Close()
+ if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil {
+ p := ipv4.NewConn(c)
+ if err := p.SetTOS(0x28); err != nil { // DSCP AF11
+ log.Fatal(err)
+ }
+ if err := p.SetTTL(128); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil {
+ log.Fatal(err)
+ }
+ }(c)
+ }
+}
+
+func ExamplePacketConn_servingOneShotMulticastDNS() {
+ c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv4.NewPacketConn(c)
+
+ en0, err := net.InterfaceByName("en0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)}
+ if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil {
+ log.Fatal(err)
+ }
+ defer p.LeaveGroup(en0, &mDNSLinkLocal)
+ if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
+ log.Fatal(err)
+ }
+
+ b := make([]byte, 1500)
+ for {
+ _, cm, peer, err := p.ReadFrom(b)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) {
+ continue
+ }
+ answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this
+ if _, err := p.WriteTo(answers, nil, peer); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func ExamplePacketConn_tracingIPPacketRoute() {
+ // Tracing an IP packet route to www.google.com.
+
+ const host = "www.google.com"
+ ips, err := net.LookupIP(host)
+ if err != nil {
+ log.Fatal(err)
+ }
+ var dst net.IPAddr
+ for _, ip := range ips {
+ if ip.To4() != nil {
+ dst.IP = ip
+ fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host)
+ break
+ }
+ }
+ if dst.IP == nil {
+ log.Fatal("no A record found")
+ }
+
+ c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv4.NewPacketConn(c)
+
+ if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
+ log.Fatal(err)
+ }
+ wm := icmp.Message{
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }
+
+ rb := make([]byte, 1500)
+ for i := 1; i <= 64; i++ { // up to 64 hops
+ wm.Body.(*icmp.Echo).Seq = i
+ wb, err := wm.Marshal(nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := p.SetTTL(i); err != nil {
+ log.Fatal(err)
+ }
+
+ // In the real world there are usually several
+ // traffic-engineered paths for each hop, so you
+ // may need to probe each hop a few times.
+ begin := time.Now()
+ if _, err := p.WriteTo(wb, nil, &dst); err != nil {
+ log.Fatal(err)
+ }
+ if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
+ log.Fatal(err)
+ }
+ n, cm, peer, err := p.ReadFrom(rb)
+ if err != nil {
+ if err, ok := err.(net.Error); ok && err.Timeout() {
+ fmt.Printf("%v\t*\n", i)
+ continue
+ }
+ log.Fatal(err)
+ }
+ rm, err := icmp.ParseMessage(1, rb[:n])
+ if err != nil {
+ log.Fatal(err)
+ }
+ rtt := time.Since(begin)
+
+ // In the real world you need to determine whether the
+ // received message is yours using ControlMessage.Src,
+ // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq.
+ switch rm.Type {
+ case ipv4.ICMPTypeTimeExceeded:
+ names, _ := net.LookupAddr(peer.String())
+ fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm)
+ case ipv4.ICMPTypeEchoReply:
+ names, _ := net.LookupAddr(peer.String())
+ fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm)
+ return
+ default:
+ log.Printf("unknown ICMP message: %+v\n", rm)
+ }
+ }
+}
+
+func ExampleRawConn_advertisingOSPFHello() {
+ c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ en0, err := net.InterfaceByName("en0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)}
+ if err := r.JoinGroup(en0, &allSPFRouters); err != nil {
+ log.Fatal(err)
+ }
+ defer r.LeaveGroup(en0, &allSPFRouters)
+
+ hello := make([]byte, 24) // fake hello data, you need to implement this
+ ospf := make([]byte, 24) // fake ospf header, you need to implement this
+ ospf[0] = 2 // version 2
+ ospf[1] = 1 // hello packet
+ ospf = append(ospf, hello...)
+ iph := &ipv4.Header{
+ Version: ipv4.Version,
+ Len: ipv4.HeaderLen,
+ TOS: 0xc0, // DSCP CS6
+ TotalLen: ipv4.HeaderLen + len(ospf),
+ TTL: 1,
+ Protocol: 89,
+ Dst: allSPFRouters.IP.To4(),
+ }
+
+ var cm *ipv4.ControlMessage
+ switch runtime.GOOS {
+ case "darwin", "linux":
+ cm = &ipv4.ControlMessage{IfIndex: en0.Index}
+ default:
+ if err := r.SetMulticastInterface(en0); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if err := r.WriteTo(iph, ospf, cm); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go
new file mode 100644
index 000000000..cbe70327b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/gen.go
@@ -0,0 +1,208 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates system adaptation constants and types,
+// internet protocol constants and tables by reading template files
+// and IANA protocol registries.
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+func main() {
+ if err := genzsys(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ if err := geniana(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func genzsys() error {
+ defs := "defs_" + runtime.GOOS + ".go"
+ f, err := os.Open(defs)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ f.Close()
+ cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
+ b, err := cmd.Output()
+ if err != nil {
+ return err
+ }
+ // The ipv4 package still supports go1.2, so files generated for
+ // platforms added in go1.3 and above are given explicit build
+ // tags that a go1.2 toolchain will not match.
+ switch {
+ case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris":
+ b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1)
+ case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"):
+ b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1)
+ }
+ b, err = format.Source(b)
+ if err != nil {
+ return err
+ }
+ zsys := "zsys_" + runtime.GOOS + ".go"
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
+ }
+ if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+ return err
+ }
+ return nil
+}
+
+var registries = []struct {
+ url string
+ parse func(io.Writer, io.Reader) error
+}{
+ {
+ "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
+ parseICMPv4Parameters,
+ },
+}
+
+func geniana() error {
+ var bb bytes.Buffer
+ fmt.Fprintf(&bb, "// go generate gen.go\n")
+ fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+ fmt.Fprintf(&bb, "package ipv4\n\n")
+ for _, r := range registries {
+ resp, err := http.Get(r.url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
+ }
+ if err := r.parse(&bb, resp.Body); err != nil {
+ return err
+ }
+ fmt.Fprintf(&bb, "\n")
+ }
+ b, err := format.Source(bb.Bytes())
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseICMPv4Parameters(w io.Writer, r io.Reader) error {
+ dec := xml.NewDecoder(r)
+ var icp icmpv4Parameters
+ if err := dec.Decode(&icp); err != nil {
+ return err
+ }
+ prs := icp.escape()
+ fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+ fmt.Fprintf(w, "const (\n")
+ for _, pr := range prs {
+ if pr.Descr == "" {
+ continue
+ }
+ fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value)
+ fmt.Fprintf(w, "// %s\n", pr.OrigDescr)
+ }
+ fmt.Fprintf(w, ")\n\n")
+ fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+ fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
+ for _, pr := range prs {
+ if pr.Descr == "" {
+ continue
+ }
+ fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr))
+ }
+ fmt.Fprintf(w, "}\n")
+ return nil
+}
+
+type icmpv4Parameters struct {
+ XMLName xml.Name `xml:"registry"`
+ Title string `xml:"title"`
+ Updated string `xml:"updated"`
+ Registries []struct {
+ Title string `xml:"title"`
+ Records []struct {
+ Value string `xml:"value"`
+ Descr string `xml:"description"`
+ } `xml:"record"`
+ } `xml:"registry"`
+}
+
+type canonICMPv4ParamRecord struct {
+ OrigDescr string
+ Descr string
+ Value int
+}
+
+func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {
+ id := -1
+ for i, r := range icp.Registries {
+ if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
+ id = i
+ break
+ }
+ }
+ if id < 0 {
+ return nil
+ }
+ prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))
+ sr := strings.NewReplacer(
+ "Messages", "",
+ "Message", "",
+ "ICMP", "",
+ "+", "P",
+ "-", "",
+ "/", "",
+ ".", "",
+ " ", "",
+ )
+ for i, pr := range icp.Registries[id].Records {
+ if strings.Contains(pr.Descr, "Reserved") ||
+ strings.Contains(pr.Descr, "Unassigned") ||
+ strings.Contains(pr.Descr, "Deprecated") ||
+ strings.Contains(pr.Descr, "Experiment") ||
+ strings.Contains(pr.Descr, "experiment") {
+ continue
+ }
+ ss := strings.Split(pr.Descr, "\n")
+ if len(ss) > 1 {
+ prs[i].Descr = strings.Join(ss, " ")
+ } else {
+ prs[i].Descr = ss[0]
+ }
+ s := strings.TrimSpace(prs[i].Descr)
+ prs[i].OrigDescr = s
+ prs[i].Descr = sr.Replace(s)
+ prs[i].Value, _ = strconv.Atoi(pr.Value)
+ }
+ return prs
+}
diff --git a/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/vendor/golang.org/x/net/ipv4/genericopt_posix.go
new file mode 100644
index 000000000..53bc79ff5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/genericopt_posix.go
@@ -0,0 +1,63 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv4
+
+import (
+ "syscall"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// TOS returns the type-of-service field value for outgoing packets.
+func (c *genericOpt) TOS() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoTOS])
+}
+
+// SetTOS sets the type-of-service field value for future outgoing
+// packets.
+func (c *genericOpt) SetTOS(tos int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoTOS], tos)
+}
+
+// TTL returns the time-to-live field value for outgoing packets.
+func (c *genericOpt) TTL() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoTTL])
+}
+
+// SetTTL sets the time-to-live field value for future outgoing
+// packets.
+func (c *genericOpt) SetTTL(ttl int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoTTL], ttl)
+}
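
As a minimal sketch of the accessors above in use, the snippet below dials a TCP connection and sets TOS and TTL through ipv4.NewConn (the constructor seen in the example tests earlier in this diff); the dial target is a placeholder.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.Dial("tcp4", "example.com:80") // hypothetical peer
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewConn(c)
	if err := p.SetTOS(0x28); err != nil { // DSCP AF11, as in the example tests
		log.Fatal(err)
	}
	if err := p.SetTTL(64); err != nil {
		log.Fatal(err)
	}
	if tos, err := p.TOS(); err == nil {
		log.Printf("tos=%#x", tos)
	}
	if ttl, err := p.TTL(); err == nil {
		log.Printf("ttl=%d", ttl)
	}
}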
diff --git a/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/vendor/golang.org/x/net/ipv4/genericopt_stub.go
new file mode 100644
index 000000000..1817badb1
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/genericopt_stub.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv4
+
+// TOS returns the type-of-service field value for outgoing packets.
+func (c *genericOpt) TOS() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetTOS sets the type-of-service field value for future outgoing
+// packets.
+func (c *genericOpt) SetTOS(tos int) error {
+ return errOpNoSupport
+}
+
+// TTL returns the time-to-live field value for outgoing packets.
+func (c *genericOpt) TTL() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetTTL sets the time-to-live field value for future outgoing
+// packets.
+func (c *genericOpt) SetTTL(ttl int) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go
new file mode 100644
index 000000000..363d9c21a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/header.go
@@ -0,0 +1,132 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "runtime"
+ "syscall"
+)
+
+const (
+ Version = 4 // protocol version
+ HeaderLen = 20 // header length without extension headers
+ maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields
+)
+
+type HeaderFlags int
+
+const (
+ MoreFragments HeaderFlags = 1 << iota // more fragments flag
+ DontFragment // don't fragment flag
+)
+
+// A Header represents an IPv4 header.
+type Header struct {
+ Version int // protocol version
+ Len int // header length
+ TOS int // type-of-service
+ TotalLen int // packet total length
+ ID int // identification
+ Flags HeaderFlags // flags
+ FragOff int // fragment offset
+ TTL int // time-to-live
+ Protocol int // next protocol
+ Checksum int // checksum
+ Src net.IP // source address
+ Dst net.IP // destination address
+ Options []byte // options, extension headers
+}
+
+func (h *Header) String() string {
+ if h == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst)
+}
+
+// Marshal returns the binary encoding of the IPv4 header h.
+func (h *Header) Marshal() ([]byte, error) {
+ if h == nil {
+ return nil, syscall.EINVAL
+ }
+ if h.Len < HeaderLen {
+ return nil, errHeaderTooShort
+ }
+ hdrlen := HeaderLen + len(h.Options)
+ b := make([]byte, hdrlen)
+ b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f))
+ b[1] = byte(h.TOS)
+ flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13)
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "freebsd", "netbsd":
+ nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ default:
+ binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ }
+ binary.BigEndian.PutUint16(b[4:6], uint16(h.ID))
+ b[8] = byte(h.TTL)
+ b[9] = byte(h.Protocol)
+ binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum))
+ if ip := h.Src.To4(); ip != nil {
+ copy(b[12:16], ip[:net.IPv4len])
+ }
+ if ip := h.Dst.To4(); ip != nil {
+ copy(b[16:20], ip[:net.IPv4len])
+ } else {
+ return nil, errMissingAddress
+ }
+ if len(h.Options) > 0 {
+ copy(b[HeaderLen:], h.Options)
+ }
+ return b, nil
+}
+
+// ParseHeader parses b as an IPv4 header.
+func ParseHeader(b []byte) (*Header, error) {
+ if len(b) < HeaderLen {
+ return nil, errHeaderTooShort
+ }
+ hdrlen := int(b[0]&0x0f) << 2
+ if hdrlen > len(b) {
+ return nil, errBufferTooShort
+ }
+ h := &Header{
+ Version: int(b[0] >> 4),
+ Len: hdrlen,
+ TOS: int(b[1]),
+ ID: int(binary.BigEndian.Uint16(b[4:6])),
+ TTL: int(b[8]),
+ Protocol: int(b[9]),
+ Checksum: int(binary.BigEndian.Uint16(b[10:12])),
+ Src: net.IPv4(b[12], b[13], b[14], b[15]),
+ Dst: net.IPv4(b[16], b[17], b[18], b[19]),
+ }
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "netbsd":
+ h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen
+ h.FragOff = int(nativeEndian.Uint16(b[6:8]))
+ case "freebsd":
+ h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+ if freebsdVersion < 1000000 {
+ h.TotalLen += hdrlen
+ }
+ h.FragOff = int(nativeEndian.Uint16(b[6:8]))
+ default:
+ h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+ h.FragOff = int(binary.BigEndian.Uint16(b[6:8]))
+ }
+ h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13
+ h.FragOff = h.FragOff & 0x1fff
+ if hdrlen-HeaderLen > 0 {
+ h.Options = make([]byte, hdrlen-HeaderLen)
+ copy(h.Options, b[HeaderLen:])
+ }
+ return h, nil
+}
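
A minimal sketch of a Marshal/ParseHeader round trip for the Header type defined above; the field values are arbitrary illustrations, and fields left zero simply take their defaults.

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TotalLen: ipv4.HeaderLen, // header only, no payload in this sketch
		TTL:      64,
		Protocol: 1, // ICMP
		Src:      net.IPv4(192, 0, 2, 1),
		Dst:      net.IPv4(192, 0, 2, 2),
	}
	b, err := h.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := ipv4.ParseHeader(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed) // space-separated field dump via Header.String
}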
diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go
new file mode 100644
index 000000000..85cb9c489
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/header_test.go
@@ -0,0 +1,138 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "bytes"
+ "encoding/binary"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type headerTest struct {
+ wireHeaderFromKernel [HeaderLen]byte
+ wireHeaderToKernel [HeaderLen]byte
+ wireHeaderFromTradBSDKernel [HeaderLen]byte
+ wireHeaderFromFreeBSD10Kernel [HeaderLen]byte
+ wireHeaderToTradBSDKernel [HeaderLen]byte
+ *Header
+}
+
+var headerLittleEndianTest = headerTest{
+ // TODO(mikio): Add platform dependent wire header formats when
+ // we support new platforms.
+ wireHeaderFromKernel: [HeaderLen]byte{
+ 0x45, 0x01, 0xbe, 0xef,
+ 0xca, 0xfe, 0x45, 0xdc,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ wireHeaderToKernel: [HeaderLen]byte{
+ 0x45, 0x01, 0xbe, 0xef,
+ 0xca, 0xfe, 0x45, 0xdc,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ wireHeaderFromTradBSDKernel: [HeaderLen]byte{
+ 0x45, 0x01, 0xdb, 0xbe,
+ 0xca, 0xfe, 0xdc, 0x45,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ wireHeaderFromFreeBSD10Kernel: [HeaderLen]byte{
+ 0x45, 0x01, 0xef, 0xbe,
+ 0xca, 0xfe, 0xdc, 0x45,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ wireHeaderToTradBSDKernel: [HeaderLen]byte{
+ 0x45, 0x01, 0xef, 0xbe,
+ 0xca, 0xfe, 0xdc, 0x45,
+ 0xff, 0x01, 0xde, 0xad,
+ 172, 16, 254, 254,
+ 192, 168, 0, 1,
+ },
+ Header: &Header{
+ Version: Version,
+ Len: HeaderLen,
+ TOS: 1,
+ TotalLen: 0xbeef,
+ ID: 0xcafe,
+ Flags: DontFragment,
+ FragOff: 1500,
+ TTL: 255,
+ Protocol: 1,
+ Checksum: 0xdead,
+ Src: net.IPv4(172, 16, 254, 254),
+ Dst: net.IPv4(192, 168, 0, 1),
+ },
+}
+
+func TestMarshalHeader(t *testing.T) {
+ tt := &headerLittleEndianTest
+ if nativeEndian != binary.LittleEndian {
+ t.Skip("no test for non-little endian machine yet")
+ }
+
+ b, err := tt.Header.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var wh []byte
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "netbsd":
+ wh = tt.wireHeaderToTradBSDKernel[:]
+ case "freebsd":
+ if freebsdVersion < 1000000 {
+ wh = tt.wireHeaderToTradBSDKernel[:]
+ } else {
+ wh = tt.wireHeaderFromFreeBSD10Kernel[:]
+ }
+ default:
+ wh = tt.wireHeaderToKernel[:]
+ }
+ if !bytes.Equal(b, wh) {
+ t.Fatalf("got %#v; want %#v", b, wh)
+ }
+}
+
+func TestParseHeader(t *testing.T) {
+ tt := &headerLittleEndianTest
+ if nativeEndian != binary.LittleEndian {
+ t.Skip("no test for big endian machine yet")
+ }
+
+ var wh []byte
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "netbsd":
+ wh = tt.wireHeaderFromTradBSDKernel[:]
+ case "freebsd":
+ if freebsdVersion < 1000000 {
+ wh = tt.wireHeaderFromTradBSDKernel[:]
+ } else {
+ wh = tt.wireHeaderFromFreeBSD10Kernel[:]
+ }
+ default:
+ wh = tt.wireHeaderFromKernel[:]
+ }
+ h, err := ParseHeader(wh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(h, tt.Header) {
+ t.Fatalf("got %#v; want %#v", h, tt.Header)
+ }
+ s := h.String()
+ if strings.Contains(s, ",") {
+ t.Fatalf("should be space-separated values: %s", s)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go
new file mode 100644
index 000000000..acecfd0d3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/helper.go
@@ -0,0 +1,59 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "unsafe"
+)
+
+var (
+ errMissingAddress = errors.New("missing address")
+ errMissingHeader = errors.New("missing header")
+ errHeaderTooShort = errors.New("header too short")
+ errBufferTooShort = errors.New("buffer too short")
+ errInvalidConnType = errors.New("invalid conn type")
+ errOpNoSupport = errors.New("operation not supported")
+ errNoSuchInterface = errors.New("no such interface")
+ errNoSuchMulticastInterface = errors.New("no such multicast interface")
+
+ // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
+ freebsdVersion uint32
+
+ nativeEndian binary.ByteOrder
+)
+
+func init() {
+ i := uint32(1)
+ b := (*[4]byte)(unsafe.Pointer(&i))
+ if b[0] == 1 {
+ nativeEndian = binary.LittleEndian
+ } else {
+ nativeEndian = binary.BigEndian
+ }
+}
+
+func boolint(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func netAddrToIP4(a net.Addr) net.IP {
+ switch v := a.(type) {
+ case *net.UDPAddr:
+ if ip := v.IP.To4(); ip != nil {
+ return ip
+ }
+ case *net.IPAddr:
+ if ip := v.IP.To4(); ip != nil {
+ return ip
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go
new file mode 100644
index 000000000..be10c9488
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/iana.go
@@ -0,0 +1,34 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package ipv4
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
+const (
+ ICMPTypeEchoReply ICMPType = 0 // Echo Reply
+ ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable
+ ICMPTypeRedirect ICMPType = 5 // Redirect
+ ICMPTypeEcho ICMPType = 8 // Echo
+ ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement
+ ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation
+ ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded
+ ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem
+ ICMPTypeTimestamp ICMPType = 13 // Timestamp
+ ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply
+ ICMPTypePhoturis ICMPType = 40 // Photuris
+)
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
+var icmpTypes = map[ICMPType]string{
+ 0: "echo reply",
+ 3: "destination unreachable",
+ 5: "redirect",
+ 8: "echo",
+ 9: "router advertisement",
+ 10: "router solicitation",
+ 11: "time exceeded",
+ 12: "parameter problem",
+ 13: "timestamp",
+ 14: "timestamp reply",
+ 40: "photuris",
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go
new file mode 100644
index 000000000..dbd05cff2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "golang.org/x/net/internal/iana"
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+ s, ok := icmpTypes[typ]
+ if !ok {
+ return "<nil>"
+ }
+ return s
+}
+
+// Protocol returns the ICMPv4 protocol number.
+func (typ ICMPType) Protocol() int {
+ return iana.ProtocolICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// does not interact with forwarded packets or tunnel-outer packets.
+//
+// Note: RFC 2460 defines a reasonable role model that applies not
+// only to IPv6 but also to IPv4. A node is a device that implements
+// IP. A router is a node that forwards IP packets not explicitly
+// addressed to itself, and a host is a node that is not a router.
+type ICMPFilter struct {
+ sysICMPFilter
+}
+
+// Accept accepts incoming ICMP packets whose type field value is typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+ f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets whose type field value is typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+ f.block(typ)
+}
+
+// SetAll sets the filter action (block or accept) for all ICMP types.
+func (f *ICMPFilter) SetAll(block bool) {
+ f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+ return f.willBlock(typ)
+}
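
A minimal sketch of building a filter with the methods above and applying it to a PacketConn via SetICMPFilter; the listen address is illustrative, the raw socket needs privileges, and, as the tests below note, the kernel-side filter is only effective on some platforms.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // needs raw-socket privileges
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	var f ipv4.ICMPFilter
	f.SetAll(true)                   // block everything by default
	f.Accept(ipv4.ICMPTypeEchoReply) // then allow echo replies only
	if f.WillBlock(ipv4.ICMPTypeEchoReply) {
		log.Fatal("echo reply unexpectedly blocked")
	}
	if err := p.SetICMPFilter(&f); err != nil {
		log.Fatal(err) // not supported on every platform
	}
}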
diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go
new file mode 100644
index 000000000..c91225335
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+func (f *sysICMPFilter) accept(typ ICMPType) {
+ f.Data &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPFilter) block(typ ICMPType) {
+ f.Data |= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPFilter) setAll(block bool) {
+ if block {
+ f.Data = 1<<32 - 1
+ } else {
+ f.Data = 0
+ }
+}
+
+func (f *sysICMPFilter) willBlock(typ ICMPType) bool {
+ return f.Data&(1<<(uint32(typ)&31)) != 0
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go
new file mode 100644
index 000000000..9ee9b6a32
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv4
+
+const sysSizeofICMPFilter = 0x0
+
+type sysICMPFilter struct {
+}
+
+func (f *sysICMPFilter) accept(typ ICMPType) {
+}
+
+func (f *sysICMPFilter) block(typ ICMPType) {
+}
+
+func (f *sysICMPFilter) setAll(block bool) {
+}
+
+func (f *sysICMPFilter) willBlock(typ ICMPType) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go
new file mode 100644
index 000000000..3324b54df
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_test.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+var icmpStringTests = []struct {
+ in ipv4.ICMPType
+ out string
+}{
+ {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"},
+
+ {256, "<nil>"},
+}
+
+func TestICMPString(t *testing.T) {
+ for _, tt := range icmpStringTests {
+ s := tt.in.String()
+ if s != tt.out {
+ t.Errorf("got %s; want %s", s, tt.out)
+ }
+ }
+}
+
+func TestICMPFilter(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux":
+ default:
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ var f ipv4.ICMPFilter
+ for _, toggle := range []bool{false, true} {
+ f.SetAll(toggle)
+ for _, typ := range []ipv4.ICMPType{
+ ipv4.ICMPTypeDestinationUnreachable,
+ ipv4.ICMPTypeEchoReply,
+ ipv4.ICMPTypeTimeExceeded,
+ ipv4.ICMPTypeParameterProblem,
+ } {
+ f.Accept(typ)
+ if f.WillBlock(typ) {
+ t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ)
+ }
+ f.Block(typ)
+ if !f.WillBlock(typ) {
+ t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ)
+ }
+ }
+ }
+}
+
+func TestSetICMPFilter(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux":
+ default:
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket("ip4:icmp", "127.0.0.1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv4.NewPacketConn(c)
+
+ var f ipv4.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv4.ICMPTypeEcho)
+ f.Accept(ipv4.ICMPTypeEchoReply)
+ if err := p.SetICMPFilter(&f); err != nil {
+ t.Fatal(err)
+ }
+ kf, err := p.ICMPFilter()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(kf, &f) {
+ t.Fatalf("got %#v; want %#v", kf, f)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/mocktransponder_test.go b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go
new file mode 100644
index 000000000..e55aaee91
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go
@@ -0,0 +1,21 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "testing"
+)
+
+func acceptor(t *testing.T, ln net.Listener, done chan<- bool) {
+ defer func() { done <- true }()
+
+ c, err := ln.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ c.Close()
+}
diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go
new file mode 100644
index 000000000..d2bcf8533
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/multicast_test.go
@@ -0,0 +1,330 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+var packetConnReadWriteMulticastUDPTests = []struct {
+ addr string
+ grp, src *net.UDPAddr
+}{
+ {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727
+
+ {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestPacketConnReadWriteMulticastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range packetConnReadWriteMulticastUDPTests {
+ c, err := net.ListenPacket("udp4", tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ grp := *tt.grp
+ grp.Port = c.LocalAddr().(*net.UDPAddr).Port
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+ if tt.src == nil {
+ if err := p.JoinGroup(ifi, &grp); err != nil {
+ t.Fatal(err)
+ }
+ defer p.LeaveGroup(ifi, &grp)
+ } else {
+ if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support IGMPv2/3 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src)
+ }
+ if err := p.SetMulticastInterface(ifi); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastInterface(); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetMulticastLoopback(true); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastLoopback(); err != nil {
+ t.Fatal(err)
+ }
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+ wb := []byte("HELLO-R-U-THERE")
+
+ for i, toggle := range []bool{true, false, true} {
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ p.SetMulticastTTL(i + 1)
+ if n, err := p.WriteTo(wb, nil, &grp); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Fatalf("got %v; want %v", rb[:n], wb)
+ }
+ }
+ }
+}
+
+var packetConnReadWriteMulticastICMPTests = []struct {
+ grp, src *net.IPAddr
+}{
+ {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727
+
+ {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestPacketConnReadWriteMulticastICMP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range packetConnReadWriteMulticastICMPTests {
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+ if tt.src == nil {
+ if err := p.JoinGroup(ifi, tt.grp); err != nil {
+ t.Fatal(err)
+ }
+ defer p.LeaveGroup(ifi, tt.grp)
+ } else {
+ if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support IGMPv2/3 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+ }
+ if err := p.SetMulticastInterface(ifi); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastInterface(); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetMulticastLoopback(true); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastLoopback(); err != nil {
+ t.Fatal(err)
+ }
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+
+ for i, toggle := range []bool{true, false, true} {
+ wb, err := (&icmp.Message{
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ p.SetMulticastTTL(i + 1)
+ if n, err := p.WriteTo(wb, nil, tt.grp); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else {
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])
+ if err != nil {
+ t.Fatal(err)
+ }
+ switch {
+ case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1
+ case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0
+ default:
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+ }
+}
+
+var rawConnReadWriteMulticastICMPTests = []struct {
+ grp, src *net.IPAddr
+}{
+ {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727
+
+ {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestRawConnReadWriteMulticastICMP(t *testing.T) {
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range rawConnReadWriteMulticastICMPTests {
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ if tt.src == nil {
+ if err := r.JoinGroup(ifi, tt.grp); err != nil {
+ t.Fatal(err)
+ }
+ defer r.LeaveGroup(ifi, tt.grp)
+ } else {
+ if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support IGMPv2/3 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+ }
+ if err := r.SetMulticastInterface(ifi); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := r.MulticastInterface(); err != nil {
+ t.Fatal(err)
+ }
+ if err := r.SetMulticastLoopback(true); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := r.MulticastLoopback(); err != nil {
+ t.Fatal(err)
+ }
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+
+ for i, toggle := range []bool{true, false, true} {
+ wb, err := (&icmp.Message{
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wh := &ipv4.Header{
+ Version: ipv4.Version,
+ Len: ipv4.HeaderLen,
+ TOS: i + 1,
+ TotalLen: ipv4.HeaderLen + len(wb),
+ Protocol: 1,
+ Dst: tt.grp.IP,
+ }
+ if err := r.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ r.SetMulticastTTL(i + 1)
+ if err := r.WriteTo(wh, wb, nil); err != nil {
+ t.Fatal(err)
+ }
+ rb := make([]byte, ipv4.HeaderLen+128)
+ if rh, b, _, err := r.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else {
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ switch {
+ case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1
+ case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0
+ default:
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go
new file mode 100644
index 000000000..e342bf1d9
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go
@@ -0,0 +1,249 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+var udpMultipleGroupListenerTests = []net.Addr{
+ &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727
+ &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)},
+ &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)},
+}
+
+func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+
+ for _, gaddr := range udpMultipleGroupListenerTests {
+ c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv4.NewPacketConn(c)
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok {
+ continue
+ }
+ if err := p.JoinGroup(&ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ if err := p.LeaveGroup(ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+
+ for _, gaddr := range udpMultipleGroupListenerTests {
+ c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c1.Close()
+
+ c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c2.Close()
+
+ var ps [2]*ipv4.PacketConn
+ ps[0] = ipv4.NewPacketConn(c1)
+ ps[1] = ipv4.NewPacketConn(c2)
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok {
+ continue
+ }
+ for _, p := range ps {
+ if err := p.JoinGroup(&ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ for _, p := range ps {
+ if err := p.LeaveGroup(ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ }
+}
+
+func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+
+ gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727
+ type ml struct {
+ c *ipv4.PacketConn
+ ifi *net.Interface
+ }
+ var mlt []*ml
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ ip, ok := nettest.IsMulticastCapable("ip4", &ifi)
+ if !ok {
+ continue
+ }
+ c, err := net.ListenPacket("udp4", ip.String()+":"+"1024") // unicast address with non-reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv4.NewPacketConn(c)
+ if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mlt = append(mlt, &ml{p, &ift[i]})
+ }
+ for _, m := range mlt {
+ if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok {
+ continue
+ }
+ if err := r.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ if err := r.LeaveGroup(ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Skip("to avoid external network")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727
+ type ml struct {
+ c *ipv4.RawConn
+ ifi *net.Interface
+ }
+ var mlt []*ml
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ ip, ok := nettest.IsMulticastCapable("ip4", &ifi)
+ if !ok {
+ continue
+ }
+ c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := r.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mlt = append(mlt, &ml{r, &ift[i]})
+ }
+ for _, m := range mlt {
+ if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go
new file mode 100644
index 000000000..c76dbe4de
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go
@@ -0,0 +1,195 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+var packetConnMulticastSocketOptionTests = []struct {
+ net, proto, addr string
+ grp, src net.Addr
+}{
+ {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727
+ {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727
+
+ {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+ {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestPacketConnMulticastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ m, ok := nettest.SupportsRawIPSocket()
+ for _, tt := range packetConnMulticastSocketOptionTests {
+ if tt.net == "ip4" && !ok {
+ t.Log(m)
+ continue
+ }
+ c, err := net.ListenPacket(tt.net+tt.proto, tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+
+ if tt.src == nil {
+ testMulticastSocketOptions(t, p, ifi, tt.grp)
+ } else {
+ testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src)
+ }
+ }
+}
+
+var rawConnMulticastSocketOptionTests = []struct {
+ grp, src net.Addr
+}{
+ {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727
+
+ {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestRawConnMulticastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range rawConnMulticastSocketOptionTests {
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ if tt.src == nil {
+ testMulticastSocketOptions(t, r, ifi, tt.grp)
+ } else {
+ testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src)
+ }
+ }
+}
+
+type testIPv4MulticastConn interface {
+ MulticastTTL() (int, error)
+ SetMulticastTTL(ttl int) error
+ MulticastLoopback() (bool, error)
+ SetMulticastLoopback(bool) error
+ JoinGroup(*net.Interface, net.Addr) error
+ LeaveGroup(*net.Interface, net.Addr) error
+ JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+}
+
+func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) {
+ const ttl = 255
+ if err := c.SetMulticastTTL(ttl); err != nil {
+ t.Error(err)
+ return
+ }
+ if v, err := c.MulticastTTL(); err != nil {
+ t.Error(err)
+ return
+ } else if v != ttl {
+ t.Errorf("got %v; want %v", v, ttl)
+ return
+ }
+
+ for _, toggle := range []bool{true, false} {
+ if err := c.SetMulticastLoopback(toggle); err != nil {
+ t.Error(err)
+ return
+ }
+ if v, err := c.MulticastLoopback(); err != nil {
+ t.Error(err)
+ return
+ } else if v != toggle {
+ t.Errorf("got %v; want %v", v, toggle)
+ return
+ }
+ }
+
+ if err := c.JoinGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+}
+
+func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) {
+ // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP
+ if err := c.JoinGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support IGMPv2/3 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ return
+ }
+ t.Error(err)
+ return
+ }
+ if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP
+ if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP
+ if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go
new file mode 100644
index 000000000..09864314e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/packet.go
@@ -0,0 +1,97 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+)
+
+// A packetHandler represents the IPv4 datagram handler.
+type packetHandler struct {
+ c *net.IPConn
+ rawOpt
+}
+
+func (c *packetHandler) ok() bool { return c != nil && c.c != nil }
+
+// ReadFrom reads an IPv4 datagram from the endpoint c, copying the
+// datagram into b. It returns the received datagram as the IPv4
+// header h, the payload p and the control message cm.
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {
+ if !c.ok() {
+ return nil, nil, nil, syscall.EINVAL
+ }
+ oob := newControlMessage(&c.rawOpt)
+ n, oobn, _, src, err := c.c.ReadMsgIP(b, oob)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ var hs []byte
+ if hs, p, err = slicePacket(b[:n]); err != nil {
+ return nil, nil, nil, err
+ }
+ if h, err = ParseHeader(hs); err != nil {
+ return nil, nil, nil, err
+ }
+ if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+ return nil, nil, nil, err
+ }
+ if src != nil && cm != nil {
+ cm.Src = src.IP
+ }
+ return
+}
+
+func slicePacket(b []byte) (h, p []byte, err error) {
+ if len(b) < HeaderLen {
+ return nil, nil, errHeaderTooShort
+ }
+ hdrlen := int(b[0]&0x0f) << 2
+ return b[:hdrlen], b[hdrlen:], nil
+}
+
+// WriteTo writes an IPv4 datagram through the endpoint c, copying the
+// datagram from the IPv4 header h and the payload p. The control
+// message cm allows the datagram path and the outgoing interface to be
+// specified. Currently only Darwin and Linux support this. The cm
+// may be nil if control of the outgoing datagram is not required.
+//
+// The IPv4 header h must contain appropriate fields that include:
+//
+// Version = ipv4.Version
+// Len = <must be specified>
+// TOS = <must be specified>
+// TotalLen = <must be specified>
+// ID = platform sets an appropriate value if ID is zero
+// FragOff = <must be specified>
+// TTL = <must be specified>
+// Protocol = <must be specified>
+// Checksum = platform sets an appropriate value if Checksum is zero
+// Src = platform sets an appropriate value if Src is nil
+// Dst = <must be specified>
+// Options = optional
+func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ oob := marshalControlMessage(cm)
+ wh, err := h.Marshal()
+ if err != nil {
+ return err
+ }
+ dst := &net.IPAddr{}
+ if cm != nil {
+ if ip := cm.Dst.To4(); ip != nil {
+ dst.IP = ip
+ }
+ }
+ if dst.IP == nil {
+ dst.IP = h.Dst
+ }
+ wh = append(wh, p...)
+ _, _, err = c.c.WriteMsgIP(wh, oob, dst)
+ return err
+}
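
A minimal sketch of filling in the header fields that the WriteTo documentation above requires and sending a payload through a RawConn; the protocol number, destination and payload are illustrative, and the raw socket needs elevated privileges.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:253", "0.0.0.0") // 253: reserved for experimentation
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	r, err := ipv4.NewRawConn(c)
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("HELLO-R-U-THERE")
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TotalLen: ipv4.HeaderLen + len(payload),
		TTL:      64,
		Protocol: 253,
		Dst:      net.IPv4(127, 0, 0, 1),
		// ID, Checksum and Src are left zero/nil so the platform
		// fills them in, as described in the WriteTo documentation.
	}
	if err := r.WriteTo(h, payload, nil); err != nil {
		log.Fatal(err)
	}
}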
diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go
new file mode 100644
index 000000000..d7698cbd3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/payload.go
@@ -0,0 +1,15 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "net"
+
+// A payloadHandler represents the IPv4 datagram payload handler.
+type payloadHandler struct {
+ net.PacketConn
+ rawOpt
+}
+
+func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil }
diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go
new file mode 100644
index 000000000..d358fc3ac
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go
@@ -0,0 +1,81 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris,!windows
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+)
+
+// ReadFrom reads a payload of the received IPv4 datagram, from the
+// endpoint c, copying the payload into b. It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+ if !c.ok() {
+ return 0, nil, nil, syscall.EINVAL
+ }
+ oob := newControlMessage(&c.rawOpt)
+ var oobn int
+ switch c := c.PacketConn.(type) {
+ case *net.UDPConn:
+ if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {
+ return 0, nil, nil, err
+ }
+ case *net.IPConn:
+ if sockOpts[ssoStripHeader].name > 0 {
+ if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil {
+ return 0, nil, nil, err
+ }
+ } else {
+ nb := make([]byte, maxHeaderLen+len(b))
+ if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil {
+ return 0, nil, nil, err
+ }
+ hdrlen := int(nb[0]&0x0f) << 2
+ copy(b, nb[hdrlen:])
+ n -= hdrlen
+ }
+ default:
+ return 0, nil, nil, errInvalidConnType
+ }
+ if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+ return 0, nil, nil, err
+ }
+ if cm != nil {
+ cm.Src = netAddrToIP4(src)
+ }
+ return
+}
+
+// WriteTo writes a payload of the IPv4 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b. It
+// returns the number of bytes written. The control message cm allows
+// the datagram path and the outgoing interface to be specified.
+// Currently only Darwin and Linux support this. The cm may be nil if
+// control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ oob := marshalControlMessage(cm)
+ if dst == nil {
+ return 0, errMissingAddress
+ }
+ switch c := c.PacketConn.(type) {
+ case *net.UDPConn:
+ n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))
+ case *net.IPConn:
+ n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))
+ default:
+ return 0, errInvalidConnType
+ }
+ if err != nil {
+ return 0, err
+ }
+ return
+}
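
A minimal sketch of the payload-level ReadFrom/WriteTo pair above over a loopback UDP socket, with destination and interface control messages enabled; the message contents are illustrative, and on platforms without control-message support the SetControlMessage call simply reports an error.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	if err := p.SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
		log.Printf("control messages not supported here: %v", err)
	}

	dst := c.LocalAddr() // send to ourselves for illustration
	if _, err := p.WriteTo([]byte("HELLO-R-U-THERE"), nil, dst); err != nil {
		log.Fatal(err)
	}
	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes from %v, control message: %v", n, src, cm)
}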
diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go
new file mode 100644
index 000000000..d128c9c2e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9 solaris windows
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+)
+
+// ReadFrom reads a payload of the received IPv4 datagram, from the
+// endpoint c, copying the payload into b. It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+ if !c.ok() {
+ return 0, nil, nil, syscall.EINVAL
+ }
+ if n, src, err = c.PacketConn.ReadFrom(b); err != nil {
+ return 0, nil, nil, err
+ }
+ return
+}
+
+// WriteTo writes a payload of the IPv4 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b. It
+// returns the number of bytes written. The control message cm allows
+// the datagram path and the outgoing interface to be specified.
+// Currently only Darwin and Linux support this. The cm may be nil if
+// control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ if dst == nil {
+ return 0, errMissingAddress
+ }
+ return c.PacketConn.WriteTo(b, dst)
+}
diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go
new file mode 100644
index 000000000..247d06c1a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go
@@ -0,0 +1,174 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "bytes"
+ "net"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+func benchmarkUDPListener() (net.PacketConn, net.Addr, error) {
+ c, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String())
+ if err != nil {
+ c.Close()
+ return nil, nil, err
+ }
+ return c, dst, nil
+}
+
+func BenchmarkReadWriteNetUDP(b *testing.B) {
+ c, dst, err := benchmarkUDPListener()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer c.Close()
+
+ wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ benchmarkReadWriteNetUDP(b, c, wb, rb, dst)
+ }
+}
+
+func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) {
+ if _, err := c.WriteTo(wb, dst); err != nil {
+ b.Fatal(err)
+ }
+ if _, _, err := c.ReadFrom(rb); err != nil {
+ b.Fatal(err)
+ }
+}
+
+func BenchmarkReadWriteIPv4UDP(b *testing.B) {
+ c, dst, err := benchmarkUDPListener()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+ cf := ipv4.FlagTTL | ipv4.FlagInterface
+ if err := p.SetControlMessage(cf, true); err != nil {
+ b.Fatal(err)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+
+ wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ benchmarkReadWriteIPv4UDP(b, p, wb, rb, dst, ifi)
+ }
+}
+
+func benchmarkReadWriteIPv4UDP(b *testing.B, p *ipv4.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) {
+ cm := ipv4.ControlMessage{TTL: 1}
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ b.Fatal(err)
+ } else if n != len(wb) {
+ b.Fatalf("got %v; want %v", n, len(wb))
+ }
+ if _, _, _, err := p.ReadFrom(rb); err != nil {
+ b.Fatal(err)
+ }
+}
+
+func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ c, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+
+ dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface
+ wb := []byte("HELLO-R-U-THERE")
+
+ if err := p.SetControlMessage(cf, true); err != nil { // probe before test
+ if nettest.ProtocolNotSupported(err) {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+
+ var wg sync.WaitGroup
+ reader := func() {
+ defer wg.Done()
+ rb := make([]byte, 128)
+ if n, cm, _, err := p.ReadFrom(rb); err != nil {
+ t.Error(err)
+ return
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Errorf("got %v; want %v", rb[:n], wb)
+ return
+ } else {
+ s := cm.String()
+ if strings.Contains(s, ",") {
+ t.Errorf("should be space-separated values: %s", s)
+ }
+ }
+ }
+ writer := func(toggle bool) {
+ defer wg.Done()
+ cm := ipv4.ControlMessage{
+ Src: net.IPv4(127, 0, 0, 1),
+ }
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ t.Error(err)
+ return
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ t.Error(err)
+ return
+ } else if n != len(wb) {
+ t.Errorf("short write: %v", n)
+ return
+ }
+ }
+
+ const N = 10
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go reader()
+ }
+ wg.Add(2 * N)
+ for i := 0; i < 2*N; i++ {
+ go writer(i%2 != 0)
+ }
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go reader()
+ }
+ wg.Wait()
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go
new file mode 100644
index 000000000..ace37d30f
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+// Sticky socket options
+const (
+ ssoTOS = iota // header field for unicast packet
+ ssoTTL // header field for unicast packet
+ ssoMulticastTTL // header field for multicast packet
+ ssoMulticastInterface // outbound interface for multicast packet
+ ssoMulticastLoopback // loopback for multicast packet
+ ssoReceiveTTL // header field on received packet
+ ssoReceiveDst // header field on received packet
+ ssoReceiveInterface // inbound interface on received packet
+ ssoPacketInfo // inbound or outbound packet path
+ ssoHeaderPrepend // ipv4 header prepend
+ ssoStripHeader // strip ipv4 header
+ ssoICMPFilter // icmp filter
+ ssoJoinGroup // any-source multicast
+ ssoLeaveGroup // any-source multicast
+ ssoJoinSourceGroup // source-specific multicast
+ ssoLeaveSourceGroup // source-specific multicast
+ ssoBlockSourceGroup // any-source or source-specific multicast
+ ssoUnblockSourceGroup // any-source or source-specific multicast
+ ssoMax
+)
+
+// Sticky socket option value types
+const (
+ ssoTypeByte = iota + 1
+ ssoTypeInt
+ ssoTypeInterface
+ ssoTypeICMPFilter
+ ssoTypeIPMreq
+ ssoTypeIPMreqn
+ ssoTypeGroupReq
+ ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for sticky socket option.
+type sockOpt struct {
+ name int // option name, must be greater than or equal to 1
+ typ int // option value type, must be greater than or equal to 1
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go
new file mode 100644
index 000000000..4a6aa78ef
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd windows
+
+package ipv4
+
+import "net"
+
+func setIPMreqInterface(mreq *sysIPMreq, ifi *net.Interface) error {
+ if ifi == nil {
+ return nil
+ }
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return err
+ }
+ for _, ifa := range ifat {
+ switch ifa := ifa.(type) {
+ case *net.IPAddr:
+ if ip := ifa.IP.To4(); ip != nil {
+ copy(mreq.Interface[:], ip)
+ return nil
+ }
+ case *net.IPNet:
+ if ip := ifa.IP.To4(); ip != nil {
+ copy(mreq.Interface[:], ip)
+ return nil
+ }
+ }
+ }
+ return errNoSuchInterface
+}
+
+func netIP4ToInterface(ip net.IP) (*net.Interface, error) {
+ ift, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifi := range ift {
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifa := range ifat {
+ switch ifa := ifa.(type) {
+ case *net.IPAddr:
+ if ip.Equal(ifa.IP) {
+ return &ifi, nil
+ }
+ case *net.IPNet:
+ if ip.Equal(ifa.IP) {
+ return &ifi, nil
+ }
+ }
+ }
+ }
+ return nil, errNoSuchInterface
+}
+
+func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) {
+ if ifi == nil {
+ return net.IPv4zero.To4(), nil
+ }
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifa := range ifat {
+ switch ifa := ifa.(type) {
+ case *net.IPAddr:
+ if ip := ifa.IP.To4(); ip != nil {
+ return ip, nil
+ }
+ case *net.IPNet:
+ if ip := ifa.IP.To4(); ip != nil {
+ return ip, nil
+ }
+ }
+ }
+ return nil, errNoSuchInterface
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go
new file mode 100644
index 000000000..ef9b13902
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd windows
+
+package ipv4
+
+import (
+ "net"
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func setsockoptIPMreq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}}
+ if err := setIPMreqInterface(&mreq, ifi); err != nil {
+ return err
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sysSizeofIPMreq))
+}
+
+func getsockoptInterface(s uintptr, name int) (*net.Interface, error) {
+ var b [4]byte
+ l := uint32(4)
+ if err := getsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3]))
+ if err != nil {
+ return nil, err
+ }
+ return ifi, nil
+}
+
+func setsockoptInterface(s uintptr, name int, ifi *net.Interface) error {
+ ip, err := netInterfaceToIP4(ifi)
+ if err != nil {
+ return err
+ }
+ var b [4]byte
+ copy(b[:], ip)
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4)))
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go
new file mode 100644
index 000000000..9f7b655f5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go
@@ -0,0 +1,21 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!windows
+
+package ipv4
+
+import "net"
+
+func setsockoptIPMreq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ return errOpNoSupport
+}
+
+func getsockoptInterface(s uintptr, name int) (*net.Interface, error) {
+ return nil, errOpNoSupport
+}
+
+func setsockoptInterface(s uintptr, name int, ifi *net.Interface) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go
new file mode 100644
index 000000000..0c7f0f816
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux
+
+package ipv4
+
+import "net"
+
+func getsockoptIPMreqn(s uintptr, name int) (*net.Interface, error) {
+ return nil, errOpNoSupport
+}
+
+func setsockoptIPMreqn(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go
new file mode 100644
index 000000000..9d4069ff5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux
+
+package ipv4
+
+import (
+ "net"
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func getsockoptIPMreqn(s uintptr, name int) (*net.Interface, error) {
+ var mreqn sysIPMreqn
+ l := uint32(sysSizeofIPMreqn)
+ if err := getsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ if mreqn.Ifindex == 0 {
+ return nil, nil
+ }
+ ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex))
+ if err != nil {
+ return nil, err
+ }
+ return ifi, nil
+}
+
+func setsockoptIPMreqn(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ var mreqn sysIPMreqn
+ if ifi != nil {
+ mreqn.Ifindex = int32(ifi.Index)
+ }
+ if grp != nil {
+ mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]}
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sysSizeofIPMreqn))
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go
new file mode 100644
index 000000000..0b7d6b659
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go
@@ -0,0 +1,122 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv4
+
+import (
+ "net"
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func getInt(s uintptr, opt *sockOpt) (int, error) {
+ if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) {
+ return 0, errOpNoSupport
+ }
+ var i int32
+ var b byte
+ p := unsafe.Pointer(&i)
+ l := uint32(4)
+ if opt.typ == ssoTypeByte {
+ p = unsafe.Pointer(&b)
+ l = 1
+ }
+ if err := getsockopt(s, iana.ProtocolIP, opt.name, p, &l); err != nil {
+ return 0, os.NewSyscallError("getsockopt", err)
+ }
+ if opt.typ == ssoTypeByte {
+ return int(b), nil
+ }
+ return int(i), nil
+}
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+ if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) {
+ return errOpNoSupport
+ }
+ i := int32(v)
+ var b byte
+ p := unsafe.Pointer(&i)
+ l := uint32(4)
+ if opt.typ == ssoTypeByte {
+ b = byte(v)
+ p = unsafe.Pointer(&b)
+ l = 1
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, opt.name, p, l))
+}
+
+func getInterface(s uintptr, opt *sockOpt) (*net.Interface, error) {
+ if opt.name < 1 {
+ return nil, errOpNoSupport
+ }
+ switch opt.typ {
+ case ssoTypeInterface:
+ return getsockoptInterface(s, opt.name)
+ case ssoTypeIPMreqn:
+ return getsockoptIPMreqn(s, opt.name)
+ default:
+ return nil, errOpNoSupport
+ }
+}
+
+func setInterface(s uintptr, opt *sockOpt, ifi *net.Interface) error {
+ if opt.name < 1 {
+ return errOpNoSupport
+ }
+ switch opt.typ {
+ case ssoTypeInterface:
+ return setsockoptInterface(s, opt.name, ifi)
+ case ssoTypeIPMreqn:
+ return setsockoptIPMreqn(s, opt.name, ifi, nil)
+ default:
+ return errOpNoSupport
+ }
+}
+
+func getICMPFilter(s uintptr, opt *sockOpt) (*ICMPFilter, error) {
+ if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+ return nil, errOpNoSupport
+ }
+ var f ICMPFilter
+ l := uint32(sysSizeofICMPFilter)
+ if err := getsockopt(s, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), &l); err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ return &f, nil
+}
+
+func setICMPFilter(s uintptr, opt *sockOpt, f *ICMPFilter) error {
+ if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+ return errOpNoSupport
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), sysSizeofICMPFilter))
+}
+
+func setGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+ if opt.name < 1 {
+ return errOpNoSupport
+ }
+ switch opt.typ {
+ case ssoTypeIPMreq:
+ return setsockoptIPMreq(s, opt.name, ifi, grp)
+ case ssoTypeIPMreqn:
+ return setsockoptIPMreqn(s, opt.name, ifi, grp)
+ case ssoTypeGroupReq:
+ return setsockoptGroupReq(s, opt.name, ifi, grp)
+ default:
+ return errOpNoSupport
+ }
+}
+
+func setSourceGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+ if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq {
+ return errOpNoSupport
+ }
+ return setsockoptGroupSourceReq(s, opt.name, ifi, grp, src)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go
new file mode 100644
index 000000000..e2d98fdf5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux
+
+package ipv4
+
+import "net"
+
+func setsockoptGroupReq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ return errOpNoSupport
+}
+
+func setsockoptGroupSourceReq(s uintptr, name int, ifi *net.Interface, grp, src net.IP) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go
new file mode 100644
index 000000000..588e9b943
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux
+
+package ipv4
+
+import (
+ "net"
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var freebsd32o64 bool
+
+func setsockoptGroupReq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+ var gr sysGroupReq
+ if ifi != nil {
+ gr.Interface = uint32(ifi.Index)
+ }
+ gr.setGroup(grp)
+ var p unsafe.Pointer
+ var l uint32
+ if freebsd32o64 {
+ var d [sysSizeofGroupReq + 4]byte
+ s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr))
+ copy(d[:4], s[:4])
+ copy(d[8:], s[4:])
+ p = unsafe.Pointer(&d[0])
+ l = sysSizeofGroupReq + 4
+ } else {
+ p = unsafe.Pointer(&gr)
+ l = sysSizeofGroupReq
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, p, l))
+}
+
+func setsockoptGroupSourceReq(s uintptr, name int, ifi *net.Interface, grp, src net.IP) error {
+ var gsr sysGroupSourceReq
+ if ifi != nil {
+ gsr.Interface = uint32(ifi.Index)
+ }
+ gsr.setSourceGroup(grp, src)
+ var p unsafe.Pointer
+ var l uint32
+ if freebsd32o64 {
+ var d [sysSizeofGroupSourceReq + 4]byte
+ s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))
+ copy(d[:4], s[:4])
+ copy(d[8:], s[4:])
+ p = unsafe.Pointer(&d[0])
+ l = sysSizeofGroupSourceReq + 4
+ } else {
+ p = unsafe.Pointer(&gsr)
+ l = sysSizeofGroupSourceReq
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, p, l))
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go
new file mode 100644
index 000000000..7cfe57ca3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv4
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go
new file mode 100644
index 000000000..203033db0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go
@@ -0,0 +1,34 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly netbsd
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+ ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+ ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt},
+ ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt},
+ ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt},
+ ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt},
+ ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+ ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+ }
+)
diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go
new file mode 100644
index 000000000..b5f5bd515
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go
@@ -0,0 +1,96 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+ ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+ ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt},
+ ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt},
+ ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt},
+ ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt},
+ ssoStripHeader: {sysIP_STRIPHDR, ssoTypeInt},
+ ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+ ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+ }
+)
+
+func init() {
+ // kern.osreldate appears to be unavailable on recent OS X, so we
+ // use kern.osrelease instead.
+ osver, err := syscall.Sysctl("kern.osrelease")
+ if err != nil {
+ return
+ }
+ var i int
+ for i = range osver {
+ if osver[i] == '.' {
+ break
+ }
+ }
+ // The IP_PKTINFO and protocol-independent multicast API were
+ // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like
+ // those features require OS X 10.8 (Darwin 12.0.0) and above.
+ // See http://support.apple.com/kb/HT1633.
+ if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' {
+ ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO
+ ctlOpts[ctlPacketInfo].length = sysSizeofInetPktinfo
+ ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo
+ ctlOpts[ctlPacketInfo].parse = parsePacketInfo
+ sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO
+ sockOpts[ssoPacketInfo].typ = ssoTypeInt
+ sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn
+ sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP
+ sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq
+ sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP
+ sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq
+ sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP
+ sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq
+ sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP
+ sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq
+ sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE
+ sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq
+ sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE
+ sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq
+ }
+}
+
+func (pi *sysInetPktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Pad_cgo_0[0]))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_0[0]))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_1[0]))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go
new file mode 100644
index 000000000..163ff9a77
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go
@@ -0,0 +1,73 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "runtime"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+ ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+ ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt},
+ ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt},
+ ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt},
+ ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt},
+ ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+ ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+ ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+ }
+)
+
+func init() {
+ freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate")
+ if freebsdVersion >= 1000000 {
+ sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn
+ }
+ if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" {
+ archs, _ := syscall.Sysctl("kern.supported_archs")
+ for _, s := range strings.Fields(archs) {
+ if s == "amd64" {
+ freebsd32o64 = true
+ break
+ }
+ }
+ }
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source))
+ sa.Len = sysSizeofSockaddrInet
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go
new file mode 100644
index 000000000..73e0d4623
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_linux.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL},
+ ctlPacketInfo: {sysIP_PKTINFO, sysSizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt},
+ ssoPacketInfo: {sysIP_PKTINFO, ssoTypeInt},
+ ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt},
+ ssoICMPFilter: {sysICMP_FILTER, ssoTypeICMPFilter},
+ ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+ ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+ ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+ }
+)
+
+func (pi *sysInetPktinfo) setIfindex(i int) {
+ pi.Ifindex = int32(i)
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/vendor/golang.org/x/net/ipv4/sys_openbsd.go
new file mode 100644
index 000000000..d78083a28
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_openbsd.go
@@ -0,0 +1,32 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+ ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+ ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeByte},
+ ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt},
+ ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt},
+ ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt},
+ ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt},
+ ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+ ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+ }
+)
diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go
new file mode 100644
index 000000000..c8e55cbc8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv4
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{}
+
+ sockOpts = [ssoMax]sockOpt{}
+)
diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go
new file mode 100644
index 000000000..466489fe0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_windows.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+const (
+ // See ws2tcpip.h.
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+ sysIP_DONTFRAGMENT = 0xe
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0xf
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x10
+ sysIP_PKTINFO = 0x13
+
+ sysSizeofInetPktinfo = 0x8
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqSource = 0xc
+)
+
+type sysInetPktinfo struct {
+ Addr [4]byte
+ Ifindex int32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte
+ Interface [4]byte
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte
+ Sourceaddr [4]byte
+ Interface [4]byte
+}
+
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx
+var (
+ ctlOpts = [ctlMax]ctlOpt{}
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTOS: {sysIP_TOS, ssoTypeInt},
+ ssoTTL: {sysIP_TTL, ssoTypeInt},
+ ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt},
+ ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt},
+ ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+ ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+ }
+)
+
+func (pi *sysInetPktinfo) setIfindex(i int) {
+ pi.Ifindex = int32(i)
+}
diff --git a/vendor/golang.org/x/net/ipv4/syscall_linux_386.go b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go
new file mode 100644
index 000000000..84f60bfb2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ sysGETSOCKOPT = 0xf
+ sysSETSOCKOPT = 0xe
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ if _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ if _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/syscall_unix.go b/vendor/golang.org/x/net/ipv4/syscall_unix.go
new file mode 100644
index 000000000..d952763f5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/syscall_unix.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!386 netbsd openbsd
+
+package ipv4
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv4/syscall_windows.go b/vendor/golang.org/x/net/ipv4/syscall_windows.go
new file mode 100644
index 000000000..0f42d22eb
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/syscall_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ return syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), (*int32)(unsafe.Pointer(l)))
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), int32(l))
+}
diff --git a/vendor/golang.org/x/net/ipv4/thunk_linux_386.s b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s
new file mode 100644
index 000000000..daa78bc02
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.2
+
+TEXT ·socketcall(SB),4,$0-36
+ JMP syscall·socketcall(SB)
diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go
new file mode 100644
index 000000000..9c632cd89
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/unicast_test.go
@@ -0,0 +1,246 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+func TestPacketConnReadWriteUnicastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ c, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+ wb := []byte("HELLO-R-U-THERE")
+
+ for i, toggle := range []bool{true, false, true} {
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ p.SetTTL(i + 1)
+ if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, err := p.WriteTo(wb, nil, dst); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Fatalf("got %v; want %v", rb[:n], wb)
+ }
+ }
+}
+
+func TestPacketConnReadWriteUnicastICMP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ dst, err := net.ResolveIPAddr("ip4", "127.0.0.1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ p := ipv4.NewPacketConn(c)
+ defer p.Close()
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+
+ for i, toggle := range []bool{true, false, true} {
+ wb, err := (&icmp.Message{
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ p.SetTTL(i + 1)
+ if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, err := p.WriteTo(wb, nil, dst); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ loop:
+ if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ } else {
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])
+ if err != nil {
+ t.Fatal(err)
+ }
+ if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho {
+ // On Linux we must handle our own sent packets.
+ goto loop
+ }
+ if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 {
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+}
+
+func TestRawConnReadWriteUnicastICMP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ dst, err := net.ResolveIPAddr("ip4", "127.0.0.1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
+
+ for i, toggle := range []bool{true, false, true} {
+ wb, err := (&icmp.Message{
+ Type: ipv4.ICMPTypeEcho, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wh := &ipv4.Header{
+ Version: ipv4.Version,
+ Len: ipv4.HeaderLen,
+ TOS: i + 1,
+ TotalLen: ipv4.HeaderLen + len(wb),
+ TTL: i + 1,
+ Protocol: 1,
+ Dst: dst.IP,
+ }
+ if err := r.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if err := r.WriteTo(wh, wb, nil); err != nil {
+ t.Fatal(err)
+ }
+ rb := make([]byte, ipv4.HeaderLen+128)
+ loop:
+ if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if _, b, _, err := r.ReadFrom(rb); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ } else {
+ m, err := icmp.ParseMessage(iana.ProtocolICMP, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho {
+ // On Linux we must handle our own sent packets.
+ goto loop
+ }
+ if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 {
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go
new file mode 100644
index 000000000..25606f21d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go
@@ -0,0 +1,139 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv4"
+)
+
+func TestConnUnicastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ ln, err := net.Listen("tcp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+
+ done := make(chan bool)
+ go acceptor(t, ln, done)
+
+ c, err := net.Dial("tcp4", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ testUnicastSocketOptions(t, ipv4.NewConn(c))
+
+ <-done
+}
+
+var packetConnUnicastSocketOptionTests = []struct {
+ net, proto, addr string
+}{
+ {"udp4", "", "127.0.0.1:0"},
+ {"ip4", ":icmp", "127.0.0.1"},
+}
+
+func TestPacketConnUnicastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ m, ok := nettest.SupportsRawIPSocket()
+ for _, tt := range packetConnUnicastSocketOptionTests {
+ if tt.net == "ip4" && !ok {
+ t.Log(m)
+ continue
+ }
+ c, err := net.ListenPacket(tt.net+tt.proto, tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ testUnicastSocketOptions(t, ipv4.NewPacketConn(c))
+ }
+}
+
+func TestRawConnUnicastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ c, err := net.ListenPacket("ip4:icmp", "127.0.0.1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ r, err := ipv4.NewRawConn(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testUnicastSocketOptions(t, r)
+}
+
+type testIPv4UnicastConn interface {
+ TOS() (int, error)
+ SetTOS(int) error
+ TTL() (int, error)
+ SetTTL(int) error
+}
+
+func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) {
+ tos := iana.DiffServCS0 | iana.NotECNTransport
+ switch runtime.GOOS {
+ case "windows":
+ // IP_TOS option is supported on Windows 8 and beyond.
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ if err := c.SetTOS(tos); err != nil {
+ t.Fatal(err)
+ }
+ if v, err := c.TOS(); err != nil {
+ t.Fatal(err)
+ } else if v != tos {
+ t.Fatalf("got %v; want %v", v, tos)
+ }
+ const ttl = 255
+ if err := c.SetTTL(ttl); err != nil {
+ t.Fatal(err)
+ }
+ if v, err := c.TTL(); err != nil {
+ t.Fatal(err)
+ } else if v != ttl {
+ t.Fatalf("got %v; want %v", v, ttl)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go
new file mode 100644
index 000000000..087c63906
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go
@@ -0,0 +1,99 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_darwin.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_STRIPHDR = 0x17
+ sysIP_RECVTTL = 0x18
+ sysIP_BOUND_IF = 0x19
+ sysIP_PKTINFO = 0x1a
+ sysIP_RECVPKTINFO = 0x1a
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+ sysIP_MULTICAST_VIF = 0xe
+ sysIP_MULTICAST_IFINDEX = 0x42
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x46
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+ sysIP_BLOCK_SOURCE = 0x48
+ sysIP_UNBLOCK_SOURCE = 0x49
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type sysInetPktinfo struct {
+ Ifindex uint32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte /* in_addr */
+ Sourceaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [128]byte
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [128]byte
+ Pad_cgo_1 [128]byte
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go
new file mode 100644
index 000000000..f5c9ccec4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go
@@ -0,0 +1,33 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_dragonfly.go
+
+// +build dragonfly
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_RECVTTL = 0x41
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_MULTICAST_VIF = 0xe
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+
+ sysSizeofIPMreq = 0x8
+)
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go
new file mode 100644
index 000000000..6fd67e1e9
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go
@@ -0,0 +1,93 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_SENDSRCADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_ONESBCAST = 0x17
+ sysIP_BINDANY = 0x18
+ sysIP_RECVTTL = 0x41
+ sysIP_MINTTL = 0x42
+ sysIP_DONTFRAG = 0x43
+ sysIP_RECVTOS = 0x44
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+ sysIP_MULTICAST_VIF = 0xe
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x46
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+ sysIP_BLOCK_SOURCE = 0x48
+ sysIP_UNBLOCK_SOURCE = 0x49
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte /* in_addr */
+ Sourceaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go
new file mode 100644
index 000000000..ebac6d792
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go
@@ -0,0 +1,95 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_SENDSRCADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_ONESBCAST = 0x17
+ sysIP_BINDANY = 0x18
+ sysIP_RECVTTL = 0x41
+ sysIP_MINTTL = 0x42
+ sysIP_DONTFRAG = 0x43
+ sysIP_RECVTOS = 0x44
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+ sysIP_MULTICAST_VIF = 0xe
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x46
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+ sysIP_BLOCK_SOURCE = 0x48
+ sysIP_UNBLOCK_SOURCE = 0x49
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte /* in_addr */
+ Sourceaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go
new file mode 100644
index 000000000..ebac6d792
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go
@@ -0,0 +1,95 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_SENDSRCADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_ONESBCAST = 0x17
+ sysIP_BINDANY = 0x18
+ sysIP_RECVTTL = 0x41
+ sysIP_MINTTL = 0x42
+ sysIP_DONTFRAG = 0x43
+ sysIP_RECVTOS = 0x44
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+ sysIP_MULTICAST_VIF = 0xe
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x46
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+ sysIP_BLOCK_SOURCE = 0x48
+ sysIP_UNBLOCK_SOURCE = 0x49
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte /* in_addr */
+ Sourceaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go
new file mode 100644
index 000000000..3733152a4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go
new file mode 100644
index 000000000..afa451906
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go
new file mode 100644
index 000000000..3733152a4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go
new file mode 100644
index 000000000..129a20ac6
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,arm64
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go
new file mode 100644
index 000000000..7ed9368f5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,mips64
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go
new file mode 100644
index 000000000..19fadae62
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,mips64le
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go
new file mode 100644
index 000000000..15426beee
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]uint8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go
new file mode 100644
index 000000000..beaadd5f0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc64
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go
new file mode 100644
index 000000000..0eb262305
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc64le
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go
new file mode 100644
index 000000000..90fe99ebb
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go
@@ -0,0 +1,150 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,s390x
+
+package ipv4
+
+const (
+ sysIP_TOS = 0x1
+ sysIP_TTL = 0x2
+ sysIP_HDRINCL = 0x3
+ sysIP_OPTIONS = 0x4
+ sysIP_ROUTER_ALERT = 0x5
+ sysIP_RECVOPTS = 0x6
+ sysIP_RETOPTS = 0x7
+ sysIP_PKTINFO = 0x8
+ sysIP_PKTOPTIONS = 0x9
+ sysIP_MTU_DISCOVER = 0xa
+ sysIP_RECVERR = 0xb
+ sysIP_RECVTTL = 0xc
+ sysIP_RECVTOS = 0xd
+ sysIP_MTU = 0xe
+ sysIP_FREEBIND = 0xf
+ sysIP_TRANSPARENT = 0x13
+ sysIP_RECVRETOPTS = 0x7
+ sysIP_ORIGDSTADDR = 0x14
+ sysIP_RECVORIGDSTADDR = 0x14
+ sysIP_MINTTL = 0x15
+ sysIP_NODEFRAG = 0x16
+ sysIP_UNICAST_IF = 0x32
+
+ sysIP_MULTICAST_IF = 0x20
+ sysIP_MULTICAST_TTL = 0x21
+ sysIP_MULTICAST_LOOP = 0x22
+ sysIP_ADD_MEMBERSHIP = 0x23
+ sysIP_DROP_MEMBERSHIP = 0x24
+ sysIP_UNBLOCK_SOURCE = 0x25
+ sysIP_BLOCK_SOURCE = 0x26
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+ sysIP_MSFILTER = 0x29
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIP_MULTICAST_ALL = 0x31
+
+ sysICMP_FILTER = 0x1
+
+ sysSO_EE_ORIGIN_NONE = 0x0
+ sysSO_EE_ORIGIN_LOCAL = 0x1
+ sysSO_EE_ORIGIN_ICMP = 0x2
+ sysSO_EE_ORIGIN_ICMP6 = 0x3
+ sysSO_EE_ORIGIN_TXSTATUS = 0x4
+ sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet = 0x10
+ sysSizeofInetPktinfo = 0xc
+ sysSizeofSockExtendedErr = 0x10
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqn = 0xc
+ sysSizeofIPMreqSource = 0xc
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPFilter = 0x4
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ X__pad [8]uint8
+}
+
+type sysInetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysSockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type sysIPMreqSource struct {
+ Multiaddr uint32
+ Interface uint32
+ Sourceaddr uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPFilter struct {
+ Data uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go
new file mode 100644
index 000000000..8a440eb65
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go
@@ -0,0 +1,30 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_netbsd.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x14
+ sysIP_RECVTTL = 0x17
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+
+ sysSizeofIPMreq = 0x8
+)
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go
new file mode 100644
index 000000000..fd522b573
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go
@@ -0,0 +1,30 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_openbsd.go
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x1e
+ sysIP_RECVTTL = 0x1f
+
+ sysIP_MULTICAST_IF = 0x9
+ sysIP_MULTICAST_TTL = 0xa
+ sysIP_MULTICAST_LOOP = 0xb
+ sysIP_ADD_MEMBERSHIP = 0xc
+ sysIP_DROP_MEMBERSHIP = 0xd
+
+ sysSizeofIPMreq = 0x8
+)
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go
new file mode 100644
index 000000000..d7c23349a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go
@@ -0,0 +1,60 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_solaris.go
+
+// +build solaris
+
+package ipv4
+
+const (
+ sysIP_OPTIONS = 0x1
+ sysIP_HDRINCL = 0x2
+ sysIP_TOS = 0x3
+ sysIP_TTL = 0x4
+ sysIP_RECVOPTS = 0x5
+ sysIP_RECVRETOPTS = 0x6
+ sysIP_RECVDSTADDR = 0x7
+ sysIP_RETOPTS = 0x8
+ sysIP_RECVIF = 0x9
+ sysIP_RECVSLLA = 0xa
+ sysIP_RECVTTL = 0xb
+ sysIP_NEXTHOP = 0x19
+ sysIP_PKTINFO = 0x1a
+ sysIP_RECVPKTINFO = 0x1a
+ sysIP_DONTFRAG = 0x1b
+ sysIP_BOUND_IF = 0x41
+ sysIP_UNSPEC_SRC = 0x42
+ sysIP_BROADCAST_TTL = 0x43
+ sysIP_DHCPINIT_IF = 0x45
+
+ sysIP_MULTICAST_IF = 0x10
+ sysIP_MULTICAST_TTL = 0x11
+ sysIP_MULTICAST_LOOP = 0x12
+ sysIP_ADD_MEMBERSHIP = 0x13
+ sysIP_DROP_MEMBERSHIP = 0x14
+ sysIP_BLOCK_SOURCE = 0x15
+ sysIP_UNBLOCK_SOURCE = 0x16
+ sysIP_ADD_SOURCE_MEMBERSHIP = 0x17
+ sysIP_DROP_SOURCE_MEMBERSHIP = 0x18
+
+ sysSizeofInetPktinfo = 0xc
+
+ sysSizeofIPMreq = 0x8
+ sysSizeofIPMreqSource = 0xc
+)
+
+type sysInetPktinfo struct {
+ Ifindex uint32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type sysIPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type sysIPMreqSource struct {
+ Multiaddr [4]byte /* in_addr */
+ Sourceaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go
new file mode 100644
index 000000000..03d478dc0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/bpf_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/ipv6"
+)
+
+func TestBPF(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ l, err := net.ListenPacket("udp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ p := ipv6.NewPacketConn(l)
+
+ // This filter accepts UDP packets whose first payload byte is
+ // even.
+ prog, err := bpf.Assemble([]bpf.Instruction{
+ // Load the first byte of the payload (skipping UDP header).
+ bpf.LoadAbsolute{Off: 8, Size: 1},
+ // Select LSB of the byte.
+ bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1},
+ // Byte is even?
+ bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1},
+ // Accept.
+ bpf.RetConstant{Val: 4096},
+ // Ignore.
+ bpf.RetConstant{Val: 0},
+ })
+ if err != nil {
+ t.Fatalf("compiling BPF: %s", err)
+ }
+
+ if err = p.SetBPF(prog); err != nil {
+ t.Fatalf("attaching filter to Conn: %s", err)
+ }
+
+ s, err := net.Dial("udp6", l.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+ go func() {
+ for i := byte(0); i < 10; i++ {
+ s.Write([]byte{i})
+ }
+ }()
+
+ l.SetDeadline(time.Now().Add(2 * time.Second))
+ seen := make([]bool, 5)
+ for {
+ var b [512]byte
+ n, _, err := l.ReadFrom(b[:])
+ if err != nil {
+ t.Fatalf("reading from listener: %s", err)
+ }
+ if n != 1 {
+ t.Fatalf("unexpected packet length, want 1, got %d", n)
+ }
+ if b[0] >= 10 {
+ t.Fatalf("unexpected byte, want 0-9, got %d", b[0])
+ }
+ if b[0]%2 != 0 {
+ t.Fatalf("got odd byte %d, wanted only even bytes", b[0])
+ }
+ seen[b[0]/2] = true
+
+ seenAll := true
+ for _, v := range seen {
+ if !v {
+ seenAll = false
+ break
+ }
+ }
+ if seenAll {
+ break
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/bpfopt_linux.go b/vendor/golang.org/x/net/ipv6/bpfopt_linux.go
new file mode 100644
index 000000000..328427cba
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/bpfopt_linux.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "os"
+ "unsafe"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/internal/netreflect"
+)
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ prog := sysSockFProg{
+ Len: uint16(len(filter)),
+ Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])),
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog))))
+}
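
For context only (not part of this diff): a minimal sketch of how a caller might use the SetBPF method added above, assuming golang.org/x/net/bpf for assembling the classic-BPF program. The trivial "accept everything, return up to 128 bytes" filter below is purely illustrative.

	package main

	import (
		"log"
		"net"

		"golang.org/x/net/bpf"
		"golang.org/x/net/ipv6"
	)

	func main() {
		// Open an IPv6 UDP socket; SetBPF is only supported on Linux.
		c, err := net.ListenPacket("udp6", "[::1]:0")
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// Assemble a trivial classic-BPF program that accepts every
		// packet and returns at most 128 bytes of it.
		prog, err := bpf.Assemble([]bpf.Instruction{
			bpf.RetConstant{Val: 128},
		})
		if err != nil {
			log.Fatal(err)
		}

		p := ipv6.NewPacketConn(c)
		if err := p.SetBPF(prog); err != nil {
			log.Fatal(err)
		}
	}
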
diff --git a/vendor/golang.org/x/net/ipv6/bpfopt_stub.go b/vendor/golang.org/x/net/ipv6/bpfopt_stub.go
new file mode 100644
index 000000000..2e4de5f0d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/bpfopt_stub.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv6
+
+import "golang.org/x/net/bpf"
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go
new file mode 100644
index 000000000..b7362aae7
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control.go
@@ -0,0 +1,85 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "fmt"
+ "net"
+ "sync"
+)
+
+// Note that RFC 3542 obsoletes RFC 2292, but OS X Snow Leopard and
+// earlier still support only RFC 2292. Be aware that almost all
+// protocol implementations prohibit combining RFC 2292 and RFC 3542
+// options for practical reasons.
+
+type rawOpt struct {
+ sync.RWMutex
+ cflags ControlFlags
+}
+
+func (c *rawOpt) set(f ControlFlags) { c.cflags |= f }
+func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f }
+func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }
+
+// A ControlFlags represents per-packet IP-level socket option
+// control flags.
+type ControlFlags uint
+
+const (
+ FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet
+ FlagHopLimit // pass the hop limit on the received packet
+ FlagSrc // pass the source address on the received packet
+ FlagDst // pass the destination address on the received packet
+ FlagInterface // pass the interface index on the received packet
+ FlagPathMTU // pass the path MTU on the received packet path
+)
+
+const flagPacketInfo = FlagDst | FlagInterface
+
+// A ControlMessage represents per-packet IP-level socket options.
+type ControlMessage struct {
+	// Receiving socket options: SetControlMessage enables
+	// receiving the options from the protocol stack via the
+	// ReadFrom method of PacketConn.
+	//
+	// Specifying socket options: passing a ControlMessage to the
+	// WriteTo method of PacketConn sends the options to the
+	// protocol stack.
+	//
+ TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying
+ HopLimit int // hop limit, must be 1 <= value <= 255 when specifying
+ Src net.IP // source address, specifying only
+ Dst net.IP // destination address, receiving only
+ IfIndex int // interface index, must be 1 <= value when specifying
+ NextHop net.IP // next hop address, specifying only
+ MTU int // path MTU, receiving only
+}
+
+func (cm *ControlMessage) String() string {
+ if cm == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU)
+}
+
+// Ancillary data socket options
+const (
+ ctlTrafficClass = iota // header field
+ ctlHopLimit // header field
+ ctlPacketInfo // inbound or outbound packet path
+ ctlNextHop // nexthop
+ ctlPathMTU // path mtu
+ ctlMax
+)
+
+// A ctlOpt represents a binding for ancillary data socket option.
+type ctlOpt struct {
+ name int // option name, must be equal or greater than 1
+ length int // option length
+ marshal func([]byte, *ControlMessage) []byte
+ parse func(*ControlMessage, []byte)
+}
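
As a rough usage sketch (not part of this diff), a receiver typically enables the ControlFlags it cares about via SetControlMessage and then reads the per-packet metadata back out of the ControlMessage returned by ReadFrom:

	package main

	import (
		"log"
		"net"

		"golang.org/x/net/ipv6"
	)

	func main() {
		c, err := net.ListenPacket("udp6", "[::1]:0")
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		p := ipv6.NewPacketConn(c)
		// Ask the stack to deliver hop limit, destination address and
		// interface index with each received datagram.
		if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {
			log.Fatal(err)
		}

		b := make([]byte, 1500)
		n, cm, src, err := p.ReadFrom(b)
		if err != nil {
			log.Fatal(err)
		}
		// cm.HopLimit, cm.Dst and cm.IfIndex are populated per the flags above.
		log.Printf("read %d bytes from %v: %v", n, src, cm)
	}
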
diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go
new file mode 100644
index 000000000..80ec2e2f0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go
@@ -0,0 +1,55 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package ipv6
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_2292HOPLIMIT
+ m.SetLen(syscall.CmsgLen(4))
+ if cm != nil {
+ data := b[syscall.CmsgLen(0):]
+ nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit))
+ }
+ return b[syscall.CmsgSpace(4):]
+}
+
+func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_2292PKTINFO
+ m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo))
+ if cm != nil {
+ pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+ if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
+ copy(pi.Addr[:], ip)
+ }
+ if cm.IfIndex > 0 {
+ pi.setIfindex(cm.IfIndex)
+ }
+ }
+ return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):]
+}
+
+func marshal2292NextHop(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_2292NEXTHOP
+ m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6))
+ if cm != nil {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+ sa.setSockaddr(cm.NextHop, cm.IfIndex)
+ }
+ return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):]
+}
diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go
new file mode 100644
index 000000000..f344d16d0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package ipv6
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func marshalTrafficClass(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_TCLASS
+ m.SetLen(syscall.CmsgLen(4))
+ if cm != nil {
+ data := b[syscall.CmsgLen(0):]
+ nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass))
+ }
+ return b[syscall.CmsgSpace(4):]
+}
+
+func parseTrafficClass(cm *ControlMessage, b []byte) {
+ cm.TrafficClass = int(nativeEndian.Uint32(b[:4]))
+}
+
+func marshalHopLimit(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_HOPLIMIT
+ m.SetLen(syscall.CmsgLen(4))
+ if cm != nil {
+ data := b[syscall.CmsgLen(0):]
+ nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit))
+ }
+ return b[syscall.CmsgSpace(4):]
+}
+
+func parseHopLimit(cm *ControlMessage, b []byte) {
+ cm.HopLimit = int(nativeEndian.Uint32(b[:4]))
+}
+
+func marshalPacketInfo(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_PKTINFO
+ m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo))
+ if cm != nil {
+ pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+ if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
+ copy(pi.Addr[:], ip)
+ }
+ if cm.IfIndex > 0 {
+ pi.setIfindex(cm.IfIndex)
+ }
+ }
+ return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):]
+}
+
+func parsePacketInfo(cm *ControlMessage, b []byte) {
+ pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0]))
+ cm.Dst = pi.Addr[:]
+ cm.IfIndex = int(pi.Ifindex)
+}
+
+func marshalNextHop(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_NEXTHOP
+ m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6))
+ if cm != nil {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+ sa.setSockaddr(cm.NextHop, cm.IfIndex)
+ }
+ return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):]
+}
+
+func parseNextHop(cm *ControlMessage, b []byte) {
+}
+
+func marshalPathMTU(b []byte, cm *ControlMessage) []byte {
+ m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ m.Level = iana.ProtocolIPv6
+ m.Type = sysIPV6_PATHMTU
+ m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo))
+ return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):]
+}
+
+func parsePathMTU(cm *ControlMessage, b []byte) {
+ mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0]))
+ cm.Dst = mi.Addr.Addr[:]
+ cm.IfIndex = int(mi.Addr.Scope_id)
+ cm.MTU = int(mi.Mtu)
+}
diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go
new file mode 100644
index 000000000..952b2bd2a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv6
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ return errOpNoSupport
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+ return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ return nil, errOpNoSupport
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go
new file mode 100644
index 000000000..46fbdcbe3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control_unix.go
@@ -0,0 +1,166 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package ipv6
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/net/internal/iana"
+)
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ opt.Lock()
+ defer opt.Unlock()
+ if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagTrafficClass)
+ } else {
+ opt.clear(FlagTrafficClass)
+ }
+ }
+ if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagHopLimit)
+ } else {
+ opt.clear(FlagHopLimit)
+ }
+ }
+ if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(cf & flagPacketInfo)
+ } else {
+ opt.clear(cf & flagPacketInfo)
+ }
+ }
+ if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 {
+ if err := setInt(s, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil {
+ return err
+ }
+ if on {
+ opt.set(FlagPathMTU)
+ } else {
+ opt.clear(FlagPathMTU)
+ }
+ }
+ return nil
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+ opt.RLock()
+ var l int
+ if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length)
+ }
+ if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length)
+ }
+ if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+ }
+ if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 {
+ l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length)
+ }
+ if l > 0 {
+ oob = make([]byte, l)
+ b := oob
+ if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 {
+ b = ctlOpts[ctlTrafficClass].marshal(b, nil)
+ }
+ if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 {
+ b = ctlOpts[ctlHopLimit].marshal(b, nil)
+ }
+ if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 {
+ b = ctlOpts[ctlPacketInfo].marshal(b, nil)
+ }
+ if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 {
+ b = ctlOpts[ctlPathMTU].marshal(b, nil)
+ }
+ }
+ opt.RUnlock()
+ return
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ if len(b) == 0 {
+ return nil, nil
+ }
+ cmsgs, err := syscall.ParseSocketControlMessage(b)
+ if err != nil {
+ return nil, os.NewSyscallError("parse socket control message", err)
+ }
+ cm := &ControlMessage{}
+ for _, m := range cmsgs {
+ if m.Header.Level != iana.ProtocolIPv6 {
+ continue
+ }
+ switch int(m.Header.Type) {
+ case ctlOpts[ctlTrafficClass].name:
+ ctlOpts[ctlTrafficClass].parse(cm, m.Data[:])
+ case ctlOpts[ctlHopLimit].name:
+ ctlOpts[ctlHopLimit].parse(cm, m.Data[:])
+ case ctlOpts[ctlPacketInfo].name:
+ ctlOpts[ctlPacketInfo].parse(cm, m.Data[:])
+ case ctlOpts[ctlPathMTU].name:
+ ctlOpts[ctlPathMTU].parse(cm, m.Data[:])
+ }
+ }
+ return cm, nil
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+ if cm == nil {
+ return
+ }
+ var l int
+ tclass := false
+ if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 {
+ tclass = true
+ l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length)
+ }
+ hoplimit := false
+ if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 {
+ hoplimit = true
+ l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length)
+ }
+ pktinfo := false
+ if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) {
+ pktinfo = true
+ l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+ }
+ nexthop := false
+ if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil {
+ nexthop = true
+ l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length)
+ }
+ if l > 0 {
+ oob = make([]byte, l)
+ b := oob
+ if tclass {
+ b = ctlOpts[ctlTrafficClass].marshal(b, cm)
+ }
+ if hoplimit {
+ b = ctlOpts[ctlHopLimit].marshal(b, cm)
+ }
+ if pktinfo {
+ b = ctlOpts[ctlPacketInfo].marshal(b, cm)
+ }
+ if nexthop {
+ b = ctlOpts[ctlNextHop].marshal(b, cm)
+ }
+ }
+ return
+}
diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go
new file mode 100644
index 000000000..2773a5204
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/control_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "syscall"
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+ // TODO(mikio): implement this
+ return syscall.EWINDOWS
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+ // TODO(mikio): implement this
+ return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+ // TODO(mikio): implement this
+ return nil, syscall.EWINDOWS
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+ // TODO(mikio): implement this
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go
new file mode 100644
index 000000000..4c7f476a8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go
@@ -0,0 +1,112 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#define __APPLE_USE_RFC_3542
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+
+ sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+ sysICMP6_FILTER = C.ICMP6_FILTER
+ sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
+ sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
+ sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP
+ sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
+ sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
+ sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
+
+ sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+
+ sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+ sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+ sysIPV6_MSFILTER = C.IPV6_MSFILTER
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
+
+ sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+ sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+ sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+ sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrStorage C.struct_sockaddr_storage
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysICMPv6Filter C.struct_icmp6_filter
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
new file mode 100644
index 000000000..c72487ceb
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
@@ -0,0 +1,84 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+ sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+ sysICMP6_FILTER = C.ICMP6_FILTER
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+ sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+ sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+ sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+ sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+ sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysICMPv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go
new file mode 100644
index 000000000..de199ec6a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go
@@ -0,0 +1,105 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+ sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+ sysICMP6_FILTER = C.ICMP6_FILTER
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+ sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+ sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+ sysIPV6_BINDANY = C.IPV6_BINDANY
+
+ sysIPV6_MSFILTER = C.IPV6_MSFILTER
+
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+ sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+ sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+ sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrStorage C.struct_sockaddr_storage
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
+
+type sysICMPv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go
new file mode 100644
index 000000000..664305d8b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_linux.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+ sysIPV6_ADDRFORM = C.IPV6_ADDRFORM
+ sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
+ sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
+ sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
+ sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
+ sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_FLOWINFO = C.IPV6_FLOWINFO
+
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP
+ sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+ sysMCAST_MSFILTER = C.MCAST_MSFILTER
+ sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT
+ sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER
+ sysIPV6_MTU = C.IPV6_MTU
+ sysIPV6_RECVERR = C.IPV6_RECVERR
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+ sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST
+ sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST
+
+ //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT
+ //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT
+ //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO
+ //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE
+ //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE
+ //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT
+
+ sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR
+ sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND
+
+ sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+ sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+
+ sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES
+
+ sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
+ sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT
+ sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
+ sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
+ sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
+ sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
+
+ sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT
+
+ sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR
+ sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR
+ sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT
+ sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF
+
+ sysICMPV6_FILTER = C.ICMPV6_FILTER
+
+ sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK
+ sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS
+ sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS
+ sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY
+
+ sysSOL_SOCKET = C.SOL_SOCKET
+ sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+ sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+ sysSizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ sysSizeofGroupReq = C.sizeof_struct_group_req
+ sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6FlowlabelReq C.struct_in6_flowlabel_req
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysGroupReq C.struct_group_req
+
+type sysGroupSourceReq C.struct_group_source_req
+
+type sysICMPv6Filter C.struct_icmp6_filter
+
+type sysSockFProg C.struct_sock_fprog
+
+type sysSockFilter C.struct_sock_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go
new file mode 100644
index 000000000..7bd09e8e8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go
@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+ sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+ sysICMP6_FILTER = C.ICMP6_FILTER
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+ sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+ sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+ sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysICMPv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go
new file mode 100644
index 000000000..6796d9b2f
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+ sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+ sysICMP6_FILTER = C.ICMP6_FILTER
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+
+ sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL
+ sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL
+ sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL
+ sysIPSEC6_OUTSA = C.IPSEC6_OUTSA
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+ sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+ sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL
+
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+ sysIPV6_PIPEX = C.IPV6_PIPEX
+
+ sysIPV6_RTABLE = C.IPV6_RTABLE
+
+ sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+ sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+ sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysICMPv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go
new file mode 100644
index 000000000..972b17126
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go
@@ -0,0 +1,96 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+ sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+ sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+ sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+ sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+ sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+ sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+
+ sysIPV6_PKTINFO = C.IPV6_PKTINFO
+
+ sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+ sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+ sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+ sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+
+ sysIPV6_RTHDR = C.IPV6_RTHDR
+ sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+ sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+ sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+ sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+
+ sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+
+ sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS
+
+ sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+ sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+ sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+ sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+ sysIPV6_SEC_OPT = C.IPV6_SEC_OPT
+ sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES
+ sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+ sysIPV6_PATHMTU = C.IPV6_PATHMTU
+ sysIPV6_TCLASS = C.IPV6_TCLASS
+ sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+ sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+ sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
+ sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
+ sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
+ sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
+ sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
+ sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
+
+ sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK
+ sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT
+ sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK
+ sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT
+ sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK
+ sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT
+
+ sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK
+
+ sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT
+
+ sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
+ sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC
+
+ sysICMP6_FILTER = C.ICMP6_FILTER
+
+ sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+ sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+ sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sysSockaddrInet6 C.struct_sockaddr_in6
+
+type sysInet6Pktinfo C.struct_in6_pktinfo
+
+type sysIPv6Mtuinfo C.struct_ip6_mtuinfo
+
+type sysIPv6Mreq C.struct_ipv6_mreq
+
+type sysICMPv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go
new file mode 100644
index 000000000..4ea7bc2d6
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go
@@ -0,0 +1,290 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// MulticastHopLimit returns the hop limit field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastHopLimit() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoMulticastHopLimit])
+}
+
+// SetMulticastHopLimit sets the hop limit field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoMulticastHopLimit], hoplim)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return nil, err
+ }
+ return getInterface(s, &sockOpts[ssoMulticastInterface])
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInterface(s, &sockOpts[ssoMulticastInterface], ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+ if !c.ok() {
+ return false, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return false, err
+ }
+ on, err := getInt(s, &sockOpts[ssoMulticastLoopback])
+ if err != nil {
+ return false, err
+ }
+ return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoMulticastLoopback], boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return setGroup(s, &sockOpts[ssoJoinGroup], ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is any-source group or
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return setGroup(s, &sockOpts[ssoLeaveGroup], ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP16(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoJoinSourceGroup], ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP16(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src)
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the already joined any-source groups by JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP16(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoBlockSourceGroup], ifi, grp, src)
+}
+
+// IncludeSourceSpecificGroup includes the excluded source-specific
+// group by ExcludeSourceSpecificGroup again on the interface ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ grp := netAddrToIP16(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP16(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return setSourceGroup(s, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src)
+}
+
+// Checksum reports whether the kernel will compute, store or verify a
+// checksum for both incoming and outgoing packets. If on is true, it
+// returns an offset in bytes into the data of where the checksum
+// field is located.
+func (c *dgramOpt) Checksum() (on bool, offset int, err error) {
+ if !c.ok() {
+ return false, 0, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return false, 0, err
+ }
+ offset, err = getInt(s, &sockOpts[ssoChecksum])
+ if err != nil {
+ return false, 0, err
+ }
+ if offset < 0 {
+ return false, 0, nil
+ }
+ return true, offset, nil
+}
+
+// SetChecksum enables the kernel checksum processing. If on is true,
+// the offset should be an offset in bytes into the data of where the
+// checksum field is located.
+func (c *dgramOpt) SetChecksum(on bool, offset int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ if !on {
+ offset = -1
+ }
+ return setInt(s, &sockOpts[ssoChecksum], offset)
+}
+
+// ICMPFilter returns an ICMP filter.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return nil, err
+ }
+ return getICMPFilter(s, &sockOpts[ssoICMPFilter])
+}
+
+// SetICMPFilter deploys the ICMP filter.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setICMPFilter(s, &sockOpts[ssoICMPFilter], f)
+}
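
The dgramOpt methods above are not called directly; they are embedded into ipv6.PacketConn (see endpoint.go further down in this diff) and surface as its multicast, checksum and ICMP-filter options. A minimal usage sketch, assuming a host that has an interface named "en0":

	c, err := net.ListenPacket("udp6", "[::]:0")
	if err != nil {
		// error handling
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)

	// Options promoted from dgramOpt.
	if err := p.SetMulticastHopLimit(5); err != nil {
		// error handling
	}
	en0, err := net.InterfaceByName("en0") // assumed interface name
	if err != nil {
		// error handling
	}
	group := &net.UDPAddr{IP: net.ParseIP("ff02::114")}
	if err := p.JoinGroup(en0, group); err != nil {
		// error handling
	}
	defer p.LeaveGroup(en0, group)
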
diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go
new file mode 100644
index 000000000..fb067fb2f
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go
@@ -0,0 +1,119 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv6
+
+import "net"
+
+// MulticastHopLimit returns the hop limit field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastHopLimit() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetMulticastHopLimit sets the hop limit field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {
+ return errOpNoSupport
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+ return nil, errOpNoSupport
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+ return errOpNoSupport
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+ return false, errOpNoSupport
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and send back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+ return errOpNoSupport
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+ return errOpNoSupport
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is any-source group or
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+ return errOpNoSupport
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the already joined any-source groups by JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// IncludeSourceSpecificGroup includes the excluded source-specific
+// group by ExcludeSourceSpecificGroup again on the interface ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ return errOpNoSupport
+}
+
+// Checksum reports whether the kernel will compute, store or verify a
+// checksum for both incoming and outgoing packets. If on is true, it
+// returns an offset in bytes into the data of where the checksum
+// field is located.
+func (c *dgramOpt) Checksum() (on bool, offset int, err error) {
+ return false, 0, errOpNoSupport
+}
+
+// SetChecksum enables the kernel checksum processing. If on is true,
+// the offset should be an offset in bytes into the data of where the
+// checksum field is located.
+func (c *dgramOpt) SetChecksum(on bool, offset int) error {
+ return errOpNoSupport
+}
+
+// ICMPFilter returns an ICMP filter.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+ return nil, errOpNoSupport
+}
+
+// SetICMPFilter deploys the ICMP filter.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go
new file mode 100644
index 000000000..7d75698fb
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ipv6 implements IP-level socket options for the Internet
+// Protocol version 6.
+//
+// The package provides IP-level socket options that allow
+// manipulation of IPv6 facilities.
+//
+// The IPv6 protocol is defined in RFC 2460.
+// Basic and advanced socket interface extensions are defined in RFC
+// 3493 and RFC 3542.
+// Socket interface extensions for multicast source filters are
+// defined in RFC 3678.
+// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810.
+// Source-specific multicast is defined in RFC 4607.
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv6 transport. When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, ipv6.Conn is used to set the traffic class field on the
+// IPv6 header for each packet.
+//
+// ln, err := net.Listen("tcp6", "[::]:1024")
+// if err != nil {
+// // error handling
+// }
+// defer ln.Close()
+// for {
+// c, err := ln.Accept()
+// if err != nil {
+// // error handling
+// }
+// go func(c net.Conn) {
+// defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil {
+// // error handling
+// }
+// if _, err := c.Write(data); err != nil {
+// // error handling
+// }
+// }(c)
+// }
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv6 transport. A few network facilities must be prepared before
+// you begin multicasting, at a minimum joining network interfaces and
+// multicast groups.
+//
+// en0, err := net.InterfaceByName("en0")
+// if err != nil {
+// // error handling
+// }
+// en1, err := net.InterfaceByIndex(911)
+// if err != nil {
+// // error handling
+// }
+// group := net.ParseIP("ff02::114")
+//
+// First, an application listens to an appropriate address with an
+// appropriate service port.
+//
+// c, err := net.ListenPacket("udp6", "[::]:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+//
+// Second, the application joins multicast groups and starts listening to
+// the groups on the specified network interfaces. Note that the
+// service port for transport layer protocol does not matter with this
+// operation as joining groups affects only network and link layer
+// protocols, such as IPv6 and Ethernet.
+//
+// p := ipv6.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+//
+// The application might enable per-packet control message transmissions
+// to and from the protocol stack within the kernel. When the application
+// needs a destination address on an incoming packet,
+// SetControlMessage of ipv6.PacketConn is used to enable control
+// message transmissions.
+//
+// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil {
+// // error handling
+// }
+//
+// The application could identify whether the received packets are
+// of interest by using the control message that contains the
+// destination address of the received packet.
+//
+// b := make([]byte, 1500)
+// for {
+// n, rcm, src, err := p.ReadFrom(b)
+// if err != nil {
+// // error handling
+// }
+// if rcm.Dst.IsMulticast() {
+// if rcm.Dst.Equal(group) {
+// // joined group, do something
+// } else {
+// // unknown group, discard
+// continue
+// }
+// }
+//
+// The application can also send both unicast and multicast packets.
+//
+// p.SetTrafficClass(0x0)
+// p.SetHopLimit(16)
+// if _, err := p.WriteTo(data[:n], nil, src); err != nil {
+// // error handling
+// }
+// dst := &net.UDPAddr{IP: group, Port: 1024}
+// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1}
+// for _, ifi := range []*net.Interface{en0, en1} {
+// wcm.IfIndex = ifi.Index
+// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil {
+// // error handling
+// }
+// }
+// }
+//
+//
+// More multicasting
+//
+// An application that uses PacketConn may join multiple multicast
+// groups. For example, a UDP listener with port 1024 might join two
+// different groups across two different network interfaces by
+// using:
+//
+// c, err := net.ListenPacket("udp6", "[::]:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+// p := ipv6.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil {
+// // error handling
+// }
+//
+// It is possible for multiple UDP listeners that listen on the same
+// UDP port to join the same multicast group. The net package will
+// provide a socket that listens to a wildcard address with a reusable
+// UDP port when an appropriate multicast address prefix is passed to
+// net.ListenPacket or net.ListenUDP.
+//
+// c1, err := net.ListenPacket("udp6", "[ff02::]:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c1.Close()
+// c2, err := net.ListenPacket("udp6", "[ff02::]:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c2.Close()
+// p1 := ipv6.NewPacketConn(c1)
+// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+// // error handling
+// }
+// p2 := ipv6.NewPacketConn(c2)
+// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+// // error handling
+// }
+//
+// Also it is possible for the application to leave or rejoin a
+// multicast group on the network interface.
+//
+// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil {
+// // error handling
+// }
+//
+//
+// Source-specific multicasting
+//
+// An application that uses PacketConn on an MLDv2-supported platform is
+// able to join source-specific multicast groups.
+// The application may use JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup for the operation known as "include" mode,
+//
+// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")}
+// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")}
+// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+//
+// or JoinGroup, ExcludeSourceSpecificGroup,
+// IncludeSourceSpecificGroup and LeaveGroup for the operation known
+// as "exclude" mode.
+//
+// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")}
+// if err := p.JoinGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+//
+// Note that it depends on each platform implementation what happens
+// when an application running on an MLDv2-unsupported platform uses
+// JoinSourceSpecificGroup and LeaveSourceSpecificGroup.
+// In general the platform tries to fall back to conversations using
+// MLDv1 and starts to listen to multicast traffic.
+// In the fallback case, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup may return an error.
+package ipv6 // import "golang.org/x/net/ipv6"
diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go
new file mode 100644
index 000000000..60e7d93c2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/endpoint.go
@@ -0,0 +1,125 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// A Conn represents a network endpoint that uses IPv6 transport.
+// It allows setting basic IP-level socket options such as traffic
+// class and hop limit.
+type Conn struct {
+ genericOpt
+}
+
+type genericOpt struct {
+ net.Conn
+}
+
+func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// PathMTU returns a path MTU value for the destination associated
+// with the endpoint.
+func (c *Conn) PathMTU() (int, error) {
+ if !c.genericOpt.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.genericOpt.Conn)
+ if err != nil {
+ return 0, err
+ }
+ _, mtu, err := getMTUInfo(s, &sockOpts[ssoPathMTU])
+ if err != nil {
+ return 0, err
+ }
+ return mtu, nil
+}
+
+// NewConn returns a new Conn.
+func NewConn(c net.Conn) *Conn {
+ return &Conn{
+ genericOpt: genericOpt{Conn: c},
+ }
+}
+
+// A PacketConn represents a packet network endpoint that uses IPv6
+// transport. It is used to control several IP-level socket options
+// including IPv6 header manipulation. It also provides datagram
+// based network I/O methods specific to the IPv6 and higher layer
+// protocols such as OSPF, GRE, and UDP.
+type PacketConn struct {
+ genericOpt
+ dgramOpt
+ payloadHandler
+}
+
+type dgramOpt struct {
+ net.PacketConn
+}
+
+func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil }
+
+// SetControlMessage allows receiving per-packet IP-level
+// socket options.
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+ if err != nil {
+ return err
+ }
+ return setControlMessage(s, &c.payloadHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.Close()
+}
+
+// NewPacketConn returns a new PacketConn using c as its underlying
+// transport.
+func NewPacketConn(c net.PacketConn) *PacketConn {
+ return &PacketConn{
+ genericOpt: genericOpt{Conn: c.(net.Conn)},
+ dgramOpt: dgramOpt{PacketConn: c},
+ payloadHandler: payloadHandler{PacketConn: c},
+ }
+}
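
NewConn and NewPacketConn are the only constructors; wrapping an existing net connection is all that is needed to make the IP-level options above available. A small sketch, assuming the dial target is reachable and that the net, log and ipv6 packages are imported:

	c, err := net.Dial("tcp6", "[2001:db8::1]:443") // hypothetical peer
	if err != nil {
		// error handling
	}
	defer c.Close()
	if mtu, err := ipv6.NewConn(c).PathMTU(); err == nil {
		log.Printf("path MTU toward peer: %d bytes", mtu)
	}
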
diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go
new file mode 100644
index 000000000..e761aa2a1
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/example_test.go
@@ -0,0 +1,216 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/ipv6"
+)
+
+func ExampleConn_markingTCP() {
+ ln, err := net.Listen("tcp", "[::]:1024")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer ln.Close()
+
+ for {
+ c, err := ln.Accept()
+ if err != nil {
+ log.Fatal(err)
+ }
+ go func(c net.Conn) {
+ defer c.Close()
+ if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil {
+ p := ipv6.NewConn(c)
+ if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11
+ log.Fatal(err)
+ }
+ if err := p.SetHopLimit(128); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil {
+ log.Fatal(err)
+ }
+ }(c)
+ }
+}
+
+func ExamplePacketConn_servingOneShotMulticastDNS() {
+ c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+
+ en0, err := net.InterfaceByName("en0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")}
+ if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil {
+ log.Fatal(err)
+ }
+ defer p.LeaveGroup(en0, &mDNSLinkLocal)
+ if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {
+ log.Fatal(err)
+ }
+
+ var wcm ipv6.ControlMessage
+ b := make([]byte, 1500)
+ for {
+ _, rcm, peer, err := p.ReadFrom(b)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) {
+ continue
+ }
+ wcm.IfIndex = rcm.IfIndex
+ answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this
+ if _, err := p.WriteTo(answers, &wcm, peer); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func ExamplePacketConn_tracingIPPacketRoute() {
+ // Tracing an IP packet route to www.google.com.
+
+ const host = "www.google.com"
+ ips, err := net.LookupIP(host)
+ if err != nil {
+ log.Fatal(err)
+ }
+ var dst net.IPAddr
+ for _, ip := range ips {
+ if ip.To16() != nil && ip.To4() == nil {
+ dst.IP = ip
+ fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host)
+ break
+ }
+ }
+ if dst.IP == nil {
+ log.Fatal("no AAAA record found")
+ }
+
+ c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+
+ if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {
+ log.Fatal(err)
+ }
+ wm := icmp.Message{
+ Type: ipv6.ICMPTypeEchoRequest, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }
+ var f ipv6.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv6.ICMPTypeTimeExceeded)
+ f.Accept(ipv6.ICMPTypeEchoReply)
+ if err := p.SetICMPFilter(&f); err != nil {
+ log.Fatal(err)
+ }
+
+ var wcm ipv6.ControlMessage
+ rb := make([]byte, 1500)
+ for i := 1; i <= 64; i++ { // up to 64 hops
+ wm.Body.(*icmp.Echo).Seq = i
+ wb, err := wm.Marshal(nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+		// In the real world there are usually several
+		// traffic-engineered paths for each hop.
+ // You may need to probe a few times to each hop.
+ begin := time.Now()
+ wcm.HopLimit = i
+ if _, err := p.WriteTo(wb, &wcm, &dst); err != nil {
+ log.Fatal(err)
+ }
+ if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
+ log.Fatal(err)
+ }
+ n, rcm, peer, err := p.ReadFrom(rb)
+ if err != nil {
+ if err, ok := err.(net.Error); ok && err.Timeout() {
+ fmt.Printf("%v\t*\n", i)
+ continue
+ }
+ log.Fatal(err)
+ }
+ rm, err := icmp.ParseMessage(58, rb[:n])
+ if err != nil {
+ log.Fatal(err)
+ }
+ rtt := time.Since(begin)
+
+ // In the real world you need to determine whether the
+ // received message is yours using ControlMessage.Src,
+		// ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq.
+ switch rm.Type {
+ case ipv6.ICMPTypeTimeExceeded:
+ names, _ := net.LookupAddr(peer.String())
+ fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm)
+ case ipv6.ICMPTypeEchoReply:
+ names, _ := net.LookupAddr(peer.String())
+ fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm)
+ return
+ }
+ }
+}
+
+func ExamplePacketConn_advertisingOSPFHello() {
+ c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+
+ en0, err := net.InterfaceByName("en0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")}
+ if err := p.JoinGroup(en0, &allSPFRouters); err != nil {
+ log.Fatal(err)
+ }
+ defer p.LeaveGroup(en0, &allSPFRouters)
+
+ hello := make([]byte, 24) // fake hello data, you need to implement this
+ ospf := make([]byte, 16) // fake ospf header, you need to implement this
+ ospf[0] = 3 // version 3
+ ospf[1] = 1 // hello packet
+ ospf = append(ospf, hello...)
+ if err := p.SetChecksum(true, 12); err != nil {
+ log.Fatal(err)
+ }
+
+ cm := ipv6.ControlMessage{
+ TrafficClass: 0xc0, // DSCP CS6
+ HopLimit: 1,
+ IfIndex: en0.Index,
+ }
+ if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go
new file mode 100644
index 000000000..826e3ae28
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/gen.go
@@ -0,0 +1,208 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates system adaptation constants and types,
+// internet protocol constants and tables by reading template files
+// and IANA protocol registries.
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+func main() {
+ if err := genzsys(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ if err := geniana(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func genzsys() error {
+ defs := "defs_" + runtime.GOOS + ".go"
+ f, err := os.Open(defs)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ f.Close()
+ cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
+ b, err := cmd.Output()
+ if err != nil {
+ return err
+ }
+ // The ipv6 package still supports go1.2, and so we need to
+ // take care of additional platforms in go1.3 and above for
+ // working with go1.2.
+ switch {
+ case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris":
+ b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1)
+ case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"):
+ b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1)
+ }
+ b, err = format.Source(b)
+ if err != nil {
+ return err
+ }
+ zsys := "zsys_" + runtime.GOOS + ".go"
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
+ }
+ if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+ return err
+ }
+ return nil
+}
+
+var registries = []struct {
+ url string
+ parse func(io.Writer, io.Reader) error
+}{
+ {
+ "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
+ parseICMPv6Parameters,
+ },
+}
+
+func geniana() error {
+ var bb bytes.Buffer
+ fmt.Fprintf(&bb, "// go generate gen.go\n")
+ fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+ fmt.Fprintf(&bb, "package ipv6\n\n")
+ for _, r := range registries {
+ resp, err := http.Get(r.url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
+ }
+ if err := r.parse(&bb, resp.Body); err != nil {
+ return err
+ }
+ fmt.Fprintf(&bb, "\n")
+ }
+ b, err := format.Source(bb.Bytes())
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseICMPv6Parameters(w io.Writer, r io.Reader) error {
+ dec := xml.NewDecoder(r)
+ var icp icmpv6Parameters
+ if err := dec.Decode(&icp); err != nil {
+ return err
+ }
+ prs := icp.escape()
+ fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+ fmt.Fprintf(w, "const (\n")
+ for _, pr := range prs {
+ if pr.Name == "" {
+ continue
+ }
+ fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value)
+ fmt.Fprintf(w, "// %s\n", pr.OrigName)
+ }
+ fmt.Fprintf(w, ")\n\n")
+ fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+ fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
+ for _, pr := range prs {
+ if pr.Name == "" {
+ continue
+ }
+ fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName))
+ }
+ fmt.Fprintf(w, "}\n")
+ return nil
+}
+
+type icmpv6Parameters struct {
+ XMLName xml.Name `xml:"registry"`
+ Title string `xml:"title"`
+ Updated string `xml:"updated"`
+ Registries []struct {
+ Title string `xml:"title"`
+ Records []struct {
+ Value string `xml:"value"`
+ Name string `xml:"name"`
+ } `xml:"record"`
+ } `xml:"registry"`
+}
+
+type canonICMPv6ParamRecord struct {
+ OrigName string
+ Name string
+ Value int
+}
+
+func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord {
+ id := -1
+ for i, r := range icp.Registries {
+ if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
+ id = i
+ break
+ }
+ }
+ if id < 0 {
+ return nil
+ }
+ prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records))
+ sr := strings.NewReplacer(
+ "Messages", "",
+ "Message", "",
+ "ICMP", "",
+ "+", "P",
+ "-", "",
+ "/", "",
+ ".", "",
+ " ", "",
+ )
+ for i, pr := range icp.Registries[id].Records {
+ if strings.Contains(pr.Name, "Reserved") ||
+ strings.Contains(pr.Name, "Unassigned") ||
+ strings.Contains(pr.Name, "Deprecated") ||
+ strings.Contains(pr.Name, "Experiment") ||
+ strings.Contains(pr.Name, "experiment") {
+ continue
+ }
+ ss := strings.Split(pr.Name, "\n")
+ if len(ss) > 1 {
+ prs[i].Name = strings.Join(ss, " ")
+ } else {
+ prs[i].Name = ss[0]
+ }
+ s := strings.TrimSpace(prs[i].Name)
+ prs[i].OrigName = s
+ prs[i].Name = sr.Replace(s)
+ prs[i].Value, _ = strconv.Atoi(pr.Value)
+ }
+ return prs
+}
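
geniana fetches the IANA ICMPv6 parameter registry over HTTP and renders it as the iana.go source consumed by the rest of the package. An abridged sketch of the generated shape (the three type numbers shown are standard ICMPv6 assignments; the actual file carries the full registry):

	// go generate gen.go
	// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

	package ipv6

	const (
		ICMPTypeDestinationUnreachable ICMPType = 1   // Destination Unreachable
		ICMPTypeEchoRequest            ICMPType = 128 // Echo Request
		ICMPTypeEchoReply              ICMPType = 129 // Echo Reply
	)

	var icmpTypes = map[ICMPType]string{
		1:   "destination unreachable",
		128: "echo request",
		129: "echo reply",
	}
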
diff --git a/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/vendor/golang.org/x/net/ipv6/genericopt_posix.go
new file mode 100644
index 000000000..513bd8c74
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/genericopt_posix.go
@@ -0,0 +1,64 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv6
+
+import (
+ "syscall"
+
+ "golang.org/x/net/internal/netreflect"
+)
+
+// TrafficClass returns the traffic class field value for outgoing
+// packets.
+func (c *genericOpt) TrafficClass() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoTrafficClass])
+}
+
+// SetTrafficClass sets the traffic class field value for future
+// outgoing packets.
+func (c *genericOpt) SetTrafficClass(tclass int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoTrafficClass], tclass)
+}
+
+// HopLimit returns the hop limit field value for outgoing packets.
+func (c *genericOpt) HopLimit() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return 0, err
+ }
+ return getInt(s, &sockOpts[ssoHopLimit])
+}
+
+// SetHopLimit sets the hop limit field value for future outgoing
+// packets.
+func (c *genericOpt) SetHopLimit(hoplim int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ s, err := netreflect.SocketOf(c.Conn)
+ if err != nil {
+ return err
+ }
+ return setInt(s, &sockOpts[ssoHopLimit], hoplim)
+}
diff --git a/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/vendor/golang.org/x/net/ipv6/genericopt_stub.go
new file mode 100644
index 000000000..f5c372242
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/genericopt_stub.go
@@ -0,0 +1,30 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv6
+
+// TrafficClass returns the traffic class field value for outgoing
+// packets.
+func (c *genericOpt) TrafficClass() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetTrafficClass sets the traffic class field value for future
+// outgoing packets.
+func (c *genericOpt) SetTrafficClass(tclass int) error {
+ return errOpNoSupport
+}
+
+// HopLimit returns the hop limit field value for outgoing packets.
+func (c *genericOpt) HopLimit() (int, error) {
+ return 0, errOpNoSupport
+}
+
+// SetHopLimit sets the hop limit field value for future outgoing
+// packets.
+func (c *genericOpt) SetHopLimit(hoplim int) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go
new file mode 100644
index 000000000..e05cb08b2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/header.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+)
+
+const (
+ Version = 6 // protocol version
+ HeaderLen = 40 // header length
+)
+
+// A Header represents an IPv6 base header.
+type Header struct {
+ Version int // protocol version
+ TrafficClass int // traffic class
+ FlowLabel int // flow label
+ PayloadLen int // payload length
+ NextHeader int // next header
+ HopLimit int // hop limit
+ Src net.IP // source address
+ Dst net.IP // destination address
+}
+
+func (h *Header) String() string {
+ if h == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst)
+}
+
+// ParseHeader parses b as an IPv6 base header.
+func ParseHeader(b []byte) (*Header, error) {
+ if len(b) < HeaderLen {
+ return nil, errHeaderTooShort
+ }
+ h := &Header{
+ Version: int(b[0]) >> 4,
+ TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4,
+ FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]),
+ PayloadLen: int(binary.BigEndian.Uint16(b[4:6])),
+ NextHeader: int(b[6]),
+ HopLimit: int(b[7]),
+ }
+ h.Src = make(net.IP, net.IPv6len)
+ copy(h.Src, b[8:24])
+ h.Dst = make(net.IP, net.IPv6len)
+ copy(h.Dst, b[24:40])
+ return h, nil
+}
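
Complementing the test file below, here is a minimal, self-contained sketch of feeding ParseHeader a hand-built 40-byte base header; the field values are assumptions chosen only to keep the example runnable:

package main

import (
	"log"

	"golang.org/x/net/ipv6"
)

func main() {
	// Version 6, no traffic class or flow label, payload length 0,
	// next header 59 (no next header), hop limit 64, ::1 -> ::1.
	b := make([]byte, ipv6.HeaderLen)
	b[0] = 6 << 4
	b[6] = 59
	b[7] = 64
	b[23] = 1 // source ::1
	b[39] = 1 // destination ::1

	h, err := ipv6.ParseHeader(b)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("parsed: %s", h)
}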
diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go
new file mode 100644
index 000000000..ca11dc23d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/header_test.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/ipv6"
+)
+
+var (
+ wireHeaderFromKernel = [ipv6.HeaderLen]byte{
+ 0x69, 0x8b, 0xee, 0xf1,
+ 0xca, 0xfe, 0x2c, 0x01,
+ 0x20, 0x01, 0x0d, 0xb8,
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x20, 0x01, 0x0d, 0xb8,
+ 0x00, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ }
+
+ testHeader = &ipv6.Header{
+ Version: ipv6.Version,
+ TrafficClass: iana.DiffServAF43,
+ FlowLabel: 0xbeef1,
+ PayloadLen: 0xcafe,
+ NextHeader: iana.ProtocolIPv6Frag,
+ HopLimit: 1,
+ Src: net.ParseIP("2001:db8:1::1"),
+ Dst: net.ParseIP("2001:db8:2::1"),
+ }
+)
+
+func TestParseHeader(t *testing.T) {
+ h, err := ipv6.ParseHeader(wireHeaderFromKernel[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(h, testHeader) {
+ t.Fatalf("got %#v; want %#v", h, testHeader)
+ }
+ s := h.String()
+ if strings.Contains(s, ",") {
+ t.Fatalf("should be space-separated values: %s", s)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go
new file mode 100644
index 000000000..53b999905
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/helper.go
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "unsafe"
+)
+
+var (
+ errMissingAddress = errors.New("missing address")
+ errHeaderTooShort = errors.New("header too short")
+ errInvalidConnType = errors.New("invalid conn type")
+ errOpNoSupport = errors.New("operation not supported")
+ errNoSuchInterface = errors.New("no such interface")
+
+ nativeEndian binary.ByteOrder
+)
+
+func init() {
+ i := uint32(1)
+ b := (*[4]byte)(unsafe.Pointer(&i))
+ if b[0] == 1 {
+ nativeEndian = binary.LittleEndian
+ } else {
+ nativeEndian = binary.BigEndian
+ }
+}
+
+func boolint(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func netAddrToIP16(a net.Addr) net.IP {
+ switch v := a.(type) {
+ case *net.UDPAddr:
+ if ip := v.IP.To16(); ip != nil && ip.To4() == nil {
+ return ip
+ }
+ case *net.IPAddr:
+ if ip := v.IP.To16(); ip != nil && ip.To4() == nil {
+ return ip
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go
new file mode 100644
index 000000000..3c6214fb6
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/iana.go
@@ -0,0 +1,82 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package ipv6
+
+// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07
+const (
+ ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable
+ ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big
+ ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded
+ ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem
+ ICMPTypeEchoRequest ICMPType = 128 // Echo Request
+ ICMPTypeEchoReply ICMPType = 129 // Echo Reply
+ ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query
+ ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report
+ ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done
+ ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation
+ ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement
+ ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation
+ ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement
+ ICMPTypeRedirect ICMPType = 137 // Redirect Message
+ ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering
+ ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query
+ ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response
+ ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message
+ ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message
+ ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report
+ ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message
+ ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message
+ ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation
+ ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement
+ ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message
+ ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message
+ ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement
+ ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation
+ ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination
+ ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages
+ ICMPTypeRPLControl ICMPType = 155 // RPL Control Message
+ ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message
+ ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request
+ ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation
+ ICMPTypeMPLControl ICMPType = 159 // MPL Control Message
+)
+
+// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07
+var icmpTypes = map[ICMPType]string{
+ 1: "destination unreachable",
+ 2: "packet too big",
+ 3: "time exceeded",
+ 4: "parameter problem",
+ 128: "echo request",
+ 129: "echo reply",
+ 130: "multicast listener query",
+ 131: "multicast listener report",
+ 132: "multicast listener done",
+ 133: "router solicitation",
+ 134: "router advertisement",
+ 135: "neighbor solicitation",
+ 136: "neighbor advertisement",
+ 137: "redirect message",
+ 138: "router renumbering",
+ 139: "icmp node information query",
+ 140: "icmp node information response",
+ 141: "inverse neighbor discovery solicitation message",
+ 142: "inverse neighbor discovery advertisement message",
+ 143: "version 2 multicast listener report",
+ 144: "home agent address discovery request message",
+ 145: "home agent address discovery reply message",
+ 146: "mobile prefix solicitation",
+ 147: "mobile prefix advertisement",
+ 148: "certification path solicitation message",
+ 149: "certification path advertisement message",
+ 151: "multicast router advertisement",
+ 152: "multicast router solicitation",
+ 153: "multicast router termination",
+ 154: "fmipv6 messages",
+ 155: "rpl control message",
+ 156: "ilnpv6 locator update message",
+ 157: "duplicate address request",
+ 158: "duplicate address confirmation",
+ 159: "mpl control message",
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go
new file mode 100644
index 000000000..a2de65a08
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "golang.org/x/net/internal/iana"
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+ s, ok := icmpTypes[typ]
+ if !ok {
+ return "<nil>"
+ }
+ return s
+}
+
+// Protocol returns the ICMPv6 protocol number.
+func (typ ICMPType) Protocol() int {
+ return iana.ProtocolIPv6ICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarding packets or tunnel-outer packets.
+//
+// Note: RFC 2460 defines a reasonable role model. A node means a
+// device that implements IP. A router means a node that forwards IP
+// packets not explicitly addressed to itself, and a host means a node
+// that is not a router.
+type ICMPFilter struct {
+ sysICMPv6Filter
+}
+
+// Accept accepts incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+ f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+ f.block(typ)
+}
+
+// SetAll sets the filter action for all ICMP message types: block
+// them when block is true, accept them otherwise.
+func (f *ICMPFilter) SetAll(block bool) {
+ f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+ return f.willBlock(typ)
+}
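
A typical pattern, also exercised by the tests in this change, is to block everything and then accept only the types of interest before installing the filter on a raw ICMPv6 socket. A minimal sketch, assuming raw-socket privileges and a loopback listener chosen for illustration:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") // requires raw socket privileges
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)

	var f ipv6.ICMPFilter
	f.SetAll(true)                   // block every ICMPv6 type...
	f.Accept(ipv6.ICMPTypeEchoReply) // ...except echo replies
	if err := p.SetICMPFilter(&f); err != nil {
		log.Fatal(err)
	}
}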
diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go
new file mode 100644
index 000000000..30e3ce424
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package ipv6
+
+func (f *sysICMPv6Filter) accept(typ ICMPType) {
+ f.Filt[typ>>5] |= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPv6Filter) block(typ ICMPType) {
+ f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPv6Filter) setAll(block bool) {
+ for i := range f.Filt {
+ if block {
+ f.Filt[i] = 0
+ } else {
+ f.Filt[i] = 1<<32 - 1
+ }
+ }
+}
+
+func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool {
+ return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go
new file mode 100644
index 000000000..a67ecf690
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *sysICMPv6Filter) accept(typ ICMPType) {
+ f.Data[typ>>5] &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPv6Filter) block(typ ICMPType) {
+ f.Data[typ>>5] |= 1 << (uint32(typ) & 31)
+}
+
+func (f *sysICMPv6Filter) setAll(block bool) {
+ for i := range f.Data {
+ if block {
+ f.Data[i] = 1<<32 - 1
+ } else {
+ f.Data[i] = 0
+ }
+ }
+}
+
+func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool {
+ return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go
new file mode 100644
index 000000000..a942f354c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go
@@ -0,0 +1,24 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package ipv6
+
+func (f *sysICMPv6Filter) accept(typ ICMPType) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) block(typ ICMPType) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) setAll(block bool) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool {
+ // TODO(mikio): implement this
+ return false
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go
new file mode 100644
index 000000000..c1263ecac
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+type sysICMPv6Filter struct {
+}
+
+func (f *sysICMPv6Filter) accept(typ ICMPType) {
+}
+
+func (f *sysICMPv6Filter) block(typ ICMPType) {
+}
+
+func (f *sysICMPv6Filter) setAll(block bool) {
+}
+
+func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go
new file mode 100644
index 000000000..e192d6d8c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_test.go
@@ -0,0 +1,96 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+var icmpStringTests = []struct {
+ in ipv6.ICMPType
+ out string
+}{
+ {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"},
+
+ {256, "<nil>"},
+}
+
+func TestICMPString(t *testing.T) {
+ for _, tt := range icmpStringTests {
+ s := tt.in.String()
+ if s != tt.out {
+ t.Errorf("got %s; want %s", s, tt.out)
+ }
+ }
+}
+
+func TestICMPFilter(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ var f ipv6.ICMPFilter
+ for _, toggle := range []bool{false, true} {
+ f.SetAll(toggle)
+ for _, typ := range []ipv6.ICMPType{
+ ipv6.ICMPTypeDestinationUnreachable,
+ ipv6.ICMPTypeEchoReply,
+ ipv6.ICMPTypeNeighborSolicitation,
+ ipv6.ICMPTypeDuplicateAddressConfirmation,
+ } {
+ f.Accept(typ)
+ if f.WillBlock(typ) {
+				t.Errorf("ipv6.ICMPFilter.Accept(%v) failed", typ)

+ }
+ f.Block(typ)
+ if !f.WillBlock(typ) {
+				t.Errorf("ipv6.ICMPFilter.Block(%v) failed", typ)
+ }
+ }
+ }
+}
+
+func TestSetICMPFilter(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv6.NewPacketConn(c)
+
+ var f ipv6.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv6.ICMPTypeEchoRequest)
+ f.Accept(ipv6.ICMPTypeEchoReply)
+ if err := p.SetICMPFilter(&f); err != nil {
+ t.Fatal(err)
+ }
+ kf, err := p.ICMPFilter()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(kf, &f) {
+ t.Fatalf("got %#v; want %#v", kf, f)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go
new file mode 100644
index 000000000..f477a8c0d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *sysICMPv6Filter) accept(typ ICMPType) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) block(typ ICMPType) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) setAll(block bool) {
+ // TODO(mikio): implement this
+}
+
+func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool {
+ // TODO(mikio): implement this
+ return false
+}
diff --git a/vendor/golang.org/x/net/ipv6/main_test.go b/vendor/golang.org/x/net/ipv6/main_test.go
new file mode 100644
index 000000000..6ae53bb10
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/main_test.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if runtime.GOOS == "darwin" {
+ vers, _ := exec.Command("sw_vers", "-productVersion").Output()
+ if string(vers) == "10.8" || strings.HasPrefix(string(vers), "10.8.") {
+ fmt.Fprintf(os.Stderr, "# skipping tests on OS X 10.8 to avoid kernel panics; golang.org/issue/17015\n")
+ os.Exit(0)
+ }
+ }
+ os.Exit(m.Run())
+}
diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go
new file mode 100644
index 000000000..d587922a1
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go
@@ -0,0 +1,32 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "testing"
+)
+
+func connector(t *testing.T, network, addr string, done chan<- bool) {
+ defer func() { done <- true }()
+
+ c, err := net.Dial(network, addr)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ c.Close()
+}
+
+func acceptor(t *testing.T, ln net.Listener, done chan<- bool) {
+ defer func() { done <- true }()
+
+ c, err := ln.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ c.Close()
+}
diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go
new file mode 100644
index 000000000..a3a8979d2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/multicast_test.go
@@ -0,0 +1,260 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+var packetConnReadWriteMulticastUDPTests = []struct {
+ addr string
+ grp, src *net.UDPAddr
+}{
+ {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727
+
+ {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771
+}
+
+func TestPacketConnReadWriteMulticastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "freebsd": // due to a bug on loopback marking
+ // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065.
+ t.Skipf("not supported on %s", runtime.GOOS)
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range packetConnReadWriteMulticastUDPTests {
+ c, err := net.ListenPacket("udp6", tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ grp := *tt.grp
+ grp.Port = c.LocalAddr().(*net.UDPAddr).Port
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+ if tt.src == nil {
+ if err := p.JoinGroup(ifi, &grp); err != nil {
+ t.Fatal(err)
+ }
+ defer p.LeaveGroup(ifi, &grp)
+ } else {
+ if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support MLDv2 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src)
+ }
+ if err := p.SetMulticastInterface(ifi); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastInterface(); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetMulticastLoopback(true); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastLoopback(); err != nil {
+ t.Fatal(err)
+ }
+
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ Src: net.IPv6loopback,
+ IfIndex: ifi.Index,
+ }
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+ wb := []byte("HELLO-R-U-THERE")
+
+ for i, toggle := range []bool{true, false, true} {
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ cm.HopLimit = i + 1
+ if n, err := p.WriteTo(wb, &cm, &grp); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatal(err)
+ }
+ rb := make([]byte, 128)
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Fatalf("got %v; want %v", rb[:n], wb)
+ }
+ }
+ }
+}
+
+var packetConnReadWriteMulticastICMPTests = []struct {
+ grp, src *net.IPAddr
+}{
+ {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727
+
+ {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771
+}
+
+func TestPacketConnReadWriteMulticastICMP(t *testing.T) {
+ switch runtime.GOOS {
+ case "freebsd": // due to a bug on loopback marking
+ // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065.
+ t.Skipf("not supported on %s", runtime.GOOS)
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ for _, tt := range packetConnReadWriteMulticastICMPTests {
+ c, err := net.ListenPacket("ip6:ipv6-icmp", "::")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP)
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+ if tt.src == nil {
+ if err := p.JoinGroup(ifi, tt.grp); err != nil {
+ t.Fatal(err)
+ }
+ defer p.LeaveGroup(ifi, tt.grp)
+ } else {
+ if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support MLDv2 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+ }
+ if err := p.SetMulticastInterface(ifi); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastInterface(); err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetMulticastLoopback(true); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := p.MulticastLoopback(); err != nil {
+ t.Fatal(err)
+ }
+
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ Src: net.IPv6loopback,
+ IfIndex: ifi.Index,
+ }
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+
+ var f ipv6.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv6.ICMPTypeEchoReply)
+ if err := p.SetICMPFilter(&f); err != nil {
+ t.Fatal(err)
+ }
+
+ var psh []byte
+ for i, toggle := range []bool{true, false, true} {
+ if toggle {
+ psh = nil
+ if err := p.SetChecksum(true, 2); err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ psh = pshicmp
+				// Some platforms never allow the kernel
+				// checksum processing to be disabled.
+ p.SetChecksum(false, -1)
+ }
+ wb, err := (&icmp.Message{
+ Type: ipv6.ICMPTypeEchoRequest, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(psh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ }
+ if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ cm.HopLimit = i + 1
+ if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ } else {
+ if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil {
+ t.Fatal(err)
+ } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 {
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go
new file mode 100644
index 000000000..9711f7513
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go
@@ -0,0 +1,246 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "fmt"
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+var udpMultipleGroupListenerTests = []net.Addr{
+ &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727
+ &net.UDPAddr{IP: net.ParseIP("ff02::1:114")},
+ &net.UDPAddr{IP: net.ParseIP("ff02::2:114")},
+}
+
+func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ for _, gaddr := range udpMultipleGroupListenerTests {
+ c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv6.NewPacketConn(c)
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok {
+ continue
+ }
+ if err := p.JoinGroup(&ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ if err := p.LeaveGroup(ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ for _, gaddr := range udpMultipleGroupListenerTests {
+ c1, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c1.Close()
+
+ c2, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c2.Close()
+
+ var ps [2]*ipv6.PacketConn
+ ps[0] = ipv6.NewPacketConn(c1)
+ ps[1] = ipv6.NewPacketConn(c2)
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok {
+ continue
+ }
+ for _, p := range ps {
+ if err := p.JoinGroup(&ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ for _, p := range ps {
+ if err := p.LeaveGroup(ifi, gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ }
+}
+
+func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+ type ml struct {
+ c *ipv6.PacketConn
+ ifi *net.Interface
+ }
+ var mlt []*ml
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ ip, ok := nettest.IsMulticastCapable("ip6", &ifi)
+ if !ok {
+ continue
+ }
+ c, err := net.ListenPacket("udp6", fmt.Sprintf("[%s%%%s]:1024", ip.String(), ifi.Name)) // unicast address with non-reusable port
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mlt = append(mlt, &ml{p, &ift[i]})
+ }
+ for _, m := range mlt {
+ if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv6.NewPacketConn(c)
+ gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+ var mift []*net.Interface
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok {
+ continue
+ }
+ if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mift = append(mift, &ift[i])
+ }
+ for _, ifi := range mift {
+ if err := p.LeaveGroup(ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address
+ t.Skipf("not supported on %s", runtime.GOOS)
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+ type ml struct {
+ c *ipv6.PacketConn
+ ifi *net.Interface
+ }
+ var mlt []*ml
+
+ ift, err := net.Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, ifi := range ift {
+ ip, ok := nettest.IsMulticastCapable("ip6", &ifi)
+ if !ok {
+ continue
+ }
+ c, err := net.ListenPacket("ip6:ipv6-icmp", fmt.Sprintf("%s%%%s", ip.String(), ifi.Name)) // unicast address
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ mlt = append(mlt, &ml{p, &ift[i]})
+ }
+ for _, m := range mlt {
+ if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go
new file mode 100644
index 000000000..fe0e6e1b1
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go
@@ -0,0 +1,157 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+var packetConnMulticastSocketOptionTests = []struct {
+ net, proto, addr string
+ grp, src net.Addr
+}{
+ {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727
+ {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727
+
+ {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771
+ {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771
+}
+
+func TestPacketConnMulticastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+ if ifi == nil {
+ t.Skipf("not available on %s", runtime.GOOS)
+ }
+
+ m, ok := nettest.SupportsRawIPSocket()
+ for _, tt := range packetConnMulticastSocketOptionTests {
+ if tt.net == "ip6" && !ok {
+ t.Log(m)
+ continue
+ }
+ c, err := net.ListenPacket(tt.net+tt.proto, tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+
+ if tt.src == nil {
+ testMulticastSocketOptions(t, p, ifi, tt.grp)
+ } else {
+ testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src)
+ }
+ }
+}
+
+type testIPv6MulticastConn interface {
+ MulticastHopLimit() (int, error)
+ SetMulticastHopLimit(ttl int) error
+ MulticastLoopback() (bool, error)
+ SetMulticastLoopback(bool) error
+ JoinGroup(*net.Interface, net.Addr) error
+ LeaveGroup(*net.Interface, net.Addr) error
+ JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+ IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error
+}
+
+func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) {
+ const hoplim = 255
+ if err := c.SetMulticastHopLimit(hoplim); err != nil {
+ t.Error(err)
+ return
+ }
+ if v, err := c.MulticastHopLimit(); err != nil {
+ t.Error(err)
+ return
+ } else if v != hoplim {
+ t.Errorf("got %v; want %v", v, hoplim)
+ return
+ }
+
+ for _, toggle := range []bool{true, false} {
+ if err := c.SetMulticastLoopback(toggle); err != nil {
+ t.Error(err)
+ return
+ }
+ if v, err := c.MulticastLoopback(); err != nil {
+ t.Error(err)
+ return
+ } else if v != toggle {
+ t.Errorf("got %v; want %v", v, toggle)
+ return
+ }
+ }
+
+ if err := c.JoinGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+}
+
+func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) {
+ // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP
+ if err := c.JoinGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil {
+ switch runtime.GOOS {
+ case "freebsd", "linux":
+ default: // platforms that don't support MLDv2 fail here
+ t.Logf("not supported on %s", runtime.GOOS)
+ return
+ }
+ t.Error(err)
+ return
+ }
+ if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP
+ if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP
+ if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
+ t.Error(err)
+ return
+ }
+ if err := c.LeaveGroup(ifi, grp); err != nil {
+ t.Error(err)
+ return
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go
new file mode 100644
index 000000000..529b20bca
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/payload.go
@@ -0,0 +1,15 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "net"
+
+// A payloadHandler represents the IPv6 datagram payload handler.
+type payloadHandler struct {
+ net.PacketConn
+ rawOpt
+}
+
+func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil }
diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go
new file mode 100644
index 000000000..8e90d324d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go
@@ -0,0 +1,70 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl,!plan9,!windows
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+)
+
+// ReadFrom reads a payload of the received IPv6 datagram, from the
+// endpoint c, copying the payload into b. It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+ if !c.ok() {
+ return 0, nil, nil, syscall.EINVAL
+ }
+ oob := newControlMessage(&c.rawOpt)
+ var oobn int
+ switch c := c.PacketConn.(type) {
+ case *net.UDPConn:
+ if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {
+ return 0, nil, nil, err
+ }
+ case *net.IPConn:
+ if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil {
+ return 0, nil, nil, err
+ }
+ default:
+ return 0, nil, nil, errInvalidConnType
+ }
+ if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+ return 0, nil, nil, err
+ }
+ if cm != nil {
+ cm.Src = netAddrToIP16(src)
+ }
+ return
+}
+
+// WriteTo writes a payload of the IPv6 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b. It
+// returns the number of bytes written. The control message cm allows
+// the IPv6 header fields and the datagram path to be specified. The
+// cm may be nil if control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ oob := marshalControlMessage(cm)
+ if dst == nil {
+ return 0, errMissingAddress
+ }
+ switch c := c.PacketConn.(type) {
+ case *net.UDPConn:
+ n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))
+ case *net.IPConn:
+ n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))
+ default:
+ return 0, errInvalidConnType
+ }
+ if err != nil {
+ return 0, err
+ }
+ return
+}
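
In the public API these methods back ipv6.PacketConn.ReadFrom and WriteTo once the desired flags have been enabled with SetControlMessage. A minimal read-side sketch over UDP, not part of this change and with an illustrative loopback address:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	// Ask the kernel to report the hop limit and destination on reads.
	if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagDst, true); err != nil {
		log.Println(err) // not supported on every platform
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b) // blocks until a datagram arrives
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, control message: %v", n, src, cm)
}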
diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go
new file mode 100644
index 000000000..499204d0c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go
@@ -0,0 +1,41 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 windows
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+)
+
+// ReadFrom reads a payload of the received IPv6 datagram, from the
+// endpoint c, copying the payload into b. It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+ if !c.ok() {
+ return 0, nil, nil, syscall.EINVAL
+ }
+ if n, src, err = c.PacketConn.ReadFrom(b); err != nil {
+ return 0, nil, nil, err
+ }
+ return
+}
+
+// WriteTo writes a payload of the IPv6 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b. It
+// returns the number of bytes written. The control message cm allows
+// the IPv6 header fields and the datagram path to be specified. The
+// cm may be nil if control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ if dst == nil {
+ return 0, errMissingAddress
+ }
+ return c.PacketConn.WriteTo(b, dst)
+}
diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go
new file mode 100644
index 000000000..8c8c6fde0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go
@@ -0,0 +1,189 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "bytes"
+ "net"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+func benchmarkUDPListener() (net.PacketConn, net.Addr, error) {
+ c, err := net.ListenPacket("udp6", "[::1]:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String())
+ if err != nil {
+ c.Close()
+ return nil, nil, err
+ }
+ return c, dst, nil
+}
+
+func BenchmarkReadWriteNetUDP(b *testing.B) {
+ if !supportsIPv6 {
+ b.Skip("ipv6 is not supported")
+ }
+
+ c, dst, err := benchmarkUDPListener()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer c.Close()
+
+ wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ benchmarkReadWriteNetUDP(b, c, wb, rb, dst)
+ }
+}
+
+func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) {
+ if _, err := c.WriteTo(wb, dst); err != nil {
+ b.Fatal(err)
+ }
+ if _, _, err := c.ReadFrom(rb); err != nil {
+ b.Fatal(err)
+ }
+}
+
+func BenchmarkReadWriteIPv6UDP(b *testing.B) {
+ if !supportsIPv6 {
+ b.Skip("ipv6 is not supported")
+ }
+
+ c, dst, err := benchmarkUDPListener()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv6.NewPacketConn(c)
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+ if err := p.SetControlMessage(cf, true); err != nil {
+ b.Fatal(err)
+ }
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+
+ wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ benchmarkReadWriteIPv6UDP(b, p, wb, rb, dst, ifi)
+ }
+}
+
+func benchmarkReadWriteIPv6UDP(b *testing.B, p *ipv6.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) {
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ HopLimit: 1,
+ }
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ b.Fatal(err)
+ } else if n != len(wb) {
+ b.Fatalf("got %v; want %v", n, len(wb))
+ }
+ if _, _, _, err := p.ReadFrom(rb); err != nil {
+ b.Fatal(err)
+ }
+}
+
+func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ c, err := net.ListenPacket("udp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+
+ dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+ wb := []byte("HELLO-R-U-THERE")
+
+ if err := p.SetControlMessage(cf, true); err != nil { // probe before test
+ if nettest.ProtocolNotSupported(err) {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+
+ var wg sync.WaitGroup
+ reader := func() {
+ defer wg.Done()
+ rb := make([]byte, 128)
+ if n, cm, _, err := p.ReadFrom(rb); err != nil {
+ t.Error(err)
+ return
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Errorf("got %v; want %v", rb[:n], wb)
+ return
+ } else {
+ s := cm.String()
+ if strings.Contains(s, ",") {
+ t.Errorf("should be space-separated values: %s", s)
+ }
+ }
+ }
+ writer := func(toggle bool) {
+ defer wg.Done()
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ Src: net.IPv6loopback,
+ }
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ t.Error(err)
+ return
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ t.Error(err)
+ return
+ } else if n != len(wb) {
+ t.Errorf("got %v; want %v", n, len(wb))
+ return
+ }
+ }
+
+ const N = 10
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go reader()
+ }
+ wg.Add(2 * N)
+ for i := 0; i < 2*N; i++ {
+ go writer(i%2 != 0)
+ }
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go reader()
+ }
+ wg.Wait()
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go
new file mode 100644
index 000000000..f0cfc2f94
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+// Sticky socket options
+const (
+ ssoTrafficClass = iota // header field for unicast packet, RFC 3542
+ ssoHopLimit // header field for unicast packet, RFC 3493
+ ssoMulticastInterface // outbound interface for multicast packet, RFC 3493
+ ssoMulticastHopLimit // header field for multicast packet, RFC 3493
+ ssoMulticastLoopback // loopback for multicast packet, RFC 3493
+ ssoReceiveTrafficClass // header field on received packet, RFC 3542
+ ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542
+	ssoReceivePacketInfo   // inbound or outbound packet path, RFC 2292 or 3542
+ ssoReceivePathMTU // path mtu, RFC 3542
+ ssoPathMTU // path mtu, RFC 3542
+ ssoChecksum // packet checksum, RFC 2292 or 3542
+ ssoICMPFilter // icmp filter, RFC 2292 or 3542
+ ssoJoinGroup // any-source multicast, RFC 3493
+ ssoLeaveGroup // any-source multicast, RFC 3493
+ ssoJoinSourceGroup // source-specific multicast
+ ssoLeaveSourceGroup // source-specific multicast
+ ssoBlockSourceGroup // any-source or source-specific multicast
+ ssoUnblockSourceGroup // any-source or source-specific multicast
+ ssoMax
+)
+
+// Sticky socket option value types
+const (
+ ssoTypeInt = iota + 1
+ ssoTypeInterface
+ ssoTypeICMPFilter
+ ssoTypeMTUInfo
+ ssoTypeIPMreq
+ ssoTypeGroupReq
+ ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for sticky socket option.
+type sockOpt struct {
+ level int // option level
+ name int // option name, must be equal or greater than 1
+ typ int // option value type, must be equal or greater than 1
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go
new file mode 100644
index 000000000..092e39839
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv6
+
+import (
+ "net"
+ "os"
+ "unsafe"
+)
+
+func setsockoptIPMreq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+ var mreq sysIPv6Mreq
+ copy(mreq.Multiaddr[:], grp)
+ if ifi != nil {
+ mreq.setIfindex(ifi.Index)
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&mreq), sysSizeofIPv6Mreq))
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
new file mode 100644
index 000000000..651aa9326
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
+
+package ipv6
+
+import (
+ "net"
+ "os"
+ "unsafe"
+)
+
+func getInt(s uintptr, opt *sockOpt) (int, error) {
+ if opt.name < 1 || opt.typ != ssoTypeInt {
+ return 0, errOpNoSupport
+ }
+ var i int32
+ l := uint32(4)
+ if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil {
+ return 0, os.NewSyscallError("getsockopt", err)
+ }
+ return int(i), nil
+}
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+ if opt.name < 1 || opt.typ != ssoTypeInt {
+ return errOpNoSupport
+ }
+ i := int32(v)
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), 4))
+}
+
+func getInterface(s uintptr, opt *sockOpt) (*net.Interface, error) {
+ if opt.name < 1 || opt.typ != ssoTypeInterface {
+ return nil, errOpNoSupport
+ }
+ var i int32
+ l := uint32(4)
+ if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ if i == 0 {
+ return nil, nil
+ }
+ ifi, err := net.InterfaceByIndex(int(i))
+ if err != nil {
+ return nil, err
+ }
+ return ifi, nil
+}
+
+func setInterface(s uintptr, opt *sockOpt, ifi *net.Interface) error {
+ if opt.name < 1 || opt.typ != ssoTypeInterface {
+ return errOpNoSupport
+ }
+ var i int32
+ if ifi != nil {
+ i = int32(ifi.Index)
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), 4))
+}
+
+func getICMPFilter(s uintptr, opt *sockOpt) (*ICMPFilter, error) {
+ if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+ return nil, errOpNoSupport
+ }
+ var f ICMPFilter
+ l := uint32(sysSizeofICMPv6Filter)
+ if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ return &f, nil
+}
+
+func setICMPFilter(s uintptr, opt *sockOpt, f *ICMPFilter) error {
+ if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+ return errOpNoSupport
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), sysSizeofICMPv6Filter))
+}
+
+func getMTUInfo(s uintptr, opt *sockOpt) (*net.Interface, int, error) {
+ if opt.name < 1 || opt.typ != ssoTypeMTUInfo {
+ return nil, 0, errOpNoSupport
+ }
+ var mi sysIPv6Mtuinfo
+ l := uint32(sysSizeofIPv6Mtuinfo)
+ if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil {
+ return nil, 0, os.NewSyscallError("getsockopt", err)
+ }
+ if mi.Addr.Scope_id == 0 {
+ return nil, int(mi.Mtu), nil
+ }
+ ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id))
+ if err != nil {
+ return nil, 0, err
+ }
+ return ifi, int(mi.Mtu), nil
+}
+
+func setGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+ if opt.name < 1 {
+ return errOpNoSupport
+ }
+ switch opt.typ {
+ case ssoTypeIPMreq:
+ return setsockoptIPMreq(s, opt, ifi, grp)
+ case ssoTypeGroupReq:
+ return setsockoptGroupReq(s, opt, ifi, grp)
+ default:
+ return errOpNoSupport
+ }
+}
+
+func setSourceGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+ if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq {
+ return errOpNoSupport
+ }
+ return setsockoptGroupSourceReq(s, opt, ifi, grp, src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go
new file mode 100644
index 000000000..a17723265
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux
+
+package ipv6
+
+import "net"
+
+func setsockoptGroupReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+ return errOpNoSupport
+}
+
+func setsockoptGroupSourceReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+ return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go
new file mode 100644
index 000000000..88f118ca2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go
@@ -0,0 +1,59 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux
+
+package ipv6
+
+import (
+ "net"
+ "os"
+ "unsafe"
+)
+
+var freebsd32o64 bool
+
+func setsockoptGroupReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+ var gr sysGroupReq
+ if ifi != nil {
+ gr.Interface = uint32(ifi.Index)
+ }
+ gr.setGroup(grp)
+ var p unsafe.Pointer
+ var l uint32
+ if freebsd32o64 {
+ var d [sysSizeofGroupReq + 4]byte
+ s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr))
+ copy(d[:4], s[:4])
+ copy(d[8:], s[4:])
+ p = unsafe.Pointer(&d[0])
+ l = sysSizeofGroupReq + 4
+ } else {
+ p = unsafe.Pointer(&gr)
+ l = sysSizeofGroupReq
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, p, l))
+}
+
+func setsockoptGroupSourceReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+ var gsr sysGroupSourceReq
+ if ifi != nil {
+ gsr.Interface = uint32(ifi.Index)
+ }
+ gsr.setSourceGroup(grp, src)
+ var p unsafe.Pointer
+ var l uint32
+ if freebsd32o64 {
+ var d [sysSizeofGroupSourceReq + 4]byte
+ s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))
+ copy(d[:4], s[:4])
+ copy(d[8:], s[4:])
+ p = unsafe.Pointer(&d[0])
+ l = sysSizeofGroupSourceReq + 4
+ } else {
+ p = unsafe.Pointer(&gsr)
+ l = sysSizeofGroupSourceReq
+ }
+ return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, p, l))
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go
new file mode 100644
index 000000000..b6b9c4b45
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv6
+
+import "net"
+
+func getMTUInfo(s uintptr, opt *sockOpt) (*net.Interface, int, error) {
+ return nil, 0, errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go
new file mode 100644
index 000000000..9c2190316
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go
@@ -0,0 +1,133 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "fmt"
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+var supportsIPv6 bool = nettest.SupportsIPv6()
+
+func TestConnInitiatorPathMTU(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ ln, err := net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+
+ done := make(chan bool)
+ go acceptor(t, ln, done)
+
+ c, err := net.Dial("tcp6", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels don't support IPV6_PATHMTU option
+ t.Logf("not supported on %s", runtime.GOOS)
+ default:
+ t.Fatal(err)
+ }
+ } else {
+ t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu)
+ }
+
+ <-done
+}
+
+func TestConnResponderPathMTU(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ ln, err := net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+
+ done := make(chan bool)
+ go connector(t, "tcp6", ln.Addr().String(), done)
+
+ c, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels don't support IPV6_PATHMTU option
+ t.Logf("not supported on %s", runtime.GOOS)
+ default:
+ t.Fatal(err)
+ }
+ } else {
+ t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu)
+ }
+
+ <-done
+}
+
+func TestPacketConnChecksum(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ p := ipv6.NewPacketConn(c)
+ offset := 12 // see RFC 5340
+
+ for _, toggle := range []bool{false, true} {
+ if err := p.SetChecksum(toggle, offset); err != nil {
+ if toggle {
+ t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err)
+ } else {
+ // Some platforms never allow disabling the kernel
+ // checksum processing.
+ t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err)
+ }
+ }
+ if on, offset, err := p.Checksum(); err != nil {
+ t.Fatal(err)
+ } else {
+ t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go
new file mode 100644
index 000000000..0ee43e6d2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go
@@ -0,0 +1,56 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly netbsd openbsd
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+ ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+ ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+ ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop},
+ ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+ ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+ ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+ ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+ ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+ ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+ ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+ ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+ ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+ ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+ ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+ ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+ }
+)
+
+func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) {
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], ip)
+ sa.Scope_id = uint32(i)
+}
+
+func (pi *sysInet6Pktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (mreq *sysIPv6Mreq) setIfindex(i int) {
+ mreq.Interface = uint32(i)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go
new file mode 100644
index 000000000..1e1c4aefa
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit},
+ ctlPacketInfo: {sysIPV6_2292PKTINFO, sysSizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+ ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+ ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt},
+ ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt},
+ ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+ ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+ ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+ ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+ }
+)
+
+func init() {
+ // kern.osreldate appears to be unavailable on recent OS X
+ // releases, so we use kern.osrelease instead.
+ s, err := syscall.Sysctl("kern.osrelease")
+ if err != nil {
+ return
+ }
+ ss := strings.Split(s, ".")
+ if len(ss) == 0 {
+ return
+ }
+ // The IP_PKTINFO and protocol-independent multicast API were
+ // introduced in OS X 10.7 (Darwin 11). But it looks like
+ // those features require OS X 10.8 (Darwin 12) or above.
+ // See http://support.apple.com/kb/HT1633.
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 {
+ return
+ }
+ ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}
+ ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}
+ ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}
+ ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}
+ ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}
+ sockOpts[ssoTrafficClass] = sockOpt{iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}
+ sockOpts[ssoReceiveTrafficClass] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}
+ sockOpts[ssoReceiveHopLimit] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}
+ sockOpts[ssoReceivePacketInfo] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}
+ sockOpts[ssoReceivePathMTU] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}
+ sockOpts[ssoPathMTU] = sockOpt{iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}
+ sockOpts[ssoJoinGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}
+ sockOpts[ssoLeaveGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}
+ sockOpts[ssoJoinSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}
+ sockOpts[ssoLeaveSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}
+ sockOpts[ssoBlockSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}
+ sockOpts[ssoUnblockSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}
+}
+
+func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) {
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], ip)
+ sa.Scope_id = uint32(i)
+}
+
+func (pi *sysInet6Pktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (mreq *sysIPv6Mreq) setIfindex(i int) {
+ mreq.Interface = uint32(i)
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Pad_cgo_0[0]))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_0[0]))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_1[0]))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
new file mode 100644
index 000000000..5527001f1
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
@@ -0,0 +1,91 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "runtime"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+ ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+ ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+ ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop},
+ ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+ ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+ ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+ ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+ ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+ ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+ ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+ ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+ ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+ ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+ ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+ ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+ ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+ }
+)
+
+func init() {
+ if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" {
+ archs, _ := syscall.Sysctl("kern.supported_archs")
+ for _, s := range strings.Fields(archs) {
+ if s == "amd64" {
+ freebsd32o64 = true
+ break
+ }
+ }
+ }
+}
+
+func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) {
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], ip)
+ sa.Scope_id = uint32(i)
+}
+
+func (pi *sysInet6Pktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (mreq *sysIPv6Mreq) setIfindex(i int) {
+ mreq.Interface = uint32(i)
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source))
+ sa.Len = sysSizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go
new file mode 100644
index 000000000..fd7d5b188
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_linux.go
@@ -0,0 +1,72 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+ ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+ ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+ ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+ }
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+ ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+ ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+ ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+ ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+ ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+ ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+ ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+ ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+ ssoChecksum: {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt},
+ ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter},
+ ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+ ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+ ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+ }
+)
+
+func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) {
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], ip)
+ sa.Scope_id = uint32(i)
+}
+
+func (pi *sysInet6Pktinfo) setIfindex(i int) {
+ pi.Ifindex = int32(i)
+}
+
+func (mreq *sysIPv6Mreq) setIfindex(i int) {
+ mreq.Ifindex = int32(i)
+}
+
+func (gr *sysGroupReq) setGroup(grp net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group))
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group))
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+ sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source))
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go
new file mode 100644
index 000000000..ead0f4d11
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package ipv6
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{}
+
+ sockOpts = [ssoMax]sockOpt{}
+)
diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go
new file mode 100644
index 000000000..405190684
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_windows.go
@@ -0,0 +1,74 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "syscall"
+
+ "golang.org/x/net/internal/iana"
+)
+
+const (
+ // See ws2tcpip.h.
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PKTINFO = 0x13
+
+ sysSizeofSockaddrInet6 = 0x1c
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofICMPv6Filter = 0
+)
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysICMPv6Filter struct {
+ // TODO(mikio): implement this
+}
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{}
+
+ sockOpts = [ssoMax]sockOpt{
+ ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+ ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+ ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+ ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+ ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+ ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+ }
+)
+
+func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) {
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], ip)
+ sa.Scope_id = uint32(i)
+}
+
+func (mreq *sysIPv6Mreq) setIfindex(i int) {
+ mreq.Interface = uint32(i)
+}
diff --git a/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go
new file mode 100644
index 000000000..5184dbe88
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ sysGETSOCKOPT = 0xf
+ sysSETSOCKOPT = 0xe
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ if _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ if _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv6/syscall_unix.go b/vendor/golang.org/x/net/ipv6/syscall_unix.go
new file mode 100644
index 000000000..52eb9bd79
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/syscall_unix.go
@@ -0,0 +1,26 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!386 netbsd openbsd
+
+package ipv6
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/ipv6/syscall_windows.go b/vendor/golang.org/x/net/ipv6/syscall_windows.go
new file mode 100644
index 000000000..c1f649d38
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/syscall_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+ return syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), (*int32)(unsafe.Pointer(l)))
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+ return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), int32(l))
+}
diff --git a/vendor/golang.org/x/net/ipv6/thunk_linux_386.s b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s
new file mode 100644
index 000000000..daa78bc02
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.2
+
+TEXT ·socketcall(SB),4,$0-36
+ JMP syscall·socketcall(SB)
diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go
new file mode 100644
index 000000000..db5b08a28
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/unicast_test.go
@@ -0,0 +1,182 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/net/icmp"
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+func TestPacketConnReadWriteUnicastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ c, err := net.ListenPacket("udp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+
+ dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ Src: net.IPv6loopback,
+ }
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+ wb := []byte("HELLO-R-U-THERE")
+
+ for i, toggle := range []bool{true, false, true} {
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+ cm.HopLimit = i + 1
+ if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal(rb[:n], wb) {
+ t.Fatalf("got %v; want %v", rb[:n], wb)
+ }
+ }
+}
+
+func TestPacketConnReadWriteUnicastICMP(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+ if m, ok := nettest.SupportsRawIPSocket(); !ok {
+ t.Skip(m)
+ }
+
+ c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ p := ipv6.NewPacketConn(c)
+ defer p.Close()
+
+ dst, err := net.ResolveIPAddr("ip6", "::1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP)
+ cm := ipv6.ControlMessage{
+ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+ Src: net.IPv6loopback,
+ }
+ cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+ ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+ if ifi != nil {
+ cm.IfIndex = ifi.Index
+ }
+
+ var f ipv6.ICMPFilter
+ f.SetAll(true)
+ f.Accept(ipv6.ICMPTypeEchoReply)
+ if err := p.SetICMPFilter(&f); err != nil {
+ t.Fatal(err)
+ }
+
+ var psh []byte
+ for i, toggle := range []bool{true, false, true} {
+ if toggle {
+ psh = nil
+ if err := p.SetChecksum(true, 2); err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ psh = pshicmp
+ // Some platforms never allow disabling the
+ // kernel checksum processing.
+ p.SetChecksum(false, -1)
+ }
+ wb, err := (&icmp.Message{
+ Type: ipv6.ICMPTypeEchoRequest, Code: 0,
+ Body: &icmp.Echo{
+ ID: os.Getpid() & 0xffff, Seq: i + 1,
+ Data: []byte("HELLO-R-U-THERE"),
+ },
+ }).Marshal(psh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := p.SetControlMessage(cf, toggle); err != nil {
+ if nettest.ProtocolNotSupported(err) {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+ cm.HopLimit = i + 1
+ if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+ t.Fatal(err)
+ } else if n != len(wb) {
+ t.Fatalf("got %v; want %v", n, len(wb))
+ }
+ rb := make([]byte, 128)
+ if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+ t.Fatal(err)
+ }
+ if n, _, _, err := p.ReadFrom(rb); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket
+ t.Logf("not supported on %s", runtime.GOOS)
+ continue
+ }
+ t.Fatal(err)
+ } else {
+ if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil {
+ t.Fatal(err)
+ } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 {
+ t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0)
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go
new file mode 100644
index 000000000..7bb2e440a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go
@@ -0,0 +1,111 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6_test
+
+import (
+ "net"
+ "runtime"
+ "testing"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/nettest"
+ "golang.org/x/net/ipv6"
+)
+
+func TestConnUnicastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ ln, err := net.Listen("tcp6", "[::1]:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+
+ done := make(chan bool)
+ go acceptor(t, ln, done)
+
+ c, err := net.Dial("tcp6", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ testUnicastSocketOptions(t, ipv6.NewConn(c))
+
+ <-done
+}
+
+var packetConnUnicastSocketOptionTests = []struct {
+ net, proto, addr string
+}{
+ {"udp6", "", "[::1]:0"},
+ {"ip6", ":ipv6-icmp", "::1"},
+}
+
+func TestPacketConnUnicastSocketOptions(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl", "plan9", "solaris", "windows":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if !supportsIPv6 {
+ t.Skip("ipv6 is not supported")
+ }
+
+ m, ok := nettest.SupportsRawIPSocket()
+ for _, tt := range packetConnUnicastSocketOptionTests {
+ if tt.net == "ip6" && !ok {
+ t.Log(m)
+ continue
+ }
+ c, err := net.ListenPacket(tt.net+tt.proto, tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ testUnicastSocketOptions(t, ipv6.NewPacketConn(c))
+ }
+}
+
+type testIPv6UnicastConn interface {
+ TrafficClass() (int, error)
+ SetTrafficClass(int) error
+ HopLimit() (int, error)
+ SetHopLimit(int) error
+}
+
+func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) {
+ tclass := iana.DiffServCS0 | iana.NotECNTransport
+ if err := c.SetTrafficClass(tclass); err != nil {
+ switch runtime.GOOS {
+ case "darwin": // older darwin kernels don't support IPV6_TCLASS option
+ t.Logf("not supported on %s", runtime.GOOS)
+ goto next
+ }
+ t.Fatal(err)
+ }
+ if v, err := c.TrafficClass(); err != nil {
+ t.Fatal(err)
+ } else if v != tclass {
+ t.Fatalf("got %v; want %v", v, tclass)
+ }
+
+next:
+ hoplim := 255
+ if err := c.SetHopLimit(hoplim); err != nil {
+ t.Fatal(err)
+ }
+ if v, err := c.HopLimit(); err != nil {
+ t.Fatal(err)
+ } else if v != hoplim {
+ t.Fatalf("got %v; want %v", v, hoplim)
+ }
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go
new file mode 100644
index 000000000..cb044b033
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go
@@ -0,0 +1,131 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_darwin.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+ sysIPV6_2292PKTINFO = 0x13
+ sysIPV6_2292HOPLIMIT = 0x14
+ sysIPV6_2292NEXTHOP = 0x15
+ sysIPV6_2292HOPOPTS = 0x16
+ sysIPV6_2292DSTOPTS = 0x17
+ sysIPV6_2292RTHDR = 0x18
+
+ sysIPV6_2292PKTOPTIONS = 0x19
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RECVTCLASS = 0x23
+ sysIPV6_TCLASS = 0x24
+
+ sysIPV6_RTHDRDSTOPTS = 0x39
+
+ sysIPV6_RECVPKTINFO = 0x3d
+
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PREFER_TEMPADDR = 0x3f
+
+ sysIPV6_MSFILTER = 0x4a
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysIPV6_BOUND_IF = 0x7d
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [128]byte
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [128]byte
+ Pad_cgo_1 [128]byte
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go
new file mode 100644
index 000000000..5a03ab734
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go
@@ -0,0 +1,90 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_dragonfly.go
+
+// +build dragonfly
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PREFER_TEMPADDR = 0x3f
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go
new file mode 100644
index 000000000..4ace96f0c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go
@@ -0,0 +1,122 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PREFER_TEMPADDR = 0x3f
+
+ sysIPV6_BINDANY = 0x40
+
+ sysIPV6_MSFILTER = 0x4a
+
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go
new file mode 100644
index 000000000..4a62c2d5c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go
@@ -0,0 +1,124 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PREFER_TEMPADDR = 0x3f
+
+ sysIPV6_BINDANY = 0x40
+
+ sysIPV6_MSFILTER = 0x4a
+
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go
new file mode 100644
index 000000000..4a62c2d5c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go
@@ -0,0 +1,124 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PREFER_TEMPADDR = 0x3f
+
+ sysIPV6_BINDANY = 0x40
+
+ sysIPV6_MSFILTER = 0x4a
+
+ sysMCAST_JOIN_GROUP = 0x50
+ sysMCAST_LEAVE_GROUP = 0x51
+ sysMCAST_JOIN_SOURCE_GROUP = 0x52
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+ sysMCAST_BLOCK_SOURCE = 0x54
+ sysMCAST_UNBLOCK_SOURCE = 0x55
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrStorage struct {
+ Len uint8
+ Family uint8
+ X__ss_pad1 [6]int8
+ X__ss_align int64
+ X__ss_pad2 [112]int8
+}
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysSockaddrStorage
+ Source sysSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go
new file mode 100644
index 000000000..36fccbb62
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go
new file mode 100644
index 000000000..7461e7e03
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go
new file mode 100644
index 000000000..36fccbb62
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go
new file mode 100644
index 000000000..ed35f6039
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,arm64
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go
new file mode 100644
index 000000000..141c86977
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,mips64
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go
new file mode 100644
index 000000000..d50eb633e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,mips64le
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go
new file mode 100644
index 000000000..4c58ea67d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x84
+ sysSizeofGroupSourceReq = 0x104
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]uint8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go
new file mode 100644
index 000000000..c1d775f77
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc64
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go
new file mode 100644
index 000000000..e385fb7aa
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,ppc64le
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go
new file mode 100644
index 000000000..28d69b1b0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go
@@ -0,0 +1,172 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+// +build linux,s390x
+
+package ipv6
+
+const (
+ sysIPV6_ADDRFORM = 0x1
+ sysIPV6_2292PKTINFO = 0x2
+ sysIPV6_2292HOPOPTS = 0x3
+ sysIPV6_2292DSTOPTS = 0x4
+ sysIPV6_2292RTHDR = 0x5
+ sysIPV6_2292PKTOPTIONS = 0x6
+ sysIPV6_CHECKSUM = 0x7
+ sysIPV6_2292HOPLIMIT = 0x8
+ sysIPV6_NEXTHOP = 0x9
+ sysIPV6_FLOWINFO = 0xb
+
+ sysIPV6_UNICAST_HOPS = 0x10
+ sysIPV6_MULTICAST_IF = 0x11
+ sysIPV6_MULTICAST_HOPS = 0x12
+ sysIPV6_MULTICAST_LOOP = 0x13
+ sysIPV6_ADD_MEMBERSHIP = 0x14
+ sysIPV6_DROP_MEMBERSHIP = 0x15
+ sysMCAST_JOIN_GROUP = 0x2a
+ sysMCAST_LEAVE_GROUP = 0x2d
+ sysMCAST_JOIN_SOURCE_GROUP = 0x2e
+ sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+ sysMCAST_BLOCK_SOURCE = 0x2b
+ sysMCAST_UNBLOCK_SOURCE = 0x2c
+ sysMCAST_MSFILTER = 0x30
+ sysIPV6_ROUTER_ALERT = 0x16
+ sysIPV6_MTU_DISCOVER = 0x17
+ sysIPV6_MTU = 0x18
+ sysIPV6_RECVERR = 0x19
+ sysIPV6_V6ONLY = 0x1a
+ sysIPV6_JOIN_ANYCAST = 0x1b
+ sysIPV6_LEAVE_ANYCAST = 0x1c
+
+ sysIPV6_FLOWLABEL_MGR = 0x20
+ sysIPV6_FLOWINFO_SEND = 0x21
+
+ sysIPV6_IPSEC_POLICY = 0x22
+ sysIPV6_XFRM_POLICY = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x31
+ sysIPV6_PKTINFO = 0x32
+ sysIPV6_RECVHOPLIMIT = 0x33
+ sysIPV6_HOPLIMIT = 0x34
+ sysIPV6_RECVHOPOPTS = 0x35
+ sysIPV6_HOPOPTS = 0x36
+ sysIPV6_RTHDRDSTOPTS = 0x37
+ sysIPV6_RECVRTHDR = 0x38
+ sysIPV6_RTHDR = 0x39
+ sysIPV6_RECVDSTOPTS = 0x3a
+ sysIPV6_DSTOPTS = 0x3b
+ sysIPV6_RECVPATHMTU = 0x3c
+ sysIPV6_PATHMTU = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_RECVTCLASS = 0x42
+ sysIPV6_TCLASS = 0x43
+
+ sysIPV6_ADDR_PREFERENCES = 0x48
+
+ sysIPV6_PREFER_SRC_TMP = 0x1
+ sysIPV6_PREFER_SRC_PUBLIC = 0x2
+ sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+ sysIPV6_PREFER_SRC_COA = 0x4
+ sysIPV6_PREFER_SRC_HOME = 0x400
+ sysIPV6_PREFER_SRC_CGA = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x800
+
+ sysIPV6_MINHOPCOUNT = 0x49
+
+ sysIPV6_ORIGDSTADDR = 0x4a
+ sysIPV6_RECVORIGDSTADDR = 0x4a
+ sysIPV6_TRANSPARENT = 0x4b
+ sysIPV6_UNICAST_IF = 0x4c
+
+ sysICMPV6_FILTER = 0x1
+
+ sysICMPV6_FILTER_BLOCK = 0x1
+ sysICMPV6_FILTER_PASS = 0x2
+ sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+ sysICMPV6_FILTER_PASSONLY = 0x4
+
+ sysSOL_SOCKET = 0x1
+ sysSO_ATTACH_FILTER = 0x1a
+
+ sysSizeofKernelSockaddrStorage = 0x80
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+ sysSizeofIPv6FlowlabelReq = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+ sysSizeofGroupReq = 0x88
+ sysSizeofGroupSourceReq = 0x108
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysKernelSockaddrStorage struct {
+ Family uint16
+ X__data [126]int8
+}
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6FlowlabelReq struct {
+ Dst [16]byte /* in6_addr */
+ Label uint32
+ Action uint8
+ Share uint8
+ Flags uint16
+ Expires uint16
+ Linger uint16
+ X__flr_pad uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type sysGroupReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+}
+
+type sysGroupSourceReq struct {
+ Interface uint32
+ Pad_cgo_0 [4]byte
+ Group sysKernelSockaddrStorage
+ Source sysKernelSockaddrStorage
+}
+
+type sysICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type sysSockFProg struct {
+ Len uint16
+ Pad_cgo_0 [6]byte
+ Filter *sysSockFilter
+}
+
+type sysSockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go
new file mode 100644
index 000000000..d6ec88e39
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go
@@ -0,0 +1,84 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_netbsd.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_IPSEC_POLICY = 0x1c
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go
new file mode 100644
index 000000000..3e080b78a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go
@@ -0,0 +1,93 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_openbsd.go
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x4
+ sysIPV6_MULTICAST_IF = 0x9
+ sysIPV6_MULTICAST_HOPS = 0xa
+ sysIPV6_MULTICAST_LOOP = 0xb
+ sysIPV6_JOIN_GROUP = 0xc
+ sysIPV6_LEAVE_GROUP = 0xd
+ sysIPV6_PORTRANGE = 0xe
+ sysICMP6_FILTER = 0x12
+
+ sysIPV6_CHECKSUM = 0x1a
+ sysIPV6_V6ONLY = 0x1b
+
+ sysIPV6_RTHDRDSTOPTS = 0x23
+
+ sysIPV6_RECVPKTINFO = 0x24
+ sysIPV6_RECVHOPLIMIT = 0x25
+ sysIPV6_RECVRTHDR = 0x26
+ sysIPV6_RECVHOPOPTS = 0x27
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_USE_MIN_MTU = 0x2a
+ sysIPV6_RECVPATHMTU = 0x2b
+
+ sysIPV6_PATHMTU = 0x2c
+
+ sysIPV6_PKTINFO = 0x2e
+ sysIPV6_HOPLIMIT = 0x2f
+ sysIPV6_NEXTHOP = 0x30
+ sysIPV6_HOPOPTS = 0x31
+ sysIPV6_DSTOPTS = 0x32
+ sysIPV6_RTHDR = 0x33
+
+ sysIPV6_AUTH_LEVEL = 0x35
+ sysIPV6_ESP_TRANS_LEVEL = 0x36
+ sysIPV6_ESP_NETWORK_LEVEL = 0x37
+ sysIPSEC6_OUTSA = 0x38
+ sysIPV6_RECVTCLASS = 0x39
+
+ sysIPV6_AUTOFLOWLABEL = 0x3b
+ sysIPV6_IPCOMP_LEVEL = 0x3c
+
+ sysIPV6_TCLASS = 0x3d
+ sysIPV6_DONTFRAG = 0x3e
+ sysIPV6_PIPEX = 0x3f
+
+ sysIPV6_RTABLE = 0x1021
+
+ sysIPV6_PORTRANGE_DEFAULT = 0x0
+ sysIPV6_PORTRANGE_HIGH = 0x1
+ sysIPV6_PORTRANGE_LOW = 0x2
+
+ sysSizeofSockaddrInet6 = 0x1c
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x20
+
+ sysSizeofIPv6Mreq = 0x14
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysICMPv6Filter struct {
+ Filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go
new file mode 100644
index 000000000..cdf00c25d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go
@@ -0,0 +1,105 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_solaris.go
+
+// +build solaris
+
+package ipv6
+
+const (
+ sysIPV6_UNICAST_HOPS = 0x5
+ sysIPV6_MULTICAST_IF = 0x6
+ sysIPV6_MULTICAST_HOPS = 0x7
+ sysIPV6_MULTICAST_LOOP = 0x8
+ sysIPV6_JOIN_GROUP = 0x9
+ sysIPV6_LEAVE_GROUP = 0xa
+
+ sysIPV6_PKTINFO = 0xb
+
+ sysIPV6_HOPLIMIT = 0xc
+ sysIPV6_NEXTHOP = 0xd
+ sysIPV6_HOPOPTS = 0xe
+ sysIPV6_DSTOPTS = 0xf
+
+ sysIPV6_RTHDR = 0x10
+ sysIPV6_RTHDRDSTOPTS = 0x11
+
+ sysIPV6_RECVPKTINFO = 0x12
+ sysIPV6_RECVHOPLIMIT = 0x13
+ sysIPV6_RECVHOPOPTS = 0x14
+
+ sysIPV6_RECVRTHDR = 0x16
+
+ sysIPV6_RECVRTHDRDSTOPTS = 0x17
+
+ sysIPV6_CHECKSUM = 0x18
+ sysIPV6_RECVTCLASS = 0x19
+ sysIPV6_USE_MIN_MTU = 0x20
+ sysIPV6_DONTFRAG = 0x21
+ sysIPV6_SEC_OPT = 0x22
+ sysIPV6_SRC_PREFERENCES = 0x23
+ sysIPV6_RECVPATHMTU = 0x24
+ sysIPV6_PATHMTU = 0x25
+ sysIPV6_TCLASS = 0x26
+ sysIPV6_V6ONLY = 0x27
+
+ sysIPV6_RECVDSTOPTS = 0x28
+
+ sysIPV6_PREFER_SRC_HOME = 0x1
+ sysIPV6_PREFER_SRC_COA = 0x2
+ sysIPV6_PREFER_SRC_PUBLIC = 0x4
+ sysIPV6_PREFER_SRC_TMP = 0x8
+ sysIPV6_PREFER_SRC_NONCGA = 0x10
+ sysIPV6_PREFER_SRC_CGA = 0x20
+
+ sysIPV6_PREFER_SRC_MIPMASK = 0x3
+ sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1
+ sysIPV6_PREFER_SRC_TMPMASK = 0xc
+ sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4
+ sysIPV6_PREFER_SRC_CGAMASK = 0x30
+ sysIPV6_PREFER_SRC_CGADEFAULT = 0x10
+
+ sysIPV6_PREFER_SRC_MASK = 0x3f
+
+ sysIPV6_PREFER_SRC_DEFAULT = 0x15
+
+ sysIPV6_BOUND_IF = 0x41
+ sysIPV6_UNSPEC_SRC = 0x42
+
+ sysICMP6_FILTER = 0x1
+
+ sysSizeofSockaddrInet6 = 0x20
+ sysSizeofInet6Pktinfo = 0x14
+ sysSizeofIPv6Mtuinfo = 0x24
+
+ sysSizeofIPv6Mreq = 0x14
+
+ sysSizeofICMPv6Filter = 0x20
+)
+
+type sysSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+ X__sin6_src_id uint32
+}
+
+type sysInet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type sysIPv6Mtuinfo struct {
+ Addr sysSockaddrInet6
+ Mtu uint32
+}
+
+type sysIPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type sysICMPv6Filter struct {
+ X__icmp6_filt [8]uint32
+}
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 000000000..20f2b8940
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,351 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+ "net"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+ if isASCII(v) {
+ return v, nil
+ }
+
+ host, port, err := net.SplitHostPort(v)
+ if err != nil {
+ // The input 'v' argument was just a "host" argument,
+ // without a port. This error should not be returned
+ // to the caller.
+ host = v
+ port = ""
+ }
+ host, err = idna.ToASCII(host)
+ if err != nil {
+ // Non-UTF-8? Not representable in Punycode, in any
+ // case.
+ return "", err
+ }
+ if port == "" {
+ return host, nil
+ }
+ return net.JoinHostPort(host, port), nil
+}
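
A minimal usage sketch of the httplex helpers vendored above. It is illustrative only, not part of the patch; the expected outputs follow from the package's own table-driven tests below.

package main

import (
	"fmt"

	"golang.org/x/net/lex/httplex"
)

func main() {
	// Header-name validation follows the RFC 7230 token rule quoted above.
	fmt.Println(httplex.ValidHeaderFieldName("Content-Type")) // true
	fmt.Println(httplex.ValidHeaderFieldName("Bad Header"))   // false: space is not a tchar

	// Field values reject CTL bytes such as NUL, CR and LF.
	fmt.Println(httplex.ValidHeaderFieldValue("gzip, chunked")) // true
	fmt.Println(httplex.ValidHeaderFieldValue("bad\x00value"))  // false

	// Token matching is ASCII case-insensitive across comma-separated values.
	fmt.Println(httplex.HeaderValuesContainsToken([]string{"gzip, chunked"}, "CHUNKED")) // true

	// Non-ASCII hosts are converted to their IDNA/Punycode form.
	host, err := httplex.PunycodeHostPort("bücher.de:8080")
	fmt.Println(host, err) // xn--bcher-kva.de:8080 <nil>
}
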
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go
new file mode 100644
index 000000000..f47adc939
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go
@@ -0,0 +1,119 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httplex
+
+import (
+ "testing"
+)
+
+func isChar(c rune) bool { return c <= 127 }
+
+func isCtl(c rune) bool { return c <= 31 || c == 127 }
+
+func isSeparator(c rune) bool {
+ switch c {
+ case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
+ return true
+ }
+ return false
+}
+
+func TestIsToken(t *testing.T) {
+ for i := 0; i <= 130; i++ {
+ r := rune(i)
+ expected := isChar(r) && !isCtl(r) && !isSeparator(r)
+ if IsTokenRune(r) != expected {
+ t.Errorf("isToken(0x%x) = %v", r, !expected)
+ }
+ }
+}
+
+func TestHeaderValuesContainsToken(t *testing.T) {
+ tests := []struct {
+ vals []string
+ token string
+ want bool
+ }{
+ {
+ vals: []string{"foo"},
+ token: "foo",
+ want: true,
+ },
+ {
+ vals: []string{"bar", "foo"},
+ token: "foo",
+ want: true,
+ },
+ {
+ vals: []string{"foo"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo"},
+ token: "bar",
+ want: false,
+ },
+ {
+ vals: []string{" foo "},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar,foo,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar , foo"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo ,bar "},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar, foo ,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar , foo"},
+ token: "FOO",
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ got := HeaderValuesContainsToken(tt.vals, tt.token)
+ if got != tt.want {
+ t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want)
+ }
+ }
+}
+
+func TestPunycodeHostPort(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"www.google.com", "www.google.com"},
+ {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"},
+ {"bücher.de", "xn--bcher-kva.de"},
+ {"bücher.de:8080", "xn--bcher-kva.de:8080"},
+ {"[1::6]:8080", "[1::6]:8080"},
+ }
+ for _, tt := range tests {
+ got, err := PunycodeHostPort(tt.in)
+ if tt.want != got || err != nil {
+ t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go
new file mode 100644
index 000000000..b317ba2e6
--- /dev/null
+++ b/vendor/golang.org/x/net/netutil/listen.go
@@ -0,0 +1,48 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netutil provides network utility functions, complementing the more
+// common ones in the net package.
+package netutil // import "golang.org/x/net/netutil"
+
+import (
+ "net"
+ "sync"
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+ return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+ net.Listener
+ sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+ l.acquire()
+ c, err := l.Listener.Accept()
+ if err != nil {
+ l.release()
+ return nil, err
+ }
+ return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+ net.Conn
+ releaseOnce sync.Once
+ release func()
+}
+
+func (l *limitListenerConn) Close() error {
+ err := l.Conn.Close()
+ l.releaseOnce.Do(l.release)
+ return err
+}
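
A short sketch of how LimitListener might be used to cap concurrent connections on an HTTP server. The listen address and the limit of 64 are placeholders, and the sketch is not part of the patch.

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/netutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8065")
	if err != nil {
		log.Fatal(err)
	}
	// Wrap the listener so at most 64 connections are served at once; the
	// 65th Accept blocks until an earlier connection closes and releases
	// its semaphore slot.
	limited := netutil.LimitListener(ln, 64)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	log.Fatal(http.Serve(limited, nil))
}
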
diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go
new file mode 100644
index 000000000..c1a3d5527
--- /dev/null
+++ b/vendor/golang.org/x/net/netutil/listen_test.go
@@ -0,0 +1,101 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package netutil
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/internal/nettest"
+)
+
+func TestLimitListener(t *testing.T) {
+ const max = 5
+ attempts := (nettest.MaxOpenFiles() - max) / 2
+ if attempts > 256 { // maximum length of accept queue is 128 by default
+ attempts = 256
+ }
+
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+ l = LimitListener(l, max)
+
+ var open int32
+ go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if n := atomic.AddInt32(&open, 1); n > max {
+ t.Errorf("%d open connections, want <= %d", n, max)
+ }
+ defer atomic.AddInt32(&open, -1)
+ time.Sleep(10 * time.Millisecond)
+ fmt.Fprint(w, "some body")
+ }))
+
+ var wg sync.WaitGroup
+ var failed int32
+ for i := 0; i < attempts; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ c := http.Client{Timeout: 3 * time.Second}
+ r, err := c.Get("http://" + l.Addr().String())
+ if err != nil {
+ t.Log(err)
+ atomic.AddInt32(&failed, 1)
+ return
+ }
+ defer r.Body.Close()
+ io.Copy(ioutil.Discard, r.Body)
+ }()
+ }
+ wg.Wait()
+
+ // We expect some Gets to fail as the kernel's accept queue is filled,
+ // but most should succeed.
+ if int(failed) >= attempts/2 {
+ t.Errorf("%d requests failed within %d attempts", failed, attempts)
+ }
+}
+
+type errorListener struct {
+ net.Listener
+}
+
+func (errorListener) Accept() (net.Conn, error) {
+ return nil, errFake
+}
+
+var errFake = errors.New("fake error from errorListener")
+
+// This used to hang.
+func TestLimitListenerError(t *testing.T) {
+ donec := make(chan bool, 1)
+ go func() {
+ const n = 2
+ ll := LimitListener(errorListener{}, n)
+ for i := 0; i < n+1; i++ {
+ _, err := ll.Accept()
+ if err != errFake {
+ t.Fatalf("Accept error = %v; want errFake", err)
+ }
+ }
+ donec <- true
+ }()
+ select {
+ case <-donec:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout. deadlock?")
+ }
+}
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
new file mode 100644
index 000000000..4c5ad88b1
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -0,0 +1,18 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "net"
+)
+
+type direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var Direct = direct{}
+
+func (direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
new file mode 100644
index 000000000..f540b196f
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "net"
+ "strings"
+)
+
+// A PerHost directs connections to a default Dialer unless the hostname
+// requested matches one of a number of exceptions.
+type PerHost struct {
+ def, bypass Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
+ return &PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *PerHost) dialerForRequest(host string) Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone "example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a hostname
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a hostname that will use the bypass proxy.
+func (p *PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
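
A brief sketch of PerHost routing under the assumption that an upstream SOCKS5 proxy is reachable at a placeholder address: hosts matching the bypass list dial directly, everything else goes through the upstream dialer. The per_host_test.go file that follows exercises the same matching rules.

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Hypothetical upstream SOCKS5 proxy; the address is a placeholder.
	upstream, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	// Hosts matching the list below bypass the proxy and dial directly;
	// everything else is dialed through upstream.
	perHost := proxy.NewPerHost(upstream, proxy.Direct)
	perHost.AddFromString("localhost,*.internal.example.com,10.0.0.0/8")

	conn, err := perHost.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
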
diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go
new file mode 100644
index 000000000..a7d809571
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/per_host_test.go
@@ -0,0 +1,55 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "errors"
+ "net"
+ "reflect"
+ "testing"
+)
+
+type recordingProxy struct {
+ addrs []string
+}
+
+func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) {
+ r.addrs = append(r.addrs, addr)
+ return nil, errors.New("recordingProxy")
+}
+
+func TestPerHost(t *testing.T) {
+ var def, bypass recordingProxy
+ perHost := NewPerHost(&def, &bypass)
+ perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16")
+
+ expectedDef := []string{
+ "example.com:123",
+ "1.2.3.4:123",
+ "[1001::]:123",
+ }
+ expectedBypass := []string{
+ "localhost:123",
+ "zone:123",
+ "foo.zone:123",
+ "127.0.0.1:123",
+ "10.1.2.3:123",
+ "[1000::]:123",
+ }
+
+ for _, addr := range expectedDef {
+ perHost.Dial("tcp", addr)
+ }
+ for _, addr := range expectedBypass {
+ perHost.Dial("tcp", addr)
+ }
+
+ if !reflect.DeepEqual(expectedDef, def.addrs) {
+ t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef)
+ }
+ if !reflect.DeepEqual(expectedBypass, bypass.addrs) {
+ t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass)
+ }
+}
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 000000000..78a8b7bee
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,94 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "os"
+)
+
+// A Dialer is a means to establish a connection.
+type Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func FromEnvironment() Dialer {
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ return Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return Direct
+ }
+ proxy, err := FromURL(proxyURL, Direct)
+ if err != nil {
+ return Direct
+ }
+
+ noProxy := os.Getenv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := NewPerHost(proxy, Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+ if proxySchemes == nil {
+ proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+ }
+ proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+ var auth *Auth
+ if u.User != nil {
+ auth = new(Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxySchemes != nil {
+ if f, ok := proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
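
A rough sketch of the FromURL entry point, dialing through a SOCKS5 proxy described by a URL. The proxy address, credentials, and target host are placeholders; the sketch is not part of the patch.

package main

import (
	"log"
	"net/url"

	"golang.org/x/net/proxy"
)

func main() {
	u, err := url.Parse("socks5://user:password@127.0.0.1:1080")
	if err != nil {
		log.Fatal(err)
	}
	// FromURL understands the built-in "socks5" scheme; other schemes can
	// be added with RegisterDialerType before calling it.
	dialer, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
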
diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go
new file mode 100644
index 000000000..c19a5c063
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy_test.go
@@ -0,0 +1,142 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "io"
+ "net"
+ "net/url"
+ "strconv"
+ "sync"
+ "testing"
+)
+
+func TestFromURL(t *testing.T) {
+ endSystem, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("net.Listen failed: %v", err)
+ }
+ defer endSystem.Close()
+ gateway, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("net.Listen failed: %v", err)
+ }
+ defer gateway.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg)
+
+ url, err := url.Parse("socks5://user:password@" + gateway.Addr().String())
+ if err != nil {
+ t.Fatalf("url.Parse failed: %v", err)
+ }
+ proxy, err := FromURL(url, Direct)
+ if err != nil {
+ t.Fatalf("FromURL failed: %v", err)
+ }
+ _, port, err := net.SplitHostPort(endSystem.Addr().String())
+ if err != nil {
+ t.Fatalf("net.SplitHostPort failed: %v", err)
+ }
+ if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil {
+ t.Fatalf("FromURL.Dial failed: %v", err)
+ } else {
+ c.Close()
+ }
+
+ wg.Wait()
+}
+
+func TestSOCKS5(t *testing.T) {
+ endSystem, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("net.Listen failed: %v", err)
+ }
+ defer endSystem.Close()
+ gateway, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("net.Listen failed: %v", err)
+ }
+ defer gateway.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg)
+
+ proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct)
+ if err != nil {
+ t.Fatalf("SOCKS5 failed: %v", err)
+ }
+ if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil {
+ t.Fatalf("SOCKS5.Dial failed: %v", err)
+ } else {
+ c.Close()
+ }
+
+ wg.Wait()
+}
+
+func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ c, err := gateway.Accept()
+ if err != nil {
+ t.Errorf("net.Listener.Accept failed: %v", err)
+ return
+ }
+ defer c.Close()
+
+ b := make([]byte, 32)
+ var n int
+ if typ == socks5Domain {
+ n = 4
+ } else {
+ n = 3
+ }
+ if _, err := io.ReadFull(c, b[:n]); err != nil {
+ t.Errorf("io.ReadFull failed: %v", err)
+ return
+ }
+ if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil {
+ t.Errorf("net.Conn.Write failed: %v", err)
+ return
+ }
+ if typ == socks5Domain {
+ n = 16
+ } else {
+ n = 10
+ }
+ if _, err := io.ReadFull(c, b[:n]); err != nil {
+ t.Errorf("io.ReadFull failed: %v", err)
+ return
+ }
+ if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ {
+ t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3])
+ return
+ }
+ if typ == socks5Domain {
+ copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9})
+ b = append(b, []byte("localhost")...)
+ } else {
+ copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4})
+ }
+ host, port, err := net.SplitHostPort(endSystem.Addr().String())
+ if err != nil {
+ t.Errorf("net.SplitHostPort failed: %v", err)
+ return
+ }
+ b = append(b, []byte(net.ParseIP(host).To4())...)
+ p, err := strconv.Atoi(port)
+ if err != nil {
+ t.Errorf("strconv.Atoi failed: %v", err)
+ return
+ }
+ b = append(b, []byte{byte(p >> 8), byte(p)}...)
+ if _, err := c.Write(b); err != nil {
+ t.Errorf("net.Conn.Write failed: %v", err)
+ return
+ }
+}
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
new file mode 100644
index 000000000..9b9628239
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -0,0 +1,210 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928.
+func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {
+ s := &socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type socks5 struct {
+ user, password string
+ network, addr string
+ forward Dialer
+}
+
+const socks5Version = 5
+
+const (
+ socks5AuthNone = 0
+ socks5AuthPassword = 2
+)
+
+const socks5Connect = 1
+
+const (
+ socks5IP4 = 1
+ socks5Domain = 3
+ socks5IP6 = 4
+)
+
+var socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ closeConn := &conn
+ defer func() {
+ if closeConn != nil {
+ (*closeConn).Close()
+ }
+ }()
+
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return nil, errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ if buf[1] == socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return nil, errors.New("proxy: destination hostname too long: " + host)
+ }
+ buf = append(buf, socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(socks5Errors) {
+ failure = socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ closeConn = nil
+ return conn, nil
+}
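A minimal sketch of the SOCKS5 constructor used directly, with the optional username/password authentication that Dial negotiates above; the proxy address and credentials are placeholders:

package main

import (
    "log"

    "golang.org/x/net/proxy"
)

func main() {
    // Placeholder credentials and proxy address.
    auth := &proxy.Auth{User: "user", Password: "password"}
    dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", auth, proxy.Direct)
    if err != nil {
        log.Fatal(err)
    }
    // No network traffic happens until Dial; the SOCKS5 handshake is performed lazily.
    conn, err := dialer.Dial("tcp", "example.com:443")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}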
diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go
new file mode 100644
index 000000000..a2d499529
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/gen.go
@@ -0,0 +1,713 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates table.go and table_test.go based on the authoritative
+// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat
+//
+// The version is derived from
+// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat
+// and a human-readable form is at
+// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat
+//
+// To fetch a particular git revision, such as 5c70ccd250, pass
+// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat"
+// and -version "an explicit version string".
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+
+ "golang.org/x/net/idna"
+)
+
+const (
+ // The sum of these four values must be no greater than 32.
+ nodesBitsChildren = 9
+ nodesBitsICANN = 1
+ nodesBitsTextOffset = 15
+ nodesBitsTextLength = 6
+
+ // The sum of these four values must be no greater than 32.
+ childrenBitsWildcard = 1
+ childrenBitsNodeType = 2
+ childrenBitsHi = 14
+ childrenBitsLo = 14
+)
+
+var (
+ maxChildren int
+ maxTextOffset int
+ maxTextLength int
+ maxHi uint32
+ maxLo uint32
+)
+
+func max(a, b int) int {
+ if a < b {
+ return b
+ }
+ return a
+}
+
+func u32max(a, b uint32) uint32 {
+ if a < b {
+ return b
+ }
+ return a
+}
+
+const (
+ nodeTypeNormal = 0
+ nodeTypeException = 1
+ nodeTypeParentOnly = 2
+ numNodeType = 3
+)
+
+func nodeTypeStr(n int) string {
+ switch n {
+ case nodeTypeNormal:
+ return "+"
+ case nodeTypeException:
+ return "!"
+ case nodeTypeParentOnly:
+ return "o"
+ }
+ panic("unreachable")
+}
+
+const (
+ defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat"
+ gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat"
+)
+
+var (
+ labelEncoding = map[string]uint32{}
+ labelsList = []string{}
+ labelsMap = map[string]bool{}
+ rules = []string{}
+
+ // validSuffixRE is used to check that the entries in the public suffix
+ // list are in canonical form (after Punycode encoding). Specifically,
+ // capital letters are not allowed.
+ validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`)
+
+ shaRE = regexp.MustCompile(`"sha":"([^"]+)"`)
+ dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`)
+
+ comments = flag.Bool("comments", false, "generate table.go comments, for debugging")
+ subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging")
+ url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead")
+ v = flag.Bool("v", false, "verbose output (to stderr)")
+ version = flag.String("version", "", "the effective_tld_names.dat version")
+)
+
+func main() {
+ if err := main1(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func main1() error {
+ flag.Parse()
+ if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 {
+ return fmt.Errorf("not enough bits to encode the nodes table")
+ }
+ if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 {
+ return fmt.Errorf("not enough bits to encode the children table")
+ }
+ if *version == "" {
+ if *url != defaultURL {
+ return fmt.Errorf("-version was not specified, and the -url is not the default one")
+ }
+ sha, date, err := gitCommit()
+ if err != nil {
+ return err
+ }
+ *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date)
+ }
+ var r io.Reader = os.Stdin
+ if *url != "" {
+ res, err := http.Get(*url)
+ if err != nil {
+ return err
+ }
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad GET status for %s: %d", *url, res.Status)
+ }
+ r = res.Body
+ defer res.Body.Close()
+ }
+
+ var root node
+ icann := false
+ br := bufio.NewReader(r)
+ for {
+ s, err := br.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ s = strings.TrimSpace(s)
+ if strings.Contains(s, "BEGIN ICANN DOMAINS") {
+ icann = true
+ continue
+ }
+ if strings.Contains(s, "END ICANN DOMAINS") {
+ icann = false
+ continue
+ }
+ if s == "" || strings.HasPrefix(s, "//") {
+ continue
+ }
+ s, err = idna.ToASCII(s)
+ if err != nil {
+ return err
+ }
+ if !validSuffixRE.MatchString(s) {
+ return fmt.Errorf("bad publicsuffix.org list data: %q", s)
+ }
+
+ if *subset {
+ switch {
+ case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"):
+ case s == "ak.us" || strings.HasSuffix(s, ".ak.us"):
+ case s == "ao" || strings.HasSuffix(s, ".ao"):
+ case s == "ar" || strings.HasSuffix(s, ".ar"):
+ case s == "arpa" || strings.HasSuffix(s, ".arpa"):
+ case s == "cy" || strings.HasSuffix(s, ".cy"):
+ case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"):
+ case s == "jp":
+ case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"):
+ case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"):
+ case s == "om" || strings.HasSuffix(s, ".om"):
+ case s == "uk" || strings.HasSuffix(s, ".uk"):
+ case s == "uk.com" || strings.HasSuffix(s, ".uk.com"):
+ case s == "tw" || strings.HasSuffix(s, ".tw"):
+ case s == "zw" || strings.HasSuffix(s, ".zw"):
+ case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"):
+ // xn--p1ai is Russian-Cyrillic "рф".
+ default:
+ continue
+ }
+ }
+
+ rules = append(rules, s)
+
+ nt, wildcard := nodeTypeNormal, false
+ switch {
+ case strings.HasPrefix(s, "*."):
+ s, nt = s[2:], nodeTypeParentOnly
+ wildcard = true
+ case strings.HasPrefix(s, "!"):
+ s, nt = s[1:], nodeTypeException
+ }
+ labels := strings.Split(s, ".")
+ for n, i := &root, len(labels)-1; i >= 0; i-- {
+ label := labels[i]
+ n = n.child(label)
+ if i == 0 {
+ if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly {
+ n.nodeType = nt
+ }
+ n.icann = n.icann && icann
+ n.wildcard = n.wildcard || wildcard
+ }
+ labelsMap[label] = true
+ }
+ }
+ labelsList = make([]string, 0, len(labelsMap))
+ for label := range labelsMap {
+ labelsList = append(labelsList, label)
+ }
+ sort.Strings(labelsList)
+
+ if err := generate(printReal, &root, "table.go"); err != nil {
+ return err
+ }
+ if err := generate(printTest, &root, "table_test.go"); err != nil {
+ return err
+ }
+ return nil
+}
+
+func generate(p func(io.Writer, *node) error, root *node, filename string) error {
+ buf := new(bytes.Buffer)
+ if err := p(buf, root); err != nil {
+ return err
+ }
+ b, err := format.Source(buf.Bytes())
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(filename, b, 0644)
+}
+
+func gitCommit() (sha, date string, retErr error) {
+ res, err := http.Get(gitCommitURL)
+ if err != nil {
+ return "", "", err
+ }
+ if res.StatusCode != http.StatusOK {
+ return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status)
+ }
+ defer res.Body.Close()
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ if m := shaRE.FindSubmatch(b); m != nil {
+ sha = string(m[1])
+ }
+ if m := dateRE.FindSubmatch(b); m != nil {
+ date = string(m[1])
+ }
+ if sha == "" || date == "" {
+ retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL)
+ }
+ return sha, date, retErr
+}
+
+func printTest(w io.Writer, n *node) error {
+ fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n")
+ fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n")
+ for _, rule := range rules {
+ fmt.Fprintf(w, "%q,\n", rule)
+ }
+ fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n")
+ if err := n.walk(w, printNodeLabel); err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "}\n")
+ return nil
+}
+
+func printReal(w io.Writer, n *node) error {
+ const header = `// generated by go run gen.go; DO NOT EDIT
+
+package publicsuffix
+
+const version = %q
+
+const (
+ nodesBitsChildren = %d
+ nodesBitsICANN = %d
+ nodesBitsTextOffset = %d
+ nodesBitsTextLength = %d
+
+ childrenBitsWildcard = %d
+ childrenBitsNodeType = %d
+ childrenBitsHi = %d
+ childrenBitsLo = %d
+)
+
+const (
+ nodeTypeNormal = %d
+ nodeTypeException = %d
+ nodeTypeParentOnly = %d
+)
+
+// numTLD is the number of top level domains.
+const numTLD = %d
+
+`
+ fmt.Fprintf(w, header, *version,
+ nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,
+ childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,
+ nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))
+
+ text := combineText(labelsList)
+ if text == "" {
+ return fmt.Errorf("internal error: makeText returned no text")
+ }
+ for _, label := range labelsList {
+ offset, length := strings.Index(text, label), len(label)
+ if offset < 0 {
+ return fmt.Errorf("internal error: could not find %q in text %q", label, text)
+ }
+ maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)
+ if offset >= 1<<nodesBitsTextOffset {
+ return fmt.Errorf("text offset %d is too large, or nodeBitsTextOffset is too small", offset)
+ }
+ if length >= 1<<nodesBitsTextLength {
+ return fmt.Errorf("text length %d is too large, or nodeBitsTextLength is too small", length)
+ }
+ labelEncoding[label] = uint32(offset)<<nodesBitsTextLength | uint32(length)
+ }
+ fmt.Fprintf(w, "// Text is the combined text of all labels.\nconst text = ")
+ for len(text) > 0 {
+ n, plus := len(text), ""
+ if n > 64 {
+ n, plus = 64, " +"
+ }
+ fmt.Fprintf(w, "%q%s\n", text[:n], plus)
+ text = text[n:]
+ }
+
+ if err := n.walk(w, assignIndexes); err != nil {
+ return err
+ }
+
+ fmt.Fprintf(w, `
+
+// nodes is the list of nodes. Each node is represented as a uint32, which
+// encodes the node's children, wildcard bit and node type (as an index into
+// the children array), ICANN bit and text.
+//
+// If the table was generated with the -comments flag, there is a //-comment
+// after each node's data. In it are the nodes-array indexes of the children,
+// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
+// nodeType is printed as + for normal, ! for exception, and o for parent-only
+// nodes that have children but don't match a domain label in their own right.
+// An I denotes an ICANN domain.
+//
+// The layout within the uint32, from MSB to LSB, is:
+// [%2d bits] unused
+// [%2d bits] children index
+// [%2d bits] ICANN bit
+// [%2d bits] text index
+// [%2d bits] text length
+var nodes = [...]uint32{
+`,
+ 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,
+ nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)
+ if err := n.walk(w, printNode); err != nil {
+ return err
+ }
+ fmt.Fprintf(w, `}
+
+// children is the list of nodes' children, the parent's wildcard bit and the
+// parent's node type. If a node has no children then its children index
+// will be in the range [0, 6), depending on the wildcard bit and node type.
+//
+// The layout within the uint32, from MSB to LSB, is:
+// [%2d bits] unused
+// [%2d bits] wildcard bit
+// [%2d bits] node type
+// [%2d bits] high nodes index (exclusive) of children
+// [%2d bits] low nodes index (inclusive) of children
+var children = [...]uint32{
+`,
+ 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,
+ childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)
+ for i, c := range childrenEncoding {
+ s := "---------------"
+ lo := c & (1<<childrenBitsLo - 1)
+ hi := (c >> childrenBitsLo) & (1<<childrenBitsHi - 1)
+ if lo != hi {
+ s = fmt.Sprintf("n0x%04x-n0x%04x", lo, hi)
+ }
+ nodeType := int(c>>(childrenBitsLo+childrenBitsHi)) & (1<<childrenBitsNodeType - 1)
+ wildcard := c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0
+ if *comments {
+ fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
+ c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
+ } else {
+ fmt.Fprintf(w, "0x%x,\n", c)
+ }
+ }
+ fmt.Fprintf(w, "}\n\n")
+ fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<<nodesBitsChildren-1)
+ fmt.Fprintf(w, "// max text offset %d (capacity %d)\n", maxTextOffset, 1<<nodesBitsTextOffset-1)
+ fmt.Fprintf(w, "// max text length %d (capacity %d)\n", maxTextLength, 1<<nodesBitsTextLength-1)
+ fmt.Fprintf(w, "// max hi %d (capacity %d)\n", maxHi, 1<<childrenBitsHi-1)
+ fmt.Fprintf(w, "// max lo %d (capacity %d)\n", maxLo, 1<<childrenBitsLo-1)
+ return nil
+}
+
+type node struct {
+ label string
+ nodeType int
+ icann bool
+ wildcard bool
+ // nodesIndex and childrenIndex are the index of this node in the nodes
+ // and the index of its children offset/length in the children arrays.
+ nodesIndex, childrenIndex int
+ // firstChild is the index of this node's first child, or zero if this
+ // node has no children.
+ firstChild int
+ // children are the node's children, in strictly increasing node label order.
+ children []*node
+}
+
+func (n *node) walk(w io.Writer, f func(w1 io.Writer, n1 *node) error) error {
+ if err := f(w, n); err != nil {
+ return err
+ }
+ for _, c := range n.children {
+ if err := c.walk(w, f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// child returns the child of n with the given label. The child is created if
+// it did not exist beforehand.
+func (n *node) child(label string) *node {
+ for _, c := range n.children {
+ if c.label == label {
+ return c
+ }
+ }
+ c := &node{
+ label: label,
+ nodeType: nodeTypeParentOnly,
+ icann: true,
+ }
+ n.children = append(n.children, c)
+ sort.Sort(byLabel(n.children))
+ return c
+}
+
+type byLabel []*node
+
+func (b byLabel) Len() int { return len(b) }
+func (b byLabel) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byLabel) Less(i, j int) bool { return b[i].label < b[j].label }
+
+var nextNodesIndex int
+
+// childrenEncoding are the encoded entries in the generated children array.
+// All these pre-defined entries have no children.
+var childrenEncoding = []uint32{
+ 0 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeNormal.
+ 1 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeException.
+ 2 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeParentOnly.
+ 4 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeNormal.
+ 5 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeException.
+ 6 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeParentOnly.
+}
+
+var firstCallToAssignIndexes = true
+
+func assignIndexes(w io.Writer, n *node) error {
+ if len(n.children) != 0 {
+ // Assign nodesIndex.
+ n.firstChild = nextNodesIndex
+ for _, c := range n.children {
+ c.nodesIndex = nextNodesIndex
+ nextNodesIndex++
+ }
+
+ // The root node's children are implicit.
+ if firstCallToAssignIndexes {
+ firstCallToAssignIndexes = false
+ return nil
+ }
+
+ // Assign childrenIndex.
+ maxChildren = max(maxChildren, len(childrenEncoding))
+ if len(childrenEncoding) >= 1<<nodesBitsChildren {
+ return fmt.Errorf("children table size %d is too large, or nodeBitsChildren is too small", len(childrenEncoding))
+ }
+ n.childrenIndex = len(childrenEncoding)
+ lo := uint32(n.firstChild)
+ hi := lo + uint32(len(n.children))
+ maxLo, maxHi = u32max(maxLo, lo), u32max(maxHi, hi)
+ if lo >= 1<<childrenBitsLo {
+ return fmt.Errorf("children lo %d is too large, or childrenBitsLo is too small", lo)
+ }
+ if hi >= 1<<childrenBitsHi {
+ return fmt.Errorf("children hi %d is too large, or childrenBitsHi is too small", hi)
+ }
+ enc := hi<<childrenBitsLo | lo
+ enc |= uint32(n.nodeType) << (childrenBitsLo + childrenBitsHi)
+ if n.wildcard {
+ enc |= 1 << (childrenBitsLo + childrenBitsHi + childrenBitsNodeType)
+ }
+ childrenEncoding = append(childrenEncoding, enc)
+ } else {
+ n.childrenIndex = n.nodeType
+ if n.wildcard {
+ n.childrenIndex += numNodeType
+ }
+ }
+ return nil
+}
+
+func printNode(w io.Writer, n *node) error {
+ for _, c := range n.children {
+ s := "---------------"
+ if len(c.children) != 0 {
+ s = fmt.Sprintf("n0x%04x-n0x%04x", c.firstChild, c.firstChild+len(c.children))
+ }
+ encoding := labelEncoding[c.label]
+ if c.icann {
+ encoding |= 1 << (nodesBitsTextLength + nodesBitsTextOffset)
+ }
+ encoding |= uint32(c.childrenIndex) << (nodesBitsTextLength + nodesBitsTextOffset + nodesBitsICANN)
+ if *comments {
+ fmt.Fprintf(w, "0x%08x, // n0x%04x c0x%04x (%s)%s %s %s %s\n",
+ encoding, c.nodesIndex, c.childrenIndex, s, wildcardStr(c.wildcard),
+ nodeTypeStr(c.nodeType), icannStr(c.icann), c.label,
+ )
+ } else {
+ fmt.Fprintf(w, "0x%x,\n", encoding)
+ }
+ }
+ return nil
+}
+
+func printNodeLabel(w io.Writer, n *node) error {
+ for _, c := range n.children {
+ fmt.Fprintf(w, "%q,\n", c.label)
+ }
+ return nil
+}
+
+func icannStr(icann bool) string {
+ if icann {
+ return "I"
+ }
+ return " "
+}
+
+func wildcardStr(wildcard bool) string {
+ if wildcard {
+ return "*"
+ }
+ return " "
+}
+
+// combineText combines all the strings in labelsList to form one giant string.
+// Overlapping strings will be merged: "arpa" and "parliament" could yield
+// "arparliament".
+func combineText(labelsList []string) string {
+ beforeLength := 0
+ for _, s := range labelsList {
+ beforeLength += len(s)
+ }
+
+ text := crush(removeSubstrings(labelsList))
+ if *v {
+ fmt.Fprintf(os.Stderr, "crushed %d bytes to become %d bytes\n", beforeLength, len(text))
+ }
+ return text
+}
+
+type byLength []string
+
+func (s byLength) Len() int { return len(s) }
+func (s byLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) }
+
+// removeSubstrings returns a copy of its input with any strings removed
+// that are substrings of other provided strings.
+func removeSubstrings(input []string) []string {
+ // Make a copy of input.
+ ss := append(make([]string, 0, len(input)), input...)
+ sort.Sort(byLength(ss))
+
+ for i, shortString := range ss {
+ // For each string, only consider strings higher than it in sort order, i.e.
+ // of equal length or greater.
+ for _, longString := range ss[i+1:] {
+ if strings.Contains(longString, shortString) {
+ ss[i] = ""
+ break
+ }
+ }
+ }
+
+ // Remove the empty strings.
+ sort.Strings(ss)
+ for len(ss) > 0 && ss[0] == "" {
+ ss = ss[1:]
+ }
+ return ss
+}
+
+// crush combines a list of strings, taking advantage of overlaps. It returns a
+// single string that contains each input string as a substring.
+func crush(ss []string) string {
+ maxLabelLen := 0
+ for _, s := range ss {
+ if maxLabelLen < len(s) {
+ maxLabelLen = len(s)
+ }
+ }
+
+ for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- {
+ prefixes := makePrefixMap(ss, prefixLen)
+ for i, s := range ss {
+ if len(s) <= prefixLen {
+ continue
+ }
+ mergeLabel(ss, i, prefixLen, prefixes)
+ }
+ }
+
+ return strings.Join(ss, "")
+}
+
+// mergeLabel merges the label at ss[i] with the first available matching label
+// in prefixMap, where the last "prefixLen" characters in ss[i] match the first
+// "prefixLen" characters in the matching label.
+// It will merge ss[i] repeatedly until no more matches are available.
+// All matching labels merged into ss[i] are replaced by "".
+func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {
+ s := ss[i]
+ suffix := s[len(s)-prefixLen:]
+ for _, j := range prefixes[suffix] {
+ // Empty strings mean "already used." Also avoid merging with self.
+ if ss[j] == "" || i == j {
+ continue
+ }
+ if *v {
+ fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n",
+ prefixLen, i, j, ss[i], ss[j], suffix)
+ }
+ ss[i] += ss[j][prefixLen:]
+ ss[j] = ""
+ // ss[i] has a new suffix, so merge again if possible.
+ // Note: we only have to merge again at the same prefix length. Shorter
+ // prefix lengths will be handled in the next iteration of crush's for loop.
+ // Can there be matches for longer prefix lengths, introduced by the merge?
+ // I believe that any such matches would by necessity have been eliminated
+ // during substring removal or merged at a higher prefix length. For
+ // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde"
+ // would yield "abcde", which could be merged with "bcdef." However, in
+ // practice "cde" would already have been elimintated by removeSubstrings.
+ mergeLabel(ss, i, prefixLen, prefixes)
+ return
+ }
+}
+
+// prefixMap maps from a prefix to a list of strings containing that prefix. The
+// list of strings is represented as indexes into a slice of strings stored
+// elsewhere.
+type prefixMap map[string][]int
+
+// makePrefixMap constructs a prefixMap from a slice of strings.
+func makePrefixMap(ss []string, prefixLen int) prefixMap {
+ prefixes := make(prefixMap)
+ for i, s := range ss {
+ // We use < rather than <= because if a label matches on a prefix equal to
+ // its full length, that's actually a substring match handled by
+ // removeSubstrings.
+ if prefixLen < len(s) {
+ prefix := s[:prefixLen]
+ prefixes[prefix] = append(prefixes[prefix], i)
+ }
+ }
+
+ return prefixes
+}
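As a usage note for the generator above: with the flags defined in this file, running "go run gen.go" (or the go:generate directive in list.go) rebuilds table.go and table_test.go from the live list, while a specific revision can be pinned by passing both -url (a raw.githubusercontent.com URL for the desired commit) and -version (an explicit version string); -comments emits an annotated table and -subset a reduced one for debugging.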
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go
new file mode 100644
index 000000000..8bbf3bcd7
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -0,0 +1,135 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package publicsuffix provides a public suffix list based on data from
+// http://publicsuffix.org/. A public suffix is one under which Internet users
+// can directly register names.
+package publicsuffix // import "golang.org/x/net/publicsuffix"
+
+// TODO: specify case sensitivity and leading/trailing dot behavior for
+// func PublicSuffix and func EffectiveTLDPlusOne.
+
+import (
+ "fmt"
+ "net/http/cookiejar"
+ "strings"
+)
+
+// List implements the cookiejar.PublicSuffixList interface by calling the
+// PublicSuffix function.
+var List cookiejar.PublicSuffixList = list{}
+
+type list struct{}
+
+func (list) PublicSuffix(domain string) string {
+ ps, _ := PublicSuffix(domain)
+ return ps
+}
+
+func (list) String() string {
+ return version
+}
+
+// PublicSuffix returns the public suffix of the domain using a copy of the
+// publicsuffix.org database compiled into the library.
+//
+// icann is whether the public suffix is managed by the Internet Corporation
+// for Assigned Names and Numbers. If not, the public suffix is privately
+// managed. For example, foo.org and foo.co.uk are ICANN domains, while
+// foo.dyndns.org and foo.blogspot.co.uk are private domains.
+//
+// Use cases for distinguishing ICANN domains like foo.com from private
+// domains like foo.appspot.com can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+ lo, hi := uint32(0), uint32(numTLD)
+ s, suffix, wildcard := domain, len(domain), false
+loop:
+ for {
+ dot := strings.LastIndex(s, ".")
+ if wildcard {
+ suffix = 1 + dot
+ }
+ if lo == hi {
+ break
+ }
+ f := find(s[1+dot:], lo, hi)
+ if f == notFound {
+ break
+ }
+
+ u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
+ icann = u&(1<<nodesBitsICANN-1) != 0
+ u >>= nodesBitsICANN
+ u = children[u&(1<<nodesBitsChildren-1)]
+ lo = u & (1<<childrenBitsLo - 1)
+ u >>= childrenBitsLo
+ hi = u & (1<<childrenBitsHi - 1)
+ u >>= childrenBitsHi
+ switch u & (1<<childrenBitsNodeType - 1) {
+ case nodeTypeNormal:
+ suffix = 1 + dot
+ case nodeTypeException:
+ suffix = 1 + len(s)
+ break loop
+ }
+ u >>= childrenBitsNodeType
+ wildcard = u&(1<<childrenBitsWildcard-1) != 0
+
+ if dot == -1 {
+ break
+ }
+ s = s[:dot]
+ }
+ if suffix == len(domain) {
+ // If no rules match, the prevailing rule is "*".
+ return domain[1+strings.LastIndex(domain, "."):], icann
+ }
+ return domain[suffix:], icann
+}
+
+const notFound uint32 = 1<<32 - 1
+
+// find returns the index of the node in the range [lo, hi) whose label equals
+// label, or notFound if there is no such node. The range is assumed to be in
+// strictly increasing node label order.
+func find(label string, lo, hi uint32) uint32 {
+ for lo < hi {
+ mid := lo + (hi-lo)/2
+ s := nodeLabel(mid)
+ if s < label {
+ lo = mid + 1
+ } else if s == label {
+ return mid
+ } else {
+ hi = mid
+ }
+ }
+ return notFound
+}
+
+// nodeLabel returns the label for the i'th node.
+func nodeLabel(i uint32) string {
+ x := nodes[i]
+ length := x & (1<<nodesBitsTextLength - 1)
+ x >>= nodesBitsTextLength
+ offset := x & (1<<nodesBitsTextOffset - 1)
+ return text[offset : offset+length]
+}
+
+// EffectiveTLDPlusOne returns the effective top level domain plus one more
+// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
+func EffectiveTLDPlusOne(domain string) (string, error) {
+ suffix, _ := PublicSuffix(domain)
+ if len(domain) <= len(suffix) {
+ return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
+ }
+ i := len(domain) - len(suffix) - 1
+ if domain[i] != '.' {
+ return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
+ }
+ return domain[1+strings.LastIndex(domain[:i], "."):], nil
+}
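A minimal sketch of the exported API above: PublicSuffix with its ICANN flag, EffectiveTLDPlusOne, and the cookiejar integration via List. The domains are illustrative.

package main

import (
    "fmt"
    "net/http/cookiejar"

    "golang.org/x/net/publicsuffix"
)

func main() {
    // "blogspot.co.uk" is a privately managed suffix, so icann is false.
    suffix, icann := publicsuffix.PublicSuffix("foo.blogspot.co.uk")
    fmt.Println(suffix, icann) // blogspot.co.uk false

    // The registrable domain (eTLD+1) is the public suffix plus one more label.
    etld1, err := publicsuffix.EffectiveTLDPlusOne("foo.bar.golang.org")
    fmt.Println(etld1, err) // golang.org <nil>

    // Cookie jars use the list to avoid setting cookies on an entire public suffix.
    jar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
    _ = jar
}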
diff --git a/vendor/golang.org/x/net/publicsuffix/list_test.go b/vendor/golang.org/x/net/publicsuffix/list_test.go
new file mode 100644
index 000000000..a08e64eaf
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/list_test.go
@@ -0,0 +1,416 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package publicsuffix
+
+import (
+ "sort"
+ "strings"
+ "testing"
+)
+
+func TestNodeLabel(t *testing.T) {
+ for i, want := range nodeLabels {
+ got := nodeLabel(uint32(i))
+ if got != want {
+ t.Errorf("%d: got %q, want %q", i, got, want)
+ }
+ }
+}
+
+func TestFind(t *testing.T) {
+ testCases := []string{
+ "",
+ "a",
+ "a0",
+ "aaaa",
+ "ao",
+ "ap",
+ "ar",
+ "aro",
+ "arp",
+ "arpa",
+ "arpaa",
+ "arpb",
+ "az",
+ "b",
+ "b0",
+ "ba",
+ "z",
+ "zu",
+ "zv",
+ "zw",
+ "zx",
+ "zy",
+ "zz",
+ "zzzz",
+ }
+ for _, tc := range testCases {
+ got := find(tc, 0, numTLD)
+ want := notFound
+ for i := uint32(0); i < numTLD; i++ {
+ if tc == nodeLabel(i) {
+ want = i
+ break
+ }
+ }
+ if got != want {
+ t.Errorf("%q: got %d, want %d", tc, got, want)
+ }
+ }
+}
+
+func TestICANN(t *testing.T) {
+ testCases := map[string]bool{
+ "foo.org": true,
+ "foo.co.uk": true,
+ "foo.dyndns.org": false,
+ "foo.go.dyndns.org": false,
+ "foo.blogspot.co.uk": false,
+ "foo.intranet": false,
+ }
+ for domain, want := range testCases {
+ _, got := PublicSuffix(domain)
+ if got != want {
+ t.Errorf("%q: got %v, want %v", domain, got, want)
+ }
+ }
+}
+
+var publicSuffixTestCases = []struct {
+ domain, want string
+}{
+ // Empty string.
+ {"", ""},
+
+ // The .ao rules are:
+ // ao
+ // ed.ao
+ // gv.ao
+ // og.ao
+ // co.ao
+ // pb.ao
+ // it.ao
+ {"ao", "ao"},
+ {"www.ao", "ao"},
+ {"pb.ao", "pb.ao"},
+ {"www.pb.ao", "pb.ao"},
+ {"www.xxx.yyy.zzz.pb.ao", "pb.ao"},
+
+ // The .ar rules are:
+ // ar
+ // com.ar
+ // edu.ar
+ // gob.ar
+ // gov.ar
+ // int.ar
+ // mil.ar
+ // net.ar
+ // org.ar
+ // tur.ar
+ // blogspot.com.ar
+ {"ar", "ar"},
+ {"www.ar", "ar"},
+ {"nic.ar", "ar"},
+ {"www.nic.ar", "ar"},
+ {"com.ar", "com.ar"},
+ {"www.com.ar", "com.ar"},
+ {"blogspot.com.ar", "blogspot.com.ar"},
+ {"www.blogspot.com.ar", "blogspot.com.ar"},
+ {"www.xxx.yyy.zzz.blogspot.com.ar", "blogspot.com.ar"},
+ {"logspot.com.ar", "com.ar"},
+ {"zlogspot.com.ar", "com.ar"},
+ {"zblogspot.com.ar", "com.ar"},
+
+ // The .arpa rules are:
+ // arpa
+ // e164.arpa
+ // in-addr.arpa
+ // ip6.arpa
+ // iris.arpa
+ // uri.arpa
+ // urn.arpa
+ {"arpa", "arpa"},
+ {"www.arpa", "arpa"},
+ {"urn.arpa", "urn.arpa"},
+ {"www.urn.arpa", "urn.arpa"},
+ {"www.xxx.yyy.zzz.urn.arpa", "urn.arpa"},
+
+ // The relevant {kobe,kyoto}.jp rules are:
+ // jp
+ // *.kobe.jp
+ // !city.kobe.jp
+ // kyoto.jp
+ // ide.kyoto.jp
+ {"jp", "jp"},
+ {"kobe.jp", "jp"},
+ {"c.kobe.jp", "c.kobe.jp"},
+ {"b.c.kobe.jp", "c.kobe.jp"},
+ {"a.b.c.kobe.jp", "c.kobe.jp"},
+ {"city.kobe.jp", "kobe.jp"},
+ {"www.city.kobe.jp", "kobe.jp"},
+ {"kyoto.jp", "kyoto.jp"},
+ {"test.kyoto.jp", "kyoto.jp"},
+ {"ide.kyoto.jp", "ide.kyoto.jp"},
+ {"b.ide.kyoto.jp", "ide.kyoto.jp"},
+ {"a.b.ide.kyoto.jp", "ide.kyoto.jp"},
+
+ // The .tw rules are:
+ // tw
+ // edu.tw
+ // gov.tw
+ // mil.tw
+ // com.tw
+ // net.tw
+ // org.tw
+ // idv.tw
+ // game.tw
+ // ebiz.tw
+ // club.tw
+ // 網路.tw (xn--zf0ao64a.tw)
+ // 組織.tw (xn--uc0atv.tw)
+ // 商業.tw (xn--czrw28b.tw)
+ // blogspot.tw
+ {"tw", "tw"},
+ {"aaa.tw", "tw"},
+ {"www.aaa.tw", "tw"},
+ {"xn--czrw28b.aaa.tw", "tw"},
+ {"edu.tw", "edu.tw"},
+ {"www.edu.tw", "edu.tw"},
+ {"xn--czrw28b.edu.tw", "edu.tw"},
+ {"xn--czrw28b.tw", "xn--czrw28b.tw"},
+ {"www.xn--czrw28b.tw", "xn--czrw28b.tw"},
+ {"xn--uc0atv.xn--czrw28b.tw", "xn--czrw28b.tw"},
+ {"xn--kpry57d.tw", "tw"},
+
+ // The .uk rules are:
+ // uk
+ // ac.uk
+ // co.uk
+ // gov.uk
+ // ltd.uk
+ // me.uk
+ // net.uk
+ // nhs.uk
+ // org.uk
+ // plc.uk
+ // police.uk
+ // *.sch.uk
+ // blogspot.co.uk
+ {"uk", "uk"},
+ {"aaa.uk", "uk"},
+ {"www.aaa.uk", "uk"},
+ {"mod.uk", "uk"},
+ {"www.mod.uk", "uk"},
+ {"sch.uk", "uk"},
+ {"mod.sch.uk", "mod.sch.uk"},
+ {"www.sch.uk", "www.sch.uk"},
+ {"blogspot.co.uk", "blogspot.co.uk"},
+ {"blogspot.nic.uk", "uk"},
+ {"blogspot.sch.uk", "blogspot.sch.uk"},
+
+ // The .рф rules are
+ // рф (xn--p1ai)
+ {"xn--p1ai", "xn--p1ai"},
+ {"aaa.xn--p1ai", "xn--p1ai"},
+ {"www.xxx.yyy.xn--p1ai", "xn--p1ai"},
+
+ // The .zw rules are:
+ // *.zw
+ {"zw", "zw"},
+ {"www.zw", "www.zw"},
+ {"zzz.zw", "zzz.zw"},
+ {"www.zzz.zw", "zzz.zw"},
+ {"www.xxx.yyy.zzz.zw", "zzz.zw"},
+
+ // There are no .nosuchtld rules.
+ {"nosuchtld", "nosuchtld"},
+ {"foo.nosuchtld", "nosuchtld"},
+ {"bar.foo.nosuchtld", "nosuchtld"},
+}
+
+func BenchmarkPublicSuffix(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, tc := range publicSuffixTestCases {
+ List.PublicSuffix(tc.domain)
+ }
+ }
+}
+
+func TestPublicSuffix(t *testing.T) {
+ for _, tc := range publicSuffixTestCases {
+ got := List.PublicSuffix(tc.domain)
+ if got != tc.want {
+ t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want)
+ }
+ }
+}
+
+func TestSlowPublicSuffix(t *testing.T) {
+ for _, tc := range publicSuffixTestCases {
+ got := slowPublicSuffix(tc.domain)
+ if got != tc.want {
+ t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want)
+ }
+ }
+}
+
+// slowPublicSuffix implements the canonical (but O(number of rules)) public
+// suffix algorithm described at http://publicsuffix.org/list/.
+//
+// 1. Match domain against all rules and take note of the matching ones.
+// 2. If no rules match, the prevailing rule is "*".
+// 3. If more than one rule matches, the prevailing rule is the one which is an exception rule.
+// 4. If there is no matching exception rule, the prevailing rule is the one with the most labels.
+// 5. If the prevailing rule is an exception rule, modify it by removing the leftmost label.
+// 6. The public suffix is the set of labels from the domain which directly match the labels of the prevailing rule (joined by dots).
+// 7. The registered or registrable domain is the public suffix plus one additional label.
+//
+// This function returns the public suffix, not the registrable domain, and so
+// it stops after step 6.
+func slowPublicSuffix(domain string) string {
+ match := func(rulePart, domainPart string) bool {
+ switch rulePart[0] {
+ case '*':
+ return true
+ case '!':
+ return rulePart[1:] == domainPart
+ }
+ return rulePart == domainPart
+ }
+
+ domainParts := strings.Split(domain, ".")
+ var matchingRules [][]string
+
+loop:
+ for _, rule := range rules {
+ ruleParts := strings.Split(rule, ".")
+ if len(domainParts) < len(ruleParts) {
+ continue
+ }
+ for i := range ruleParts {
+ rulePart := ruleParts[len(ruleParts)-1-i]
+ domainPart := domainParts[len(domainParts)-1-i]
+ if !match(rulePart, domainPart) {
+ continue loop
+ }
+ }
+ matchingRules = append(matchingRules, ruleParts)
+ }
+ if len(matchingRules) == 0 {
+ matchingRules = append(matchingRules, []string{"*"})
+ } else {
+ sort.Sort(byPriority(matchingRules))
+ }
+ prevailing := matchingRules[0]
+ if prevailing[0][0] == '!' {
+ prevailing = prevailing[1:]
+ }
+ if prevailing[0][0] == '*' {
+ replaced := domainParts[len(domainParts)-len(prevailing)]
+ prevailing = append([]string{replaced}, prevailing[1:]...)
+ }
+ return strings.Join(prevailing, ".")
+}
+
+type byPriority [][]string
+
+func (b byPriority) Len() int { return len(b) }
+func (b byPriority) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byPriority) Less(i, j int) bool {
+ if b[i][0][0] == '!' {
+ return true
+ }
+ if b[j][0][0] == '!' {
+ return false
+ }
+ return len(b[i]) > len(b[j])
+}
+
+// eTLDPlusOneTestCases come from
+// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt
+var eTLDPlusOneTestCases = []struct {
+ domain, want string
+}{
+ // Empty input.
+ {"", ""},
+ // Unlisted TLD.
+ {"example", ""},
+ {"example.example", "example.example"},
+ {"b.example.example", "example.example"},
+ {"a.b.example.example", "example.example"},
+ // TLD with only 1 rule.
+ {"biz", ""},
+ {"domain.biz", "domain.biz"},
+ {"b.domain.biz", "domain.biz"},
+ {"a.b.domain.biz", "domain.biz"},
+ // TLD with some 2-level rules.
+ {"com", ""},
+ {"example.com", "example.com"},
+ {"b.example.com", "example.com"},
+ {"a.b.example.com", "example.com"},
+ {"uk.com", ""},
+ {"example.uk.com", "example.uk.com"},
+ {"b.example.uk.com", "example.uk.com"},
+ {"a.b.example.uk.com", "example.uk.com"},
+ {"test.ac", "test.ac"},
+ // TLD with only 1 (wildcard) rule.
+ {"mm", ""},
+ {"c.mm", ""},
+ {"b.c.mm", "b.c.mm"},
+ {"a.b.c.mm", "b.c.mm"},
+ // More complex TLD.
+ {"jp", ""},
+ {"test.jp", "test.jp"},
+ {"www.test.jp", "test.jp"},
+ {"ac.jp", ""},
+ {"test.ac.jp", "test.ac.jp"},
+ {"www.test.ac.jp", "test.ac.jp"},
+ {"kyoto.jp", ""},
+ {"test.kyoto.jp", "test.kyoto.jp"},
+ {"ide.kyoto.jp", ""},
+ {"b.ide.kyoto.jp", "b.ide.kyoto.jp"},
+ {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"},
+ {"c.kobe.jp", ""},
+ {"b.c.kobe.jp", "b.c.kobe.jp"},
+ {"a.b.c.kobe.jp", "b.c.kobe.jp"},
+ {"city.kobe.jp", "city.kobe.jp"},
+ {"www.city.kobe.jp", "city.kobe.jp"},
+ // TLD with a wildcard rule and exceptions.
+ {"ck", ""},
+ {"test.ck", ""},
+ {"b.test.ck", "b.test.ck"},
+ {"a.b.test.ck", "b.test.ck"},
+ {"www.ck", "www.ck"},
+ {"www.www.ck", "www.ck"},
+ // US K12.
+ {"us", ""},
+ {"test.us", "test.us"},
+ {"www.test.us", "test.us"},
+ {"ak.us", ""},
+ {"test.ak.us", "test.ak.us"},
+ {"www.test.ak.us", "test.ak.us"},
+ {"k12.ak.us", ""},
+ {"test.k12.ak.us", "test.k12.ak.us"},
+ {"www.test.k12.ak.us", "test.k12.ak.us"},
+ // Punycoded IDN labels
+ {"xn--85x722f.com.cn", "xn--85x722f.com.cn"},
+ {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"},
+ {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"},
+ {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"},
+ {"xn--55qx5d.cn", ""},
+ {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"},
+ {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"},
+ {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"},
+ {"xn--fiqs8s", ""},
+}
+
+func TestEffectiveTLDPlusOne(t *testing.T) {
+ for _, tc := range eTLDPlusOneTestCases {
+ got, _ := EffectiveTLDPlusOne(tc.domain)
+ if got != tc.want {
+ t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go
new file mode 100644
index 000000000..dfe67ebe6
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/table.go
@@ -0,0 +1,8990 @@
+// generated by go run gen.go; DO NOT EDIT
+
+package publicsuffix
+
+const version = "publicsuffix.org's public_suffix_list.dat, git revision 533b016049473e520193e70156e4b54dc1f19568 (2016-08-05T11:21:15Z)"
+
+const (
+ nodesBitsChildren = 9
+ nodesBitsICANN = 1
+ nodesBitsTextOffset = 15
+ nodesBitsTextLength = 6
+
+ childrenBitsWildcard = 1
+ childrenBitsNodeType = 2
+ childrenBitsHi = 14
+ childrenBitsLo = 14
+)
+
+const (
+ nodeTypeNormal = 0
+ nodeTypeException = 1
+ nodeTypeParentOnly = 2
+)
+
+// numTLD is the number of top level domains.
+const numTLD = 1552
+
+// Text is the combined text of all labels.
+const text = "biellaakesvuemieleccebieszczadygeyachimataipeigersundnpaleomutas" +
+ "hinainfolldalottebievatmallorcafederationinohekinannestadrangeda" +
+ "lottokonamegatakatorintuitateshinanomachintaijinuyamanouchikuhok" +
+ "uryugasakitashiobarabifukagawalmartateyamabihorologyusuisserveex" +
+ "changebikedagestangebilbaogakievenesandvikcoromantovalle-d-aosta" +
+ "thellexusdecorativeartsanfranciscofreakunemurorangeiseiyoichirop" +
+ "racticaseihichisobetsuitairabillustrationinomiyakonojoshkar-olaw" +
+ "abiobirdartcenterprisesakikonaircraftraeumtgeradealstahaugesundr" +
+ "ivelandrobaknoluoktainaikawachinaganoharamcoalaheadjudaicable-mo" +
+ "dembetsukuinvestmentsangobirkenesoddtangenovarabirthplacebjarkoy" +
+ "uulsandoyuzawabjerkreimdbalatinorddalimitediscountysnes3-sa-east" +
+ "-1bjugnieznordlandrudmurtiablockbusternidunloppacificasertaishin" +
+ "omakikuchikuseikarugausdalouvreitatsunobloombergbauernrtattoolsz" +
+ "tynsettlersanjotaxihuanirasakis-a-candidatebloxcmsannanishiazais" +
+ "-a-catererbluedaplierneuesannohelplfinancialowiczest-le-patrondh" +
+ "eimperiabmoattachmentsanokasuyakutiabmsantabarbarabmweirbnpparib" +
+ "aselburgloppenzaogashimadachicagoboatsantacruzsantafedextraspace" +
+ "-to-rentalstomakomaibarabomloanswatch-and-clockerbondunsanukis-a" +
+ "-celticsfanishigotsukisofukushimaritimodenakanotoddenishiharabon" +
+ "nishiizunazukis-a-chefarmsteadupontariobookingmbhartiffanyuzhno-" +
+ "sakhalinskaszubybootsaotomeloyalistjordalshalsenishikatakazakis-" +
+ "a-conservativefsncfdurbanamexhibitionishikatsuragithubuserconten" +
+ "tgoryboschaefflerdalucaniabostikatowicebostonakijinsekikogenting" +
+ "minakamichiharabotanicalgardenishikawazukanazawabotanicgardenish" +
+ "imerabotanybouncemerckatsushikabeeldengeluidurhamburgmodellingmx" +
+ "finitybounty-fullensakerrypropertiesapodhalewismillerboutiquebec" +
+ "ngrimstadvrcambridgestonewspaperbozentsujiiebradescorporationish" +
+ "inomiyashironobrandywinevalleybrasiliabresciabrindisibenikebrist" +
+ "olgapartmentsapporobritishcolumbialowiezaganishinoomotegotvallea" +
+ "ostatoiluccapitalonewhollandvrdnsfor-better-thandabroadcastlecle" +
+ "rcasinore-og-uvdalucernebroadwaybroke-itjeldsundwgripebrokerbron" +
+ "noysundyndns-ipalermomasvuotnakatombetsupplybrothermesaverdeatnu" +
+ "orogersvpalmspringsakerbrowsersafetymarketsaratovalled-aostavang" +
+ "erbrumunddalukowfarsundyndns-mailuroybrunelblagdenesnaaseralinge" +
+ "nkainanaejrietisalatinabenoboribetsucksardegnamsosnowiecateringe" +
+ "budejjuedischesapeakebayernurembergriwataraidyndns-office-on-the" +
+ "-webcampobassociatesardiniabrusselsarlutskatsuyamaseratis-a-cpad" +
+ "oval-daostavalleybruxellesarpsborgrondarbryanskleppamperedchefas" +
+ "hionishinoshimatta-varjjatjmaxxxjaworznobryneustarhubalestrandab" +
+ "ergamoarekemreviewskrakoweddingladelmenhorstackspacekitagatajimi" +
+ "crolightinglassassinationalheritagematsubarakawagoeu-1buskerudin" +
+ "ewhampshirebungoonordreisa-geekaufenishiokoppegardyndns-picsaruf" +
+ "utsunomiyawakasaikaitakoenigrongabuzenishitosashimizunaminamiash" +
+ "igarabuzzgorzeleccolognewmexicoldwarmiamiastalowa-wolahppiacenza" +
+ "kopanerairguardyndns-remotegildeskalmykiabwhalingrossetouchijiwa" +
+ "deloittevadsoccertificationishiwakis-a-cubicle-slavellinowruzhgo" +
+ "rodoybzhitomirkutskodjeepostfoldnavyatkakegawalterconferencecons" +
+ "tructionconsuladoharuhrconsultanthropologyconsultingvollcontacto" +
+ "yookanzakiwiencontemporaryarteducationalchikugojomedio-campidano" +
+ "-mediocampidanomediocontractorskenconventureshinodesashibetsuiki" +
+ "mobetsuliguriacookingchannelveruminamibosogndalcoolkuszgradcoope" +
+ "raunitemasekfhappoumuenchencopenhagencyclopedichernihivanovosibi" +
+ "rskypescaravantaacorsicahcesuolocalhistorybnikahokutoeiheijis-a-" +
+ "doctoraycorvettenrightathomegoodsbschokoladencosenzamamibuilders" +
+ "cholarshipschoolcostumedizinhistorischeschulezajskhabarovskhakas" +
+ "siacouchpotatofrieschwarzgwangjuifminamidaitomangotembaixadacoun" +
+ "cilcouponschweizippodlasiellakasamatsudovre-eikercoursesciencece" +
+ "ntersciencehistorycq-acranbrookuwanalyticscientistockholmestrand" +
+ "creditcardcreditunioncremonashorokanaiecrewiiheyaizuwakamatsubus" +
+ "hikusakadogawacricketrzyncrimeacrotonewportlligatewaycrownprovid" +
+ "ercrscjohnsoncruisescotlandcryptonomichigangwoncuisinellajollame" +
+ "ricanexpressexyzjcbnlculturalcentertainmentoyosatoyokawacuneocup" +
+ "cakecxn--1ctwolominamatamayukis-a-financialadvisor-aurdalcymruov" +
+ "atoyotaris-a-geekgalaxycyonabarussiacyouthdfcbankzlguovdageaidnu" +
+ "lvikharkivgucciprianiigataiwanairforcertmgretachikawakuyabukicks" +
+ "-assedichernivtsiciliafieldfiguerestaurantoyotomiyazakis-a-green" +
+ "filateliafilminamiechizenfinalfinancefineartserveftparaglidingzp" +
+ "arisor-fronfinlandfinnoyfirebaseapparliamentoyotsukaidownloadfir" +
+ "enzefirestonextdirectoyourafirmdaleirfjordfishingolffanservegame" +
+ "-serverisignfitjarqhachiojiyahikobeatservehalflifestylefitnesset" +
+ "tlementoystre-slidrettozawafjalerflesbergflickragerotikamakuraza" +
+ "kiraflightservehttparmaflirumannortonsbergflogintogurafloraflore" +
+ "ncefloridafloristanohatakahashimamakirkeneservehumourfloromskogu" +
+ "chikuzenflowerserveirchernovtsykkylvenetogakushimotoganewjerseyf" +
+ "lsmidthruheredstonexus-east-1flynnhubalsfjordiscoveryokamikawane" +
+ "honbetsurutaharaurskog-holandroverhalla-speziaetnagaivuotnagaoka" +
+ "kyotambabydgoszczecinemailavagiske164fndfoodnetworkshoppingfor-o" +
+ "urfor-someetozsdefor-theaterforexrothachirogatakanabeautydalforg" +
+ "otdnserveminecraftranbyforli-cesena-forlicesenaforlikescandyndns" +
+ "-at-workinggrouparocherkasyzrankoshigayaltaikis-a-guruslivinghis" +
+ "toryforsaleirvikhersonforsandasuoloftrani-andria-barletta-trani-" +
+ "andriafortmissoulan-udefenseljordfortworthadanotaireservemp3util" +
+ "itiesquarezzoologicalvinklein-addrammenuernbergdyniabogadocscbgg" +
+ "fareastcoastaldefence-burgjemnes3-ap-northeast-1kappleaseating-o" +
+ "rganicbcg12000emmafanconagawakayamadridvagsoyericsson-aptibleang" +
+ "aviikadenaamesjevuemielno-ip6foruminamifuranofosneservep2parserv" +
+ "epicservequakefotaruis-a-hard-workerfoxfordegreefreeboxostrowiec" +
+ "hiryukyuragifudaigodoesntexistanbullensvanguardyndns-servercelli" +
+ "kes-piedmontblancomeeresasayamafreemasonryfreiburgfreightcmwildl" +
+ "ifedjejuegoshikiminokamoenairlinedre-eikerfreseniuscountryestate" +
+ "ofdelawaredumbrellanbibaidarfribourgfriuli-v-giuliafriuli-ve-giu" +
+ "liafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriul" +
+ "i-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezi" +
+ "a-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmata" +
+ "rtanddesignfrognfrolandfrom-akrehamnfrom-alfrom-arfrom-azwilliam" +
+ "hillfrom-capetownnews-stagingfrom-collectionfrom-ctraniandriabar" +
+ "lettatraniandriafrom-dchitachinakagawatchandclockautokeinofrom-d" +
+ "ell-ogliastrakhanawawinbaltimore-og-romsdalindasiaustevollaziobi" +
+ "ragroks-thisamitsukembuchikumagayagawakkanaibetsubamericanfamily" +
+ "dscloudcontrolledekafjorddnskingjerdrumckinseyekaterinburgjersta" +
+ "dotsuruokamchatkameokameyamashinatsukigatakamoriokamikitayamatot" +
+ "akadabruzzoologyeongbuk-uralsk12from-flanderservicesettsurfastly" +
+ "from-gafrom-higashiagatsumagoirminamiiselectranoyfrom-iafrom-idf" +
+ "rom-ilfrom-incheonfrom-ksevastopolefrom-kyotobetsumidatlantichit" +
+ "osetogitsuldaluxembourgrpanamafrom-lancashireggio-calabriafrom-m" +
+ "ansionsevenassisicilyfrom-mdfrom-megurorostrowwlkpmgfrom-microso" +
+ "ftbankhmelnitskiyamasfjordenfrom-mnfrom-mochizukirovogradoyfrom-" +
+ "msewindmillfrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktransportrap" +
+ "aniimimatakatsukis-a-hunterfrom-njcpartis-a-knightravelchannelfr" +
+ "om-nminamiizukamitondabayashiogamagoriziafrom-nvallee-aosteroyfr" +
+ "om-nyfrom-ohkurafrom-oketohmanxn--1qqw23afrom-orfrom-paderbornfr" +
+ "om-pratohnoshoooshikamaishimofusartsfranziskanerdpolicefrom-rivn" +
+ "efrom-schoenbrunnfrom-sdnipropetrovskhmelnytskyivalleeaosteigenf" +
+ "rom-tnfrom-txn--2m4a15efrom-utazuerichardlillehammerfest-mon-blo" +
+ "gueurovisionfrom-vaksdalfrom-vtravelersinsurancefrom-wafrom-wiel" +
+ "unnerfrom-wvanylvenicefrom-wyfrosinonefrostalbanshangrilangevagr" +
+ "arboretumbriamallamagentositelefonicaaarborteaches-yogasawaracin" +
+ "groks-theatreefroyahabaghdadultrdfstavropolitiendafujiiderafujik" +
+ "awaguchikonefujiminohtawaramotoineppugliafujinomiyadafujiokayama" +
+ "oris-a-landscaperugiafujisatoshonairportland-4-salernogatagajobo" +
+ "jis-a-lawyerfujisawafujishiroishidakabiratoridellogliastraderfuj" +
+ "itsurugashimamateramodalenfujixeroxn--30rr7yfujiyoshidafukayabea" +
+ "rdubaiduckdnsdojoburgfukuchiyamadafukudominichocolatelevisioniss" +
+ "andnessjoenissayokoshibahikariwanumataketomisatomobellevuelosang" +
+ "elesjaguarchitecturealtychyattorneyagawalbrzycharternopilawalesu" +
+ "ndyndns-weberlincolnissedaluxuryfukuis-a-liberalfukumitsubishiga" +
+ "kiryuohadselfipartnersharis-a-libertarianfukuokazakisarazurewebs" +
+ "iteshikagamiishibukawafukuroishikarikaturindalfukusakishiwadafuk" +
+ "uyamagatakahatakaishimoichinosekigaharafunabashiriuchinadafunaga" +
+ "takamatsukawafunahashikamiamakusatsumasendaisennangonohejis-a-li" +
+ "nux-useranishiaritabashikaoizumizakitaurayasudafundaciofuoiskuju" +
+ "kuriyamarburgfuosskoczowindowsharpartshawaiijimarumorimachidafur" +
+ "nitureggio-emilia-romagnakanojohanamakinoharafurubiraquarellebes" +
+ "byglandfurudonostiafurukawairtelecityeatshellaspeziafusodegauraf" +
+ "ussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomigawaf" +
+ "utboldlygoingnowhere-for-moregontrailroadfuttsurugiminamimakis-a" +
+ "-llamarylhurstcgroupartyfvgfyis-a-musicianfylkesbiblackfridayfyr" +
+ "esdalhannovareserveblogspotrentino-a-adigehanyuzenhapmirhareidsb" +
+ "ergenharstadharvestcelebrationhasamarahasaminami-alpssells-itren" +
+ "tino-aadigehashbanghasudahasura-appasadenaklodzkodairahasviklabu" +
+ "dhabikinokawabarthagakhanamigawahatogayahoohatoyamazakitahatakan" +
+ "ezawahatsukaichikaiseis-a-painteractivegarsheis-a-patsfanhattfje" +
+ "lldalhayashimamotobuildinghazuminobusellsyourhomeipassagenshimon" +
+ "itayanagitlaborhboehringerikehelsinkitahiroshimarriottrentino-al" +
+ "to-adigehembygdsforbundhemneshimonosekikawahemsedalhepforgeherok" +
+ "ussldheroyhgtvarggatrentino-altoadigehigashichichibungotakadatin" +
+ "ghigashihiroshimanehigashiizumozakitakamiizumisanofidelitysvardo" +
+ "llshimosuwalkis-a-personaltrainerhigashikagawahigashikagurasoeda" +
+ "higashikawakitaaikitakatakaokamikoaniikappulawyhigashikurumeiwam" +
+ "arshallstatebankmpspbamblebtimnetz-2higashimatsushimarinehigashi" +
+ "matsuyamakitaakitadaitoigawahigashimurayamalatvuopmidoris-a-phot" +
+ "ographerokuappassenger-associationhigashinarusembokukitakyushuai" +
+ "ahigashinehigashiomihachimanchesterhigashiosakasayamamotorcycles" +
+ "himotsukehigashishirakawamatakarazukamiminershimotsumahigashisum" +
+ "iyoshikawaminamiaikitamidsundhigashitsunotteroyhigashiurausukita" +
+ "motosumitakaginankokubunjis-a-playerhigashiyamatokoriyamanakakog" +
+ "awahigashiyodogawahigashiyoshinogaris-a-republicancerresearchaeo" +
+ "logicaliforniahiraizumisatohobby-sitehirakatashinagawahiranairtr" +
+ "affichonanbugattipschmidtre-gauldalvivano-frankivskazimierz-doln" +
+ "yhirarahiratsukagawahirayaitakasagooglecodespotrentino-s-tirolla" +
+ "grigentomologyhistorichouseshinichinanhitachiomiyaginowaniihamat" +
+ "amakawajimarcheapaviancarbonia-iglesias-carboniaiglesiascarbonia" +
+ "hitachiotagopocznosegawahitoyoshimifunehitradinghjartdalhjelmela" +
+ "ndholeckobierzyceholidayhomelinuxn--32vp30hagebostadhomesecurity" +
+ "maceratakasakitanakagusukumoduminamiogunicomcastresistancehomese" +
+ "curitypccwinnershinjournalismailillesandefjordhomesenseminehomeu" +
+ "nixn--3bst00minamisanrikubetsupplieshinjukumanohondahonefosshink" +
+ "amigotoyohashimototalhoneywellhongorgehonjyoitakashimarugame-hos" +
+ "tinghornindalhorseoulminamitanehortendofinternetrentino-stirolho" +
+ "teleshinshinotsurgeonshalloffamemergencyberlevagangaviikanonjis-" +
+ "a-rockstarachowicehotmailhoyangerhoylandetroitskmshinshirohumani" +
+ "tieshintokushimahurdalhurumajis-a-socialistmeindianapolis-a-blog" +
+ "gerhyllestadhyogoris-a-soxfanhyugawarahyundaiwafunehzchoseiroute" +
+ "rjgorajlchoyodobashichikashukujitawarajlljmpgfoggiajnjelenia-gor" +
+ "ajoyokaichibahcavuotnagaraumakeupowiathletajimabariakepnord-fron" +
+ "tierjpmorganjpnchristmasakikugawatchesaskatchewanggouvicenzajprs" +
+ "hirahamatonbetsurgeryjuniperjurkristiansundkrodsheradkrokstadelv" +
+ "aldaostarnbergkryminamiyamashirokawanabelgorodeokumatorinokumeji" +
+ "massa-carrara-massacarraramassabunkyonanaoshimageandsoundandvisi" +
+ "onkumenanyokkaichirurgiens-dentistes-en-francekunisakis-an-anarc" +
+ "historicalsocietyumenkunitachiarailwaykunitomigusukumamotoyamaso" +
+ "ykunneppupharmacyshiraois-an-artisteinkjerusalembroiderykunstsam" +
+ "mlungkunstunddesignkuokgrouphiladelphiaareadmyblogsitekureisenku" +
+ "rgankurobelaudibleborkdalvdalaskanittedallasalleasingleshiraokan" +
+ "makiwakunigamihamadakurogimilitarykuroisoftwarendalenugkuromatsu" +
+ "nais-an-engineeringkurotakikawasakis-an-entertainerkurskomitamam" +
+ "urakushirogawakustanais-bykusupersportrentino-suedtirolkutchanel" +
+ "kutnokuzbassnillfjordkuzumakis-certifiedogawarabikomaezakirunort" +
+ "hwesternmutualkvafjordkvalsundkvamfamberkeleykvanangenkvinesdalk" +
+ "vinnheradkviteseidskogkvitsoykwpspjelkavikommunalforbundkyowaria" +
+ "sahikawamitourismolanciamitoyoakemiuramiyazumiyotamanomjondalenm" +
+ "lbfanmonmouthaibarakisosakitagawamonstermonticellombardiamondshi" +
+ "ratakahagivestbytomaritimekeepingmontrealestatefarmequipmentrent" +
+ "inoa-adigemonza-brianzaporizhzheguris-into-animelbournemonza-e-d" +
+ "ella-brianzaporizhzhiamonzabrianzapposhishikuis-into-carshiojiri" +
+ "shirifujiedamonzaebrianzaptokuyamatsunomonzaedellabrianzaramopar" +
+ "achutingmordoviajessheiminanomoriyamatsusakahoginozawaonsenmoriy" +
+ "oshiokamitsuemormoneymoroyamatsushigemortgagemoscowioshisognemos" +
+ "eushistorymosjoenmoskeneshisuifuettertdasnetzmosshitaramamosviko" +
+ "monomoviemovistargardmtpchromedicaltanissettaitogliattiresassari" +
+ "s-a-democratjxn--0trq7p7nniyodogawamtranakatsugawamuenstermugith" +
+ "ubcloudusercontentrentinoaadigemuikamogawamukochikushinonsenergy" +
+ "mulhouservebeermultichoicemunakatanemuncieszynmuosattemuphilatel" +
+ "ymurmanskomorotsukamisunagawamurotorcraftrentinoalto-adigemusash" +
+ "imurayamatsuuramusashinoharamuseetrentinoaltoadigemuseumverenigi" +
+ "ngmutsuzawamutuellevangermydissentrentinos-tirolmydrobofagemydsh" +
+ "izukuishimogosenmyeffectrentinostirolmyfritzmyftphilipsymykolaiv" +
+ "aroymymediapchryslermyokohamamatsudamypepsonyoursidedyn-o-saurec" +
+ "ipesaro-urbino-pesarourbinopesaromalvikomvuxn--3ds443gmypetshizu" +
+ "okannamiharumyphotoshibahccavuotnagareyamalopolskanlandmypsxn--3" +
+ "e0b707emysecuritycamerakermyshopblockshoujis-into-cartoonshioyam" +
+ "emorialmytis-a-bookkeepermincommbankommunemyvnchungbukazopicture" +
+ "showapiemontepilotshowtimeteorapphotographysiopimientakinouepink" +
+ "ongsbergpioneerpippupiszpittsburghofauskedsmokorsetagayasells-fo" +
+ "r-unzenpiwatepizzapkongsvingerplanetariuminnesotaketakayamatsuma" +
+ "ebashimodateplantationplantshriramlidlugolekagoshimaintenancepla" +
+ "tformintelligenceplaystationplazaplchungnamdalseidfjordyndns-wik" +
+ "inderoyplombardyndns-blogdnsiskinkyknethnologyplumbingovtrentino" +
+ "sudtirolplusterpmnpodzonepohlpointtomskoninjamisonpoivronpokerpo" +
+ "krovskonskowolayangroupharmacienshirakofuelpolkowicepoltavalle-a" +
+ "ostarostwodzislawitdkonsulatrobeepilepsydneypomorzeszowithgoogle" +
+ "apisa-hockeynutrentinosued-tirolpordenonepornporsangerporsanguid" +
+ "eltajirikuzentakatakahamamurogawaporsgrunnanpoznanpraxis-a-bruin" +
+ "sfanprdpreservationpresidioprgmrprimelhusgardenprincipeprivatize" +
+ "healthinsuranceprochowiceproductionsienaplesigdalprofbsbxn--1lqs" +
+ "03nprogressivegaskimitsubatamicadaquesilkonyvelolprojectrentinos" +
+ "uedtirolpromombetsupportrentoyonakagyokutoyakokamishihoronobeoka" +
+ "minoyamatsuris-into-gamessinashikitchenpropertyprotectionprudent" +
+ "ialpruszkowithyoutubeneventodayprzeworskogptzpvtrevisohughesimbi" +
+ "rskooris-a-therapistoiapwchurchaseljeffersonrwhoswhokksundyndns-" +
+ "workisboringruepzqldqponqslgbtroandinosaurlandesimple-urlquicksy" +
+ "tesirdalqvchuvashiasrlsrtromsakatakkoelnsrvbarcelonagasukeu-2sto" +
+ "ragestordalstorenburgstorfjordstpetersburgstreamsterdamnserverba" +
+ "niastudiostudyndns-homeftpaccesslupskopervikomatsushimashikestuf" +
+ "f-4-salestufftoread-booksnesmolenskoryolasitestuttgartromsojaval" +
+ "d-aostaplesnoasaitoshimasurnadalsurreysusakis-lostre-toteneis-a-" +
+ "teacherkassymantechnologysusonosuzakanrasuzukanumazurysuzukis-no" +
+ "t-certifieducatorahimeshimakanegasakindleikangersvalbardudinkaku" +
+ "damatsuesveiosvelvikosakaerodromegalsacechirealminamiuonumasudas" +
+ "vizzeraswedenswidnicargodaddyndns-at-homednshomebuiltrusteeswieb" +
+ "odzindianmarketingswiftcoveronaritakurashikis-savedunetbankokono" +
+ "eswinoujscienceandhistoryswisshikis-slickolobrzegersundtuxfamily" +
+ "vestnesolognevestre-slidreamhostersolundbeckosaigawavestre-toten" +
+ "nishiawakuravestvagoyvevelstadvibo-valentiavibovalentiavideovill" +
+ "askoyabearalvahkihokumakogengerdalipayufuchukotkagaminogiesseneb" +
+ "akkeshibechambagriculturennebudapest-a-la-masionthewifiat-band-c" +
+ "ampaniavinnicarriervinnytsiavipsinaappiagetmyiphoenixn--3oq18vl8" +
+ "pn36avirginiavirtualvirtueeldomeindustriesteambulancevirtuelvisa" +
+ "kegawavistaprinternationalfirearmsolutionslingviterboltrvdonskos" +
+ "eis-an-accountantshintomikasaharavivoldavladikavkazanvladimirvla" +
+ "divostokaizukarasuyamazoevlogoipictetrentinosud-tirolvolkenkunde" +
+ "rseaportrysiljan-mayenvolkswagentsomavologdanskoshimizumakiyosum" +
+ "ycdn77-securechtrainingvolvolgogradvolyngdalvoronezhytomyrvossev" +
+ "angenvotevotingvotoyonezawavrnworse-thangglidingwowiwatsukiyonow" +
+ "tvenneslaskerrylogisticsokndalwritesthisblogsytewroclawloclaweko" +
+ "shunantokigawawtcircus-2wtfbx-oslodingenwuozuwwworldwzmiuwajimax" +
+ "n--4gq48lf9jeonnamerikawauexn--4it168dxn--4it797kosugexn--4pvxso" +
+ "mnarashinoxn--54b7fta0ccivilaviationxn--55qw42gxn--55qx5dxn--5js" +
+ "045dxn--5rtp49civilisationxn--5rtq34kotohiradomainsurehabmerxn--" +
+ "5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986" +
+ "b3xlxn--7t0a264civilizationxn--80adxhksooxn--80ao21axn--80aqecdr" +
+ "1axn--80asehdbarclaycardstvedestrandishakotankarumaifarmerseinew" +
+ "yorkshirecreationatuurwetenschappenaumburgliwicevents3-us-west-1" +
+ "xn--80aswgxn--80audnedalnxn--8ltr62kotouraxn--8pvr4uxn--8y0a063a" +
+ "xn--90a3academyactivedirectoryazannakadomari-elasticbeanstalkouh" +
+ "okutamakizunokunimilanoxn--90aishobaraomoriguchiharahkkeravjudyg" +
+ "arlandxn--90azhair-surveillancexn--9dbhblg6dietcimmobilienxn--9d" +
+ "bq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--as" +
+ "ky-iraxn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakuho" +
+ "kkaidontexisteingeekounosunndalxn--b-5gaxn--b4w605ferdxn--bck1b9" +
+ "a5dre4civilwarmanagementkmaxxn--1ck2e1balsanagochihayaakasakawah" +
+ "aravennagasakijobserverdalimoliserniaukraanghkebinorilskariyakum" +
+ "oldev-myqnapcloudcontrolappagefrontappagespeedmobilizerobihirosa" +
+ "kikamijimatteledatabaseballooningjesdalavangenativeamericanantiq" +
+ "ues3-eu-central-1xn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jx" +
+ "axn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn-" +
+ "-bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptamboversaillesolarsso" +
+ "nxn--blt-elabourxn--bmlo-graingerxn--bod-2naroyxn--brnny-wuaccid" +
+ "ent-investigationjukudoyamagadancebetsukubabia-goracleaningatlan" +
+ "tabusebastopologyeonggiehtavuoatnadexeterimo-i-ranagahamaroygard" +
+ "endoftheinternetflixilovecollegefantasyleaguernseyxn--brnnysund-" +
+ "m8accident-preventionlineat-urlxn--brum-voagatulansnzxn--btsfjor" +
+ "d-9zaxn--c1avgxn--c2br7gxn--c3s14misasaguris-gonexn--cck2b3baref" +
+ "ootballangenoamishirasatochigiftsakuraibestadiskstationaustdalin" +
+ "desnesakyotanabellunordkappgafanpachigasakidsmynasperschlesische" +
+ "salangenaval-d-aosta-valleyonagoyaustinnaturalhistorymuseumcente" +
+ "repbodyndns-freebox-oskolegokasells-for-less3-eu-west-1xn--cg4bk" +
+ "is-uberleetrentino-sudtirolxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-ss" +
+ "lattumisawaxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc" +
+ "29axn--czr694bargainstitutelekommunikationavigationavuotnakayama" +
+ "tsuzakibigawaustraliaisondriodejaneirochestereportargets-itargiv" +
+ "ingjovikarlsoyokosukareliancebizenakamuratakaharuconnectarnobrze" +
+ "gyptianaturalsciencesnaturelles3-external-1xn--czrs0tunesokanoya" +
+ "kagexn--czru2dxn--czrw28barreauctionayoroceanographicsalondonets" +
+ "kasaokamisatokamachippubetsubetsugarufcfanflfanfshostrodawaraust" +
+ "rheimatunduhrennesoyokotehimeji234xn--d1acj3barrel-of-knowledgeo" +
+ "logyonaguniversityoriikashibatakasugaibmditchyouripalaceverbanka" +
+ "shiharauthordalandroidigitalillyokozemersongdalenviknakaniikawat" +
+ "anaguramusementarantours3-ap-northeast-2xn--d1alfaromeoxn--d1atu" +
+ "nkosherbrookegawaxn--d5qv7z876claimsauheradynv6xn--davvenjrga-y4" +
+ "axn--djrs72d6uyxn--djty4kouyamashikis-an-actorxn--dnna-grajewolt" +
+ "erskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4clickddielddanuorrissa" +
+ "gamiharaxn--eckvdtc9dxn--efvn9sopotrogstadxn--efvy88hakatanotoga" +
+ "waxn--ehqz56nxn--elqq16hakodatexn--estv75gxn--eveni-0qa01gaxn--f" +
+ "6qx53axn--fct429kouzushimashikokuchuoxn--fhbeiarnxn--finny-yuaxn" +
+ "--fiq228c5hsor-odalxn--fiq64barrell-of-knowledgeometre-experts-c" +
+ "omptablesaltdalinkashiwarautomotivecodynaliascoli-picenoipiranga" +
+ "mvikarmoyomitanobninskarpaczeladz-1xn--fiqs8sor-varangerxn--fiqz" +
+ "9sorfoldxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351" +
+ "exn--fpcrj9c3dxn--frde-grandrapidsorreisahayakawakamiichikawamis" +
+ "atottoris-leetrentino-sud-tirolxn--frna-woaraisaijosoyrovigorlic" +
+ "exn--frya-hraxn--fzc2c9e2clinichelyabinskydivingroundhandlingroz" +
+ "nyxn--fzys8d69uvgmailxn--g2xx48cliniquenoharaxn--gckr3f0fbxostro" +
+ "lekaluganskharkovalledaostavernxn--gecrj9clintonoshoesavannahgax" +
+ "n--ggaviika-8ya47hakonexn--gildeskl-g0axn--givuotna-8yandexn--3p" +
+ "xu8kostromahachijorpelandxn--gjvik-wuaxn--gk3at1exn--gls-elacaix" +
+ "axn--gmq050is-very-badaddjamalborkangerxn--gmqw5axn--h-2failxn--" +
+ "h1aeghakubankhvaolbia-tempio-olbiatempioolbialystokkemerovodkaka" +
+ "migaharagusaarlandxn--h2brj9clothingujolsterxn--hbmer-xqaxn--hce" +
+ "suolo-7ya35bashkiriautoscanadaejeonbukaruizawasnesoddenmarkhange" +
+ "lskjervoyagemologicallyngenglandds3-ap-southeast-1xn--hery-iraxn" +
+ "--hgebostad-g3axn--hmmrfeasta-s4accturystykarasjohkamiokaminokaw" +
+ "anishiaizubangexn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpm" +
+ "ir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2ex" +
+ "n--imr513nxn--indery-fyaotsurgutsiracusakakinokiaxn--io0a7is-ver" +
+ "y-evillagexn--j1aefermobilyxn--j1amhakuis-a-nascarfanxn--j6w193g" +
+ "xn--jlq61u9w7basilicataniaveroykeniwaizumiotsukumiyamazonawsabae" +
+ "robaticketsaritsynologyeongnamegawakeisenbahnaturbruksgymnaturhi" +
+ "storisches3-external-2xn--jlster-byaroslavlaanderenxn--jrpeland-" +
+ "54axn--jvr189misconfusedxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kc" +
+ "rx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx" +
+ "9axn--klty5xn--42c2d9axn--koluokta-7ya57hakusandiegoodyearthaeba" +
+ "ruminamiminowaxn--kprw13dxn--kpry57dxn--kpu716ferraraxn--kput3is" +
+ "-very-goodhandsonxn--krager-gyasakaiminatoyonoxn--kranghke-b0axn" +
+ "--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-sue" +
+ "d-tirolxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugis-very-nice" +
+ "xn--kvnangen-k0axn--l-1fairwindsortlandxn--l1accentureklamborghi" +
+ "niizaxn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ldin" +
+ "gen-q1axn--leagaviika-52basketballfinanzgoravocatanzarowebhoppda" +
+ "limanowarudastronomyasustor-elvdalpha-myqnapcloudappspotagerepai" +
+ "rbusantiquest-a-la-maisondre-landebusinessebyklefrakkestadgcanon" +
+ "oichinomiyakebinagisochildrensgardenasushiobaraeroportalabamagas" +
+ "akishimabarackmaze12xn--lesund-huaxn--lgbbat1ad8jevnakershuscult" +
+ "ureggioemiliaromagnakasatsunais-a-techietis-a-studentalxn--lgrd-" +
+ "poacoachampionshiphoptobamagazinebraskaunjargallupinbatochiokino" +
+ "shimalselvendrellinzaiinetarumizusawavoues3-fips-us-gov-west-1xn" +
+ "--lhppi-xqaxn--linds-pramericanartuscanyxn--lns-qlanxessorumisak" +
+ "is-foundationxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-li" +
+ "acntmpanasonichernigovernmentjometlifeinsurancexn--lten-granexn-" +
+ "-lury-iraxn--mely-iraxn--merker-kuaxn--mgb2ddesouthcarolinazawax" +
+ "n--mgb9awbferrarittogoldpoint2thisayamanashiibadajozoraholtalenv" +
+ "ironmentalconservationxn--mgba3a3ejtushuissier-justicexn--mgba3a" +
+ "4f16axn--mgba3a4franamizuholdingsmileksvikozagawaxn--mgba7c0bbn0" +
+ "axn--mgbaakc7dvferreroticapebretonamiasakuchinotsuchiurakawassam" +
+ "ukawataricohdatsunanjoetsuwanouchikujogaszkoladbrokescrapper-sit" +
+ "exn--mgbaam7a8haldenxn--mgbab2bdxn--mgbai9a5eva00batsfjordivtasv" +
+ "uodnaharimaniwakuratexascolipicenord-aurdalcesalvadordalibabaika" +
+ "liszczytnord-odalipetskashiwazakiyokawaraxaugustowadaegubs3-ap-s" +
+ "outheast-2xn--mgbai9azgqp6jewelryxn--mgbayh7gpaduaxn--mgbb9fbpob" +
+ "anazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d" +
+ "4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--mgbpl2fhskozakis-an-" +
+ "actresshinyoshitomiokaneyamaxunusualpersonxn--mgbqly7c0a67fbcolo" +
+ "nialwilliamsburgulenxn--mgbqly7cvafredrikstadtvsouthwestfalenxn-" +
+ "-mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausposts-and-telecommu" +
+ "nicationsnasadodgeorgeorgiaxn--mgbx4cd0abbottuvalle-daostaticirc" +
+ "legnicagliaridagawarszawashingtondclkazunoxn--mix082fetsundxn--m" +
+ "ix891fgushikamifuranoshiroomuraxn--mjndalen-64axn--mk0axinfiniti" +
+ "s-very-sweetpepperxn--mk1bu44coloradoplateaudioxn--mkru45is-with" +
+ "-thebandoomdnsaliasdaburyatiaarpfizerxn--mlatvuopmi-s4axn--mli-t" +
+ "lapyatigorskpnxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuragawaxn" +
+ "--mosjen-eyatominamiawajikisleofmandalxn--mot-tlaquilancasterxn-" +
+ "-mre-og-romsdal-qqbbcartoonartdecoffeedbackplaneappalanakhodkana" +
+ "gawaxn--msy-ula0halsaitamatsukuris-a-nurservebbshimokawaxn--mtta" +
+ "-vrjjat-k7afamilycompanycolumbusheyxn--muost-0qaxn--mxtq1mishima" +
+ "tsumotofukexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45brj9citadeliver" +
+ "yggeelvinckchristiansburguitarsatxn--11b4c3dynnsaudaxn--nit225kp" +
+ "pspiegelxn--nmesjevuemie-tcbajddarchaeologyxn--nnx388axn--nodexn" +
+ "--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-by" +
+ "aeservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattanooga" +
+ "norfolkebiblegallocus-1xn--o3cw4hammarfeastafricamagichofunatori" +
+ "entexpressaseboknowsitalluzernisshingugexn--od0algxn--od0aq3bbta" +
+ "tamotorsalzburglobalashovhachinohedmarkasukabedzin-the-bandaioir" +
+ "aseeklogesuranceoceanographiquevje-og-hornnesamegawaxn--ogbpf8fl" +
+ "ekkefjordxn--oppegrd-ixaxn--ostery-fyatsukaratsuginamikatagamiho" +
+ "boleslawiecommunitysfjordyroyrvikingunmarnardalxn--osyro-wuaxn--" +
+ "p1acfhvalerxn--p1aissmarterthanyoustkarasjokomaganexn--pbt977com" +
+ "obaraxn--pgbs0dhlxn--porsgu-sta26fidonnakamagayachtscrappingxn--" +
+ "1lqs71dxn--pssu33lxn--pssy2uxn--q9jyb4comparemarkerryhotelsaves-" +
+ "the-whalessandria-trani-barletta-andriatranibarlettaandriaxn--qc" +
+ "ka1pmcdonaldsowaxn--qqqt11missilelxn--qxamurskiptveterinairealto" +
+ "rlandxn--rady-iraxn--rdal-poaxn--rde-ularvikrasnodarxn--rdy-0nab" +
+ "ariwchoshibuyachiyodavvesiidazaifuefukihaborokunohealth-carerefo" +
+ "rmitakeharaxn--rennesy-v1axn--rhkkervju-01aflakstadaokagakibichu" +
+ "oxn--rholt-mragowoodsidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61e" +
+ "xn--risa-5narusawaxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rm" +
+ "skog-byatsushiroxn--rny31hamurakamigoriginshimokitayamaxn--rovu8" +
+ "8bbvacationsupdatelemarkasumigaurawa-mazowszexboxenapponazure-mo" +
+ "bilexn--rros-granvindafjordxn--rskog-uuaxn--rst-0narutokyotangot" +
+ "pantheonsitextileitungsenxn--rsta-francaiseharaxn--ryken-vuaxn--" +
+ "ryrvik-byawaraxn--s-1faitheguardianxn--s9brj9compute-1xn--sandne" +
+ "ssjen-ogbizhevskrasnoyarskomforbananarepublicartierhcloudfunctio" +
+ "ns3-us-gov-west-1xn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-g" +
+ "ratangenxn--skierv-utazaskvolloabathsbcomputerhistoryofscience-f" +
+ "ictionxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn-" +
+ "-slat-5narviikananporovnoxn--slt-elabbvieeexn--smla-hraxn--smna-" +
+ "gratis-a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--sne" +
+ "s-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1a" +
+ "xn--sr-varanger-ggbentleyukuhashimojiitatebayashijonawatexn--srf" +
+ "old-byawatahamaxn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stj" +
+ "rdal-s1axn--stjrdalshalsen-sqbeppubolognagatorockartuzyurihonjou" +
+ "rnalistjohnhlfanhsamnangerxn--stre-toten-zcbspreadbettingxn--t60" +
+ "b56axn--tckweatherchannelxn--tiq49xqyjewishartgalleryxn--tjme-hr" +
+ "axn--tn0agrinet-freakspydebergxn--tnsberg-q1axn--tor131oxn--tran" +
+ "y-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc" +
+ "0atversicherungxn--uc0ay4axn--uist22hangoutsystemscloudfrontdoor" +
+ "xn--uisz3gxn--unjrga-rtaobaokinawashirosatobishimaizurubtsovskja" +
+ "kdnepropetrovskiervaapsteiermarkredirectmeldalxn--unup4yxn--uuwu" +
+ "58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--vermgensberater-c" +
+ "tberndivttasvuotnakaiwamizawaxn--vermgensberatung-pwbeskidynatho" +
+ "medepotenzachpomorskienikiiyamanobeauxartsandcraftsamsclubindali" +
+ "vornoddaxn--vestvgy-ixa6oxn--vg-yiabcn-north-1xn--vgan-qoaxn--vg" +
+ "sy-qoa0jfkomakiyosatokashikiyosemitexn--vgu402comsecuritytactics" +
+ "avonamsskoganeis-a-designerimarylandxn--vhquvestfoldxn--vler-qoa" +
+ "xn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bestbuysho" +
+ "usesamsunglobodoes-itverranzanquannefrankfurtatarstanikkoebenhav" +
+ "nikolaevennodessaikinkobayashikshacknetnedalomzansimagicasadelam" +
+ "onedavvenjargaulardalorenskoglogowegroweibolzanordre-landiyusuha" +
+ "raxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1condoshichi" +
+ "nohealthcareersaxoxn--wgbl6axn--xhq521betainaboxfusejnynysagaero" +
+ "clubmedecincinnationwidealerxn--xkc2al3hye2axn--xkc2dl3a5ee0hann" +
+ "anmokuizumodernxn--y9a3aquariumisugitokorozawaxn--yer-znarvikris" +
+ "tiansandcatshiranukaniepcexn--yfro4i67oxn--ygarden-p1axn--ygbi2a" +
+ "mmxn--45q11citicatholicheltenham-radio-openair-traffic-controlle" +
+ "yxn--ystre-slidre-ujbieidsvollotenkawaxn--zbx025dxn--zf0ao64axn-" +
+ "-zf0avxn--4gbriminingxn--zfr164bielawallonieruchomoscienceandind" +
+ "ustrynikonantanangerxperiaxz"
+
+// nodes is the list of nodes. Each node is represented as a uint32, which
+// encodes the node's children, wildcard bit and node type (as an index into
+// the children array), ICANN bit and text.
+//
+// If the table was generated with the -comments flag, there is a //-comment
+// after each node's data. In it is the nodes-array indexes of the children,
+// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
+// nodeType is printed as + for normal, ! for exception, and o for parent-only
+// nodes that have children but don't match a domain label in their own right.
+// An I denotes an ICANN domain.
+//
+// The layout within the uint32, from MSB to LSB, is:
+// [ 1 bit ] unused
+// [ 9 bits] children index
+// [ 1 bit ] ICANN bit
+// [15 bits] text index
+// [ 6 bits] text length
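+//
+// For illustration only, a node value can be unpacked by shifting and masking
+// according to the layout above. The helper below is a hypothetical sketch and
+// is not part of the generated API; it takes the text blob as a parameter
+// rather than assuming the name of the preceding string constant:
+//
+//	func decodeNode(v uint32, text string) (children uint32, icann bool, label string) {
+//		length := v & 0x3f           // low   6 bits: text length
+//		offset := (v >> 6) & 0x7fff  // next 15 bits: text index
+//		icann = (v>>21)&1 == 1       // next  1 bit : ICANN bit
+//		children = (v >> 22) & 0x1ff // next  9 bits: children index
+//		return children, icann, text[offset : offset+length]
+//	}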
+var nodes = [...]uint32{
+ 0x274903,
+ 0x370704,
+ 0x28c306,
+ 0x36c9c3,
+ 0x36c9c6,
+ 0x3948c6,
+ 0x3a4883,
+ 0x208e44,
+ 0x252cc7,
+ 0x28bf48,
+ 0x1a00882,
+ 0x308207,
+ 0x350b49,
+ 0x2f91ca,
+ 0x2f91cb,
+ 0x232343,
+ 0x28d846,
+ 0x231645,
+ 0x1e00702,
+ 0x2105c4,
+ 0x22d243,
+ 0x275685,
+ 0x2207982,
+ 0x33d083,
+ 0x26ee604,
+ 0x24bb45,
+ 0x2a01782,
+ 0x37528e,
+ 0x2470c3,
+ 0x37bac6,
+ 0x37bacb,
+ 0x2e03642,
+ 0x28c487,
+ 0x233846,
+ 0x3200a42,
+ 0x2573c3,
+ 0x2573c4,
+ 0x353f86,
+ 0x240788,
+ 0x285686,
+ 0x39ffc4,
+ 0x3600dc2,
+ 0x32ab89,
+ 0x364d87,
+ 0x2f4806,
+ 0x3527c9,
+ 0x295108,
+ 0x3404c4,
+ 0x2ee886,
+ 0x211206,
+ 0x3a02202,
+ 0x23cf4f,
+ 0x262c8e,
+ 0x215644,
+ 0x2bc805,
+ 0x2e16c5,
+ 0x2e8b89,
+ 0x239849,
+ 0x3293c7,
+ 0x3a8706,
+ 0x230103,
+ 0x3e04602,
+ 0x33d3c3,
+ 0x21c0ca,
+ 0x21c343,
+ 0x253c45,
+ 0x284d02,
+ 0x284d09,
+ 0x4203442,
+ 0x203444,
+ 0x208986,
+ 0x27c205,
+ 0x349a04,
+ 0x4a837c4,
+ 0x203803,
+ 0x230684,
+ 0x4e00f82,
+ 0x370444,
+ 0x261b84,
+ 0x22428a,
+ 0x52009c2,
+ 0x2ae907,
+ 0x27c6c8,
+ 0x5a07dc2,
+ 0x325747,
+ 0x2b72c4,
+ 0x2b72c7,
+ 0x36fa85,
+ 0x36ba87,
+ 0x329186,
+ 0x260c44,
+ 0x33f4c5,
+ 0x2a1447,
+ 0x6a036c2,
+ 0x346e43,
+ 0x20d402,
+ 0x365a03,
+ 0x6e0dec2,
+ 0x27edc5,
+ 0x7203402,
+ 0x24c184,
+ 0x27a0c5,
+ 0x215587,
+ 0x3907ce,
+ 0x2f5e84,
+ 0x23fb44,
+ 0x203403,
+ 0x2e7ac9,
+ 0x30534b,
+ 0x30c688,
+ 0x31aec8,
+ 0x321348,
+ 0x3114c8,
+ 0x35260a,
+ 0x36b987,
+ 0x223546,
+ 0x769d742,
+ 0x373483,
+ 0x37cf03,
+ 0x38c044,
+ 0x254183,
+ 0x3a48c3,
+ 0x1712542,
+ 0x7a06442,
+ 0x245845,
+ 0x24dcc6,
+ 0x2ca2c4,
+ 0x397487,
+ 0x27d286,
+ 0x31b9c4,
+ 0x3a7d87,
+ 0x206443,
+ 0x7ebf042,
+ 0x8252f42,
+ 0x8613bc2,
+ 0x213bc6,
+ 0x8a00002,
+ 0x37b205,
+ 0x313a83,
+ 0x204184,
+ 0x2d9c84,
+ 0x2d9c85,
+ 0x207043,
+ 0x8f23743,
+ 0x9209e42,
+ 0x288c85,
+ 0x288c8b,
+ 0x258306,
+ 0x20b6cb,
+ 0x271f44,
+ 0x20c9c9,
+ 0x20e284,
+ 0x960f202,
+ 0x20f903,
+ 0x20fc83,
+ 0x160fe02,
+ 0x23d483,
+ 0x20fe0a,
+ 0x9a10842,
+ 0x210845,
+ 0x28f40a,
+ 0x2cdd44,
+ 0x211603,
+ 0x211c44,
+ 0x2139c3,
+ 0x2139c4,
+ 0x2139c7,
+ 0x214405,
+ 0x216145,
+ 0x216686,
+ 0x2169c6,
+ 0x2173c3,
+ 0x219d48,
+ 0x256d03,
+ 0x9e1a382,
+ 0x21ab08,
+ 0x21a38b,
+ 0x21e608,
+ 0x21ed86,
+ 0x21fb07,
+ 0x2246c8,
+ 0xa635842,
+ 0xaa95682,
+ 0x2f5708,
+ 0x29e287,
+ 0x235e05,
+ 0x235e08,
+ 0x354888,
+ 0x387283,
+ 0x22b144,
+ 0x38c082,
+ 0xae2ca42,
+ 0xb214382,
+ 0xba2e142,
+ 0x22e143,
+ 0xbe01742,
+ 0x208e03,
+ 0x201744,
+ 0x217543,
+ 0x340484,
+ 0x25248b,
+ 0x21a2c3,
+ 0x2d2446,
+ 0x224104,
+ 0x29cbce,
+ 0x354ec5,
+ 0x25f248,
+ 0x21d287,
+ 0x21d28a,
+ 0x2341c3,
+ 0x2341c7,
+ 0x305505,
+ 0x387e04,
+ 0x3ac206,
+ 0x3ac207,
+ 0x2c2d44,
+ 0x390b07,
+ 0x3a9dc4,
+ 0x206144,
+ 0x206146,
+ 0x268984,
+ 0x21e046,
+ 0x20e0c3,
+ 0x222dc8,
+ 0x3b03c8,
+ 0x23fb03,
+ 0x23d443,
+ 0x395bc4,
+ 0x39aa83,
+ 0xc200482,
+ 0xc6fc042,
+ 0x2004c3,
+ 0x2072c6,
+ 0x37e383,
+ 0x21e4c4,
+ 0xca15442,
+ 0x326983,
+ 0x215443,
+ 0x217d82,
+ 0xce008c2,
+ 0x2bae86,
+ 0x232547,
+ 0x2e5745,
+ 0x2642c4,
+ 0x2a1305,
+ 0x202987,
+ 0x26b645,
+ 0x2af3c9,
+ 0x2c7606,
+ 0x2cf308,
+ 0x2e5646,
+ 0xd205742,
+ 0x240348,
+ 0x36cf06,
+ 0x205745,
+ 0x376d47,
+ 0x3b02c4,
+ 0x3b02c5,
+ 0x285844,
+ 0x285848,
+ 0xd60b782,
+ 0xda11a82,
+ 0x32b786,
+ 0x316cc8,
+ 0x32da85,
+ 0x337646,
+ 0x3387c8,
+ 0x33e708,
+ 0xde63085,
+ 0x3a3d84,
+ 0x3ad007,
+ 0xe20dbc2,
+ 0xe619fc2,
+ 0xfa04a82,
+ 0x3580c5,
+ 0x29f9c5,
+ 0x373806,
+ 0x318647,
+ 0x22b447,
+ 0x10258403,
+ 0x2a4a47,
+ 0x2d3708,
+ 0x380289,
+ 0x375447,
+ 0x383987,
+ 0x392988,
+ 0x3a5b86,
+ 0x3abd46,
+ 0x22ef0c,
+ 0x22fa8a,
+ 0x22fe07,
+ 0x23150b,
+ 0x232387,
+ 0x23238e,
+ 0x232bc4,
+ 0x232ec4,
+ 0x234447,
+ 0x259cc7,
+ 0x2380c6,
+ 0x2380c7,
+ 0x238c47,
+ 0x13207802,
+ 0x23a006,
+ 0x23a00a,
+ 0x23a28b,
+ 0x23b387,
+ 0x23bd45,
+ 0x23c083,
+ 0x23c346,
+ 0x23c347,
+ 0x239a03,
+ 0x1362d9c2,
+ 0x23cbca,
+ 0x13b51c82,
+ 0x13ea5202,
+ 0x1423e102,
+ 0x14633942,
+ 0x23ee45,
+ 0x23f904,
+ 0x14e00682,
+ 0x3704c5,
+ 0x275643,
+ 0x316745,
+ 0x20d9c4,
+ 0x291ec6,
+ 0x362306,
+ 0x288e83,
+ 0x36d844,
+ 0x3407c3,
+ 0x15201842,
+ 0x207bc4,
+ 0x3ad586,
+ 0x207bc5,
+ 0x256a86,
+ 0x376e48,
+ 0x218dc4,
+ 0x22d008,
+ 0x2ddfc5,
+ 0x2c8108,
+ 0x357c46,
+ 0x2b36c7,
+ 0x25e144,
+ 0x25e146,
+ 0x310083,
+ 0x382383,
+ 0x2bfd88,
+ 0x30aac4,
+ 0x329547,
+ 0x2443c6,
+ 0x308549,
+ 0x20aa88,
+ 0x24ab08,
+ 0x3058c4,
+ 0x3aae03,
+ 0x208c82,
+ 0x156b0a82,
+ 0x15a0b502,
+ 0x200d03,
+ 0x15e0a182,
+ 0x252e04,
+ 0x36c345,
+ 0x23b203,
+ 0x22f3c4,
+ 0x302b07,
+ 0x264003,
+ 0x243d48,
+ 0x207f85,
+ 0x3055c4,
+ 0x36ab03,
+ 0x27a045,
+ 0x27a184,
+ 0x20ba06,
+ 0x211d04,
+ 0x213746,
+ 0x2154c6,
+ 0x254984,
+ 0x21ebc3,
+ 0x1628bb42,
+ 0x34bdc5,
+ 0x21fec3,
+ 0x16600442,
+ 0x2633c5,
+ 0x230743,
+ 0x230749,
+ 0x16a03f42,
+ 0x17202282,
+ 0x24c545,
+ 0x218406,
+ 0x329907,
+ 0x2c9e86,
+ 0x2b9208,
+ 0x2b920b,
+ 0x20730b,
+ 0x22e5c5,
+ 0x2cf9c5,
+ 0x2c0cc9,
+ 0x1600bc2,
+ 0x254b48,
+ 0x20b904,
+ 0x17a00202,
+ 0x2520c3,
+ 0x18259e86,
+ 0x37e208,
+ 0x18606482,
+ 0x222308,
+ 0x18a079c2,
+ 0x27208a,
+ 0x226b03,
+ 0x306bc6,
+ 0x328dc8,
+ 0x203f88,
+ 0x331dc6,
+ 0x368847,
+ 0x23d147,
+ 0x210d8a,
+ 0x2cddc4,
+ 0x33ce04,
+ 0x3505c9,
+ 0x38f545,
+ 0x262e86,
+ 0x212203,
+ 0x244d04,
+ 0x213544,
+ 0x305d07,
+ 0x225f47,
+ 0x265e84,
+ 0x210cc5,
+ 0x3738c8,
+ 0x35e287,
+ 0x3613c7,
+ 0x18e0bc82,
+ 0x2f5d44,
+ 0x292c88,
+ 0x382844,
+ 0x242244,
+ 0x242645,
+ 0x242787,
+ 0x20e8c9,
+ 0x243604,
+ 0x244109,
+ 0x2446c8,
+ 0x244a84,
+ 0x244a87,
+ 0x245303,
+ 0x245dc7,
+ 0x1644942,
+ 0x17a5202,
+ 0x246a46,
+ 0x247107,
+ 0x2475c4,
+ 0x248287,
+ 0x249207,
+ 0x249fc8,
+ 0x24a743,
+ 0x237842,
+ 0x201182,
+ 0x24ca03,
+ 0x24ca04,
+ 0x24ca0b,
+ 0x31afc8,
+ 0x2569c4,
+ 0x24d705,
+ 0x250107,
+ 0x255745,
+ 0x35b0ca,
+ 0x256903,
+ 0x19205642,
+ 0x256c04,
+ 0x259a89,
+ 0x25da03,
+ 0x25dac7,
+ 0x39edc9,
+ 0x2aef08,
+ 0x2078c3,
+ 0x278f47,
+ 0x279689,
+ 0x2809c3,
+ 0x282bc4,
+ 0x283f49,
+ 0x286fc6,
+ 0x2886c3,
+ 0x2022c2,
+ 0x23f443,
+ 0x39bb87,
+ 0x37b345,
+ 0x358b06,
+ 0x244f04,
+ 0x2e3505,
+ 0x21c083,
+ 0x217606,
+ 0x20cbc2,
+ 0x3901c4,
+ 0x221b82,
+ 0x2d9603,
+ 0x196007c2,
+ 0x23fe43,
+ 0x216e44,
+ 0x216e47,
+ 0x36c406,
+ 0x246a02,
+ 0x19a4f282,
+ 0x377044,
+ 0x19e28142,
+ 0x1a215c02,
+ 0x31b704,
+ 0x31b705,
+ 0x2c0205,
+ 0x322f46,
+ 0x1a6101c2,
+ 0x227785,
+ 0x228285,
+ 0x29f903,
+ 0x37d386,
+ 0x3a8245,
+ 0x213b42,
+ 0x338405,
+ 0x213b44,
+ 0x218d03,
+ 0x218f43,
+ 0x1aa0b142,
+ 0x2ef587,
+ 0x35e504,
+ 0x35e509,
+ 0x244c04,
+ 0x229383,
+ 0x34d189,
+ 0x34bc88,
+ 0x29f844,
+ 0x29f846,
+ 0x2a2283,
+ 0x2123c3,
+ 0x21cdc4,
+ 0x2d9d43,
+ 0x1aed51c2,
+ 0x300102,
+ 0x1b21a042,
+ 0x315648,
+ 0x325b88,
+ 0x395006,
+ 0x241ec5,
+ 0x21ec45,
+ 0x24f2c5,
+ 0x220442,
+ 0x1b6912c2,
+ 0x162c282,
+ 0x38f6c8,
+ 0x240285,
+ 0x37c904,
+ 0x2ddf05,
+ 0x377607,
+ 0x24fc84,
+ 0x237642,
+ 0x1ba03c82,
+ 0x30a384,
+ 0x218b87,
+ 0x39e907,
+ 0x36ba44,
+ 0x28f3c3,
+ 0x23fa44,
+ 0x23fa48,
+ 0x2e0006,
+ 0x3ac08a,
+ 0x20e784,
+ 0x28f748,
+ 0x24a244,
+ 0x21fc06,
+ 0x291284,
+ 0x3583c6,
+ 0x262249,
+ 0x2605c7,
+ 0x233d03,
+ 0x1be06dc2,
+ 0x26bc83,
+ 0x20f402,
+ 0x1c213f02,
+ 0x2dd186,
+ 0x360648,
+ 0x2a3447,
+ 0x3a2f89,
+ 0x235609,
+ 0x2a3d05,
+ 0x2a5b89,
+ 0x2a6bc5,
+ 0x2a7549,
+ 0x2a8345,
+ 0x2a7f44,
+ 0x2a7f47,
+ 0x296f43,
+ 0x2a8f87,
+ 0x383d46,
+ 0x2aa487,
+ 0x2a0585,
+ 0x2aa303,
+ 0x1c62f542,
+ 0x3928c4,
+ 0x1ca28182,
+ 0x258dc3,
+ 0x1ce0d4c2,
+ 0x2e4d86,
+ 0x27c645,
+ 0x2ac987,
+ 0x328943,
+ 0x254104,
+ 0x216903,
+ 0x2f5443,
+ 0x1d20b9c2,
+ 0x1da00042,
+ 0x3949c4,
+ 0x237803,
+ 0x359545,
+ 0x2a9d85,
+ 0x1de04542,
+ 0x1e600942,
+ 0x279286,
+ 0x20a544,
+ 0x30ac04,
+ 0x30ac0a,
+ 0x1ee01042,
+ 0x2f780a,
+ 0x36ee08,
+ 0x1f201104,
+ 0x213ac3,
+ 0x252583,
+ 0x321489,
+ 0x2729c9,
+ 0x302c06,
+ 0x1f602503,
+ 0x2d8145,
+ 0x2f834d,
+ 0x202506,
+ 0x20928b,
+ 0x1fa01982,
+ 0x332e08,
+ 0x1fe19e42,
+ 0x20205f02,
+ 0x2c2f45,
+ 0x20603dc2,
+ 0x266947,
+ 0x2a5687,
+ 0x214803,
+ 0x2576c8,
+ 0x20a02602,
+ 0x2828c4,
+ 0x3a84c3,
+ 0x332805,
+ 0x387083,
+ 0x27c106,
+ 0x2eaec4,
+ 0x23d403,
+ 0x26c843,
+ 0x20e0a3c2,
+ 0x22e544,
+ 0x34ec05,
+ 0x366687,
+ 0x276dc3,
+ 0x2ad183,
+ 0x2ad983,
+ 0x1626682,
+ 0x2ada43,
+ 0x2adcc3,
+ 0x21206d02,
+ 0x30f384,
+ 0x27a3c6,
+ 0x20d343,
+ 0x2ae043,
+ 0x216af102,
+ 0x2af108,
+ 0x2aff04,
+ 0x259186,
+ 0x2b0547,
+ 0x229786,
+ 0x32db84,
+ 0x2f2001c2,
+ 0x383c0b,
+ 0x2fe28e,
+ 0x21954f,
+ 0x2332c3,
+ 0x2fa3f402,
+ 0x1614082,
+ 0x2fe01b82,
+ 0x22c983,
+ 0x231f83,
+ 0x2d8fc6,
+ 0x2ed8c6,
+ 0x2e3807,
+ 0x230204,
+ 0x302953c2,
+ 0x306082c2,
+ 0x2e78c5,
+ 0x2e9ac7,
+ 0x32b046,
+ 0x30a69c02,
+ 0x269c04,
+ 0x3712c3,
+ 0x30e0a482,
+ 0x34e083,
+ 0x3a07c4,
+ 0x2b64c9,
+ 0x16bd742,
+ 0x31234082,
+ 0x2d9846,
+ 0x267a05,
+ 0x3163fc02,
+ 0x31a00102,
+ 0x33be87,
+ 0x362b09,
+ 0x350dcb,
+ 0x23cf05,
+ 0x372d09,
+ 0x2be486,
+ 0x258347,
+ 0x31e080c4,
+ 0x24b649,
+ 0x35ac47,
+ 0x2b7a47,
+ 0x20a683,
+ 0x20a686,
+ 0x2dc647,
+ 0x206f43,
+ 0x278186,
+ 0x32604582,
+ 0x32a2fdc2,
+ 0x21ea83,
+ 0x253d05,
+ 0x21dec7,
+ 0x354b86,
+ 0x37b2c5,
+ 0x31e604,
+ 0x205105,
+ 0x2e6684,
+ 0x32e0a902,
+ 0x322487,
+ 0x2d7884,
+ 0x245b84,
+ 0x35c88d,
+ 0x245b89,
+ 0x2280c8,
+ 0x24ec84,
+ 0x3296c5,
+ 0x20a907,
+ 0x30f644,
+ 0x27d347,
+ 0x31bf45,
+ 0x33332384,
+ 0x2cecc5,
+ 0x25c6c4,
+ 0x24fdc6,
+ 0x318445,
+ 0x33632c82,
+ 0x2116c4,
+ 0x2116c5,
+ 0x211ac6,
+ 0x37b405,
+ 0x250844,
+ 0x2e1b83,
+ 0x325dc6,
+ 0x201305,
+ 0x202005,
+ 0x318544,
+ 0x20e803,
+ 0x20e80c,
+ 0x33a87902,
+ 0x33e07c82,
+ 0x342120c2,
+ 0x332283,
+ 0x332284,
+ 0x346067c2,
+ 0x2f2908,
+ 0x358bc5,
+ 0x268344,
+ 0x27d686,
+ 0x34a326c2,
+ 0x34e1fa82,
+ 0x35200982,
+ 0x2b5345,
+ 0x254846,
+ 0x305c44,
+ 0x3544c6,
+ 0x2ae6c6,
+ 0x202cc3,
+ 0x3570e38a,
+ 0x237b45,
+ 0x220906,
+ 0x2f0249,
+ 0x220907,
+ 0x28fb88,
+ 0x294fc9,
+ 0x224c08,
+ 0x311206,
+ 0x237d03,
+ 0x35a08a42,
+ 0x385103,
+ 0x385109,
+ 0x263988,
+ 0x35e0a582,
+ 0x36202242,
+ 0x230c43,
+ 0x2cf185,
+ 0x24d204,
+ 0x2c1b89,
+ 0x2a9784,
+ 0x2d2fc8,
+ 0x209403,
+ 0x252904,
+ 0x264443,
+ 0x35c7c7,
+ 0x36640a02,
+ 0x25efc2,
+ 0x22b905,
+ 0x269e49,
+ 0x219bc3,
+ 0x27aa04,
+ 0x2d8104,
+ 0x20a983,
+ 0x27dd0a,
+ 0x36b6ecc2,
+ 0x36e11682,
+ 0x2befc3,
+ 0x371483,
+ 0x16528c2,
+ 0x2543c3,
+ 0x37253702,
+ 0x295744,
+ 0x37608f82,
+ 0x37b0ac84,
+ 0x345546,
+ 0x2794c4,
+ 0x259583,
+ 0x280543,
+ 0x21f4c3,
+ 0x23a606,
+ 0x2c5405,
+ 0x2bf847,
+ 0x258209,
+ 0x2c3ec5,
+ 0x2c5346,
+ 0x2c5948,
+ 0x2c5b46,
+ 0x249c04,
+ 0x298d4b,
+ 0x2c7103,
+ 0x2c7105,
+ 0x2c7248,
+ 0x20f082,
+ 0x33c182,
+ 0x37e272c2,
+ 0x3820dc02,
+ 0x261983,
+ 0x38607a42,
+ 0x26b403,
+ 0x2c7544,
+ 0x2c88c3,
+ 0x38e00ec2,
+ 0x2ca3cb,
+ 0x392ccc86,
+ 0x2bc206,
+ 0x2cd2c8,
+ 0x396ccdc2,
+ 0x39a0fcc2,
+ 0x39e18f82,
+ 0x3a22c902,
+ 0x3a7a9b42,
+ 0x3a9b4b,
+ 0x3aa01082,
+ 0x222543,
+ 0x317805,
+ 0x31d706,
+ 0x3ae021c4,
+ 0x31cbc7,
+ 0x3ad38a,
+ 0x31d9c6,
+ 0x22e804,
+ 0x261583,
+ 0x3ba05702,
+ 0x201cc2,
+ 0x24e2c3,
+ 0x3be49943,
+ 0x2f0d07,
+ 0x318347,
+ 0x3d24cb07,
+ 0x226ac7,
+ 0x21a5c3,
+ 0x21d48a,
+ 0x21a5c4,
+ 0x2442c4,
+ 0x2442ca,
+ 0x24a445,
+ 0x3d601142,
+ 0x2491c3,
+ 0x3da01ec2,
+ 0x209583,
+ 0x26bc43,
+ 0x3e201a02,
+ 0x2a49c4,
+ 0x21bdc4,
+ 0x3b3145,
+ 0x2daa05,
+ 0x27af06,
+ 0x27b286,
+ 0x3e60ba82,
+ 0x3ea01a82,
+ 0x344b05,
+ 0x2bbf12,
+ 0x2477c6,
+ 0x222c83,
+ 0x22ddc6,
+ 0x2fdf45,
+ 0x1600d42,
+ 0x46e0cd42,
+ 0x2ec943,
+ 0x2e5ac3,
+ 0x2da803,
+ 0x47202bc2,
+ 0x375583,
+ 0x47610342,
+ 0x2070c3,
+ 0x30f3c8,
+ 0x223cc3,
+ 0x223cc6,
+ 0x39f6c7,
+ 0x2db306,
+ 0x2db30b,
+ 0x22e747,
+ 0x3926c4,
+ 0x47e00e82,
+ 0x2ee785,
+ 0x21a583,
+ 0x22a743,
+ 0x3194c3,
+ 0x3194c6,
+ 0x2cfa8a,
+ 0x26f343,
+ 0x233704,
+ 0x316c06,
+ 0x205b46,
+ 0x482257c3,
+ 0x253fc7,
+ 0x37bf4d,
+ 0x38b907,
+ 0x298a85,
+ 0x243b86,
+ 0x201343,
+ 0x49b7d5c3,
+ 0x49e00d82,
+ 0x310684,
+ 0x225c8c,
+ 0x35c149,
+ 0x22c087,
+ 0x242fc5,
+ 0x255e44,
+ 0x27e388,
+ 0x283845,
+ 0x2884c5,
+ 0x28ec89,
+ 0x2f48c3,
+ 0x2f48c4,
+ 0x2a5184,
+ 0x4a200ac2,
+ 0x25f2c3,
+ 0x4a690d42,
+ 0x3707c6,
+ 0x16adac2,
+ 0x4aa96f02,
+ 0x2b5248,
+ 0x2cec07,
+ 0x296f05,
+ 0x2d480b,
+ 0x2d1386,
+ 0x2d4a06,
+ 0x2f6946,
+ 0x229e04,
+ 0x2fa7c6,
+ 0x2d3e48,
+ 0x230e83,
+ 0x24cdc3,
+ 0x24cdc4,
+ 0x2d4f04,
+ 0x2d5207,
+ 0x2d6345,
+ 0x4aed6482,
+ 0x4b209d02,
+ 0x209d05,
+ 0x29b784,
+ 0x2d844b,
+ 0x2d9b88,
+ 0x2da204,
+ 0x269c42,
+ 0x4baaed82,
+ 0x2af343,
+ 0x2da644,
+ 0x2dae45,
+ 0x275a07,
+ 0x2dda44,
+ 0x22e604,
+ 0x4be05fc2,
+ 0x35a549,
+ 0x2dec85,
+ 0x23d1c5,
+ 0x2df805,
+ 0x4c219683,
+ 0x2e0644,
+ 0x2e064b,
+ 0x2e0c44,
+ 0x2e10cb,
+ 0x2e2205,
+ 0x21968a,
+ 0x2e39c8,
+ 0x2e3bca,
+ 0x2e3e43,
+ 0x2e3e4a,
+ 0x4c625702,
+ 0x4ca3c782,
+ 0x29ca83,
+ 0x4cee55c2,
+ 0x2e55c3,
+ 0x4d371082,
+ 0x4d714202,
+ 0x2e6504,
+ 0x219e86,
+ 0x354205,
+ 0x2e7203,
+ 0x274ec6,
+ 0x223a44,
+ 0x4da058c2,
+ 0x2b6a04,
+ 0x2c094a,
+ 0x385e87,
+ 0x27c486,
+ 0x2cff47,
+ 0x225dc3,
+ 0x24a2c8,
+ 0x25a20b,
+ 0x302d05,
+ 0x2b6e05,
+ 0x2b6e06,
+ 0x20c744,
+ 0x323548,
+ 0x211103,
+ 0x211104,
+ 0x211107,
+ 0x353ec6,
+ 0x322b06,
+ 0x29ca0a,
+ 0x241804,
+ 0x24180a,
+ 0x227306,
+ 0x227307,
+ 0x24d787,
+ 0x271884,
+ 0x271889,
+ 0x3621c5,
+ 0x23544b,
+ 0x273d43,
+ 0x213903,
+ 0x21ec83,
+ 0x388004,
+ 0x4de03b82,
+ 0x24f446,
+ 0x2aa085,
+ 0x2b1ac5,
+ 0x220046,
+ 0x36e604,
+ 0x4e200c02,
+ 0x220144,
+ 0x4e60b482,
+ 0x22f4c4,
+ 0x221983,
+ 0x4eae5b02,
+ 0x306543,
+ 0x257086,
+ 0x4ee03182,
+ 0x33e288,
+ 0x220784,
+ 0x220786,
+ 0x31b806,
+ 0x2501c4,
+ 0x325d45,
+ 0x3a3c88,
+ 0x3a80c7,
+ 0x2048c7,
+ 0x2048cf,
+ 0x292b86,
+ 0x2198c3,
+ 0x2198c4,
+ 0x224884,
+ 0x228383,
+ 0x21fd44,
+ 0x3ac384,
+ 0x4f225742,
+ 0x288bc3,
+ 0x235803,
+ 0x4f6057c2,
+ 0x234183,
+ 0x252ec3,
+ 0x2161ca,
+ 0x29e487,
+ 0x235fcc,
+ 0x236286,
+ 0x2369c6,
+ 0x237487,
+ 0x238dc7,
+ 0x23c109,
+ 0x21ac44,
+ 0x23c4c4,
+ 0x4fa05202,
+ 0x4fe03e42,
+ 0x253dc4,
+ 0x2fc1c6,
+ 0x2a3e08,
+ 0x37e044,
+ 0x266986,
+ 0x2c9e45,
+ 0x265b08,
+ 0x207503,
+ 0x269185,
+ 0x26b043,
+ 0x23d2c3,
+ 0x23d2c4,
+ 0x26f8c3,
+ 0x502de902,
+ 0x50600fc2,
+ 0x273c09,
+ 0x283745,
+ 0x283944,
+ 0x285a05,
+ 0x20de04,
+ 0x3a96c7,
+ 0x339c45,
+ 0x24ccc4,
+ 0x24ccc8,
+ 0x2d2946,
+ 0x2d4004,
+ 0x2d4488,
+ 0x2d76c7,
+ 0x50a1b842,
+ 0x2e1944,
+ 0x228444,
+ 0x2b7c47,
+ 0x50e74644,
+ 0x255342,
+ 0x51214202,
+ 0x2636c3,
+ 0x2636c4,
+ 0x234043,
+ 0x234045,
+ 0x5162dbc2,
+ 0x2f8a45,
+ 0x219b82,
+ 0x381505,
+ 0x360805,
+ 0x51a0acc2,
+ 0x2153c4,
+ 0x51e063c2,
+ 0x22d2c6,
+ 0x2ab886,
+ 0x269f88,
+ 0x2b89c8,
+ 0x2e4d04,
+ 0x314e05,
+ 0x2f8849,
+ 0x329a04,
+ 0x2cfa44,
+ 0x254a83,
+ 0x52210ec5,
+ 0x378047,
+ 0x2895c4,
+ 0x39ab0d,
+ 0x2e74c2,
+ 0x2e74c3,
+ 0x2e7583,
+ 0x52601d42,
+ 0x388bc5,
+ 0x2eb107,
+ 0x226b84,
+ 0x226b87,
+ 0x2951c9,
+ 0x2c0a89,
+ 0x21d047,
+ 0x253143,
+ 0x288308,
+ 0x239c09,
+ 0x2e7f47,
+ 0x2e82c5,
+ 0x2e8a86,
+ 0x2e90c6,
+ 0x2e9245,
+ 0x245c85,
+ 0x52a00c42,
+ 0x222885,
+ 0x2ba70a,
+ 0x2a6708,
+ 0x21f986,
+ 0x2e2447,
+ 0x265dc4,
+ 0x2b0387,
+ 0x2ecd86,
+ 0x52e00242,
+ 0x2117c6,
+ 0x2f048a,
+ 0x2f1645,
+ 0x532d1e82,
+ 0x53649742,
+ 0x2dc986,
+ 0x2ec288,
+ 0x39eac7,
+ 0x53a00602,
+ 0x20fa43,
+ 0x200a06,
+ 0x308e44,
+ 0x39f586,
+ 0x322c46,
+ 0x37cf8a,
+ 0x3a8b05,
+ 0x20cdc6,
+ 0x20d3c3,
+ 0x20d3c4,
+ 0x207282,
+ 0x2ff3c3,
+ 0x53e44382,
+ 0x2dc483,
+ 0x2f7a84,
+ 0x2ec3c4,
+ 0x2ec3ca,
+ 0x242103,
+ 0x285748,
+ 0x2746ca,
+ 0x233147,
+ 0x2f2f46,
+ 0x22d184,
+ 0x22e6c2,
+ 0x207a82,
+ 0x54205002,
+ 0x23fa03,
+ 0x24d547,
+ 0x275187,
+ 0x38f60b,
+ 0x370684,
+ 0x347107,
+ 0x275b06,
+ 0x213cc7,
+ 0x29e3c4,
+ 0x2c7d45,
+ 0x29fe45,
+ 0x54614882,
+ 0x226646,
+ 0x2c82c3,
+ 0x22e942,
+ 0x30e5c6,
+ 0x54a0d182,
+ 0x54e01582,
+ 0x201585,
+ 0x5521f6c2,
+ 0x556020c2,
+ 0x2e4685,
+ 0x38dd45,
+ 0x20ce85,
+ 0x267743,
+ 0x2350c5,
+ 0x2d1447,
+ 0x2a7405,
+ 0x32a4c5,
+ 0x25f344,
+ 0x23f046,
+ 0x246184,
+ 0x55a06882,
+ 0x278dc5,
+ 0x2a2a47,
+ 0x2fc3c8,
+ 0x26cc46,
+ 0x26cc4d,
+ 0x272789,
+ 0x272792,
+ 0x2efd05,
+ 0x2f8d83,
+ 0x56601382,
+ 0x2e4444,
+ 0x202583,
+ 0x324d05,
+ 0x35f8c5,
+ 0x56a1ce42,
+ 0x36ab43,
+ 0x56e3e2c2,
+ 0x57295802,
+ 0x5760d502,
+ 0x33d205,
+ 0x365dc3,
+ 0x323c08,
+ 0x57a030c2,
+ 0x57e035c2,
+ 0x2a4986,
+ 0x32820a,
+ 0x20c903,
+ 0x234703,
+ 0x2e9843,
+ 0x58a03d02,
+ 0x66e02c02,
+ 0x6760a242,
+ 0x201502,
+ 0x38c0c9,
+ 0x2bcb84,
+ 0x2579c8,
+ 0x67ae7242,
+ 0x67e03f02,
+ 0x2e1305,
+ 0x231948,
+ 0x245108,
+ 0x39e0cc,
+ 0x2352c3,
+ 0x240242,
+ 0x682049c2,
+ 0x2c4346,
+ 0x2f3dc5,
+ 0x321ac3,
+ 0x380786,
+ 0x2f3f06,
+ 0x24fe43,
+ 0x2f6703,
+ 0x2f7146,
+ 0x2f7f04,
+ 0x272186,
+ 0x2c72c5,
+ 0x2f818a,
+ 0x29e8c4,
+ 0x2f9844,
+ 0x348a4a,
+ 0x6866ff82,
+ 0x33de45,
+ 0x2fb58a,
+ 0x2fc5c5,
+ 0x2fd144,
+ 0x2fd246,
+ 0x2fd3c4,
+ 0x340186,
+ 0x68a00282,
+ 0x27bdc6,
+ 0x27ce85,
+ 0x203707,
+ 0x22eb06,
+ 0x237684,
+ 0x2afb87,
+ 0x30e2c6,
+ 0x211805,
+ 0x2af7c7,
+ 0x39b2c7,
+ 0x39b2ce,
+ 0x224046,
+ 0x27d205,
+ 0x27ef07,
+ 0x227603,
+ 0x227607,
+ 0x3aa985,
+ 0x20fd04,
+ 0x2213c2,
+ 0x2e5b47,
+ 0x230284,
+ 0x2d8f44,
+ 0x25ee4b,
+ 0x21b503,
+ 0x2835c7,
+ 0x21b504,
+ 0x2a5247,
+ 0x22b603,
+ 0x32cf0d,
+ 0x389408,
+ 0x24cbc4,
+ 0x24cbc5,
+ 0x2ffbc5,
+ 0x2fdc03,
+ 0x68e1a642,
+ 0x2ff383,
+ 0x2ff603,
+ 0x38cb44,
+ 0x279785,
+ 0x218fc7,
+ 0x20d446,
+ 0x36edc3,
+ 0x37784b,
+ 0x30e70b,
+ 0x26e78b,
+ 0x27988a,
+ 0x2a608b,
+ 0x2d0acb,
+ 0x2d1ecc,
+ 0x2f6d11,
+ 0x33ae8a,
+ 0x34b2cb,
+ 0x376acb,
+ 0x3b008a,
+ 0x3b218a,
+ 0x2fffcd,
+ 0x30128e,
+ 0x30190b,
+ 0x301bca,
+ 0x302f11,
+ 0x30334a,
+ 0x30384b,
+ 0x303d8e,
+ 0x3046cc,
+ 0x304a4b,
+ 0x304d0e,
+ 0x30508c,
+ 0x30700a,
+ 0x307d0c,
+ 0x6930800a,
+ 0x3095c9,
+ 0x30ae8a,
+ 0x30b10a,
+ 0x30b38b,
+ 0x30da0e,
+ 0x30dd91,
+ 0x31a149,
+ 0x31a38a,
+ 0x31ac4b,
+ 0x31edca,
+ 0x31f916,
+ 0x3210cb,
+ 0x324aca,
+ 0x32510a,
+ 0x32788b,
+ 0x32aa09,
+ 0x32d889,
+ 0x32e20d,
+ 0x32ea8b,
+ 0x32f7cb,
+ 0x33018b,
+ 0x330949,
+ 0x330f8e,
+ 0x3314ca,
+ 0x3338ca,
+ 0x333e0a,
+ 0x33454b,
+ 0x334d8b,
+ 0x33504d,
+ 0x33734d,
+ 0x338090,
+ 0x33854b,
+ 0x338b4c,
+ 0x3397cb,
+ 0x33b98b,
+ 0x33e48b,
+ 0x34318b,
+ 0x343c0f,
+ 0x343fcb,
+ 0x344c4a,
+ 0x345289,
+ 0x3456c9,
+ 0x345d4b,
+ 0x34600e,
+ 0x3490cb,
+ 0x349e8f,
+ 0x34c38b,
+ 0x34c64b,
+ 0x34c90b,
+ 0x34cd4a,
+ 0x3509c9,
+ 0x35688f,
+ 0x35d8cc,
+ 0x35dfcc,
+ 0x35f58e,
+ 0x35fd8f,
+ 0x36014e,
+ 0x360c50,
+ 0x36104f,
+ 0x36304e,
+ 0x36350c,
+ 0x363812,
+ 0x366291,
+ 0x36684e,
+ 0x366c8e,
+ 0x3671ce,
+ 0x36754f,
+ 0x36790e,
+ 0x367c93,
+ 0x368151,
+ 0x36858e,
+ 0x368a0c,
+ 0x369a93,
+ 0x36a510,
+ 0x36af4c,
+ 0x36b24c,
+ 0x36b70b,
+ 0x36c6ce,
+ 0x36da8b,
+ 0x36decb,
+ 0x36f48c,
+ 0x375c0a,
+ 0x3762cc,
+ 0x3765cc,
+ 0x3768c9,
+ 0x378acb,
+ 0x378d88,
+ 0x378f89,
+ 0x378f8f,
+ 0x37a8cb,
+ 0x37b5ca,
+ 0x37ed0c,
+ 0x380f09,
+ 0x3812c8,
+ 0x381ccb,
+ 0x38214b,
+ 0x38348a,
+ 0x38370b,
+ 0x384e8c,
+ 0x385888,
+ 0x38960b,
+ 0x38bdcb,
+ 0x38f8cb,
+ 0x391acb,
+ 0x39ae4b,
+ 0x39b109,
+ 0x39b64d,
+ 0x3a0b8a,
+ 0x3a1ad7,
+ 0x3a2758,
+ 0x3a6909,
+ 0x3a7b0b,
+ 0x3ab094,
+ 0x3ab58b,
+ 0x3abb0a,
+ 0x3ac48a,
+ 0x3ac70b,
+ 0x3ad710,
+ 0x3adb11,
+ 0x3ae3ca,
+ 0x3af68d,
+ 0x3afd8d,
+ 0x3b254b,
+ 0x3b3506,
+ 0x226743,
+ 0x6963d343,
+ 0x382b86,
+ 0x28c985,
+ 0x369607,
+ 0x33ad46,
+ 0x16235c2,
+ 0x2ad2c9,
+ 0x274cc4,
+ 0x2cf548,
+ 0x23f943,
+ 0x2e4387,
+ 0x239942,
+ 0x2ac9c3,
+ 0x69a006c2,
+ 0x2c2806,
+ 0x2c3dc4,
+ 0x310d04,
+ 0x2383c3,
+ 0x2383c5,
+ 0x6a2c6b02,
+ 0x2da544,
+ 0x2717c7,
+ 0x165ee02,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x202883,
+ 0x200882,
+ 0x894c8,
+ 0x204a82,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+ 0x315ed6,
+ 0x319093,
+ 0x346f89,
+ 0x3acf08,
+ 0x2ee609,
+ 0x2fb706,
+ 0x30a3d0,
+ 0x3b0ad3,
+ 0x207748,
+ 0x259687,
+ 0x277847,
+ 0x29deca,
+ 0x2f7b09,
+ 0x32a189,
+ 0x2975cb,
+ 0x329186,
+ 0x3115ca,
+ 0x21ed86,
+ 0x2748c3,
+ 0x2ef4c5,
+ 0x222dc8,
+ 0x22d38d,
+ 0x35818c,
+ 0x27cb47,
+ 0x3015cd,
+ 0x3a3d84,
+ 0x22ec8a,
+ 0x22f5ca,
+ 0x22fa8a,
+ 0x2631c7,
+ 0x237f07,
+ 0x23a9c4,
+ 0x25e146,
+ 0x354e44,
+ 0x2ed248,
+ 0x2a97c9,
+ 0x2b9206,
+ 0x2b9208,
+ 0x23d78d,
+ 0x2c0cc9,
+ 0x203f88,
+ 0x23d147,
+ 0x2017ca,
+ 0x247106,
+ 0x258c87,
+ 0x2db9c4,
+ 0x242dc7,
+ 0x35c4ca,
+ 0x337bce,
+ 0x24f2c5,
+ 0x2fd94b,
+ 0x2efb09,
+ 0x2729c9,
+ 0x2a54c7,
+ 0x399f4a,
+ 0x2b7b87,
+ 0x2fe3c9,
+ 0x358648,
+ 0x2d7c8b,
+ 0x2cf185,
+ 0x227f8a,
+ 0x218d49,
+ 0x321a4a,
+ 0x2c3f4b,
+ 0x242ccb,
+ 0x297355,
+ 0x2d4345,
+ 0x23d1c5,
+ 0x2e064a,
+ 0x3061ca,
+ 0x31e007,
+ 0x21b9c3,
+ 0x29cd48,
+ 0x2cb7ca,
+ 0x220786,
+ 0x239a49,
+ 0x265b08,
+ 0x2d4004,
+ 0x3379c9,
+ 0x2b89c8,
+ 0x357b87,
+ 0x278dc6,
+ 0x2a2a47,
+ 0x293687,
+ 0x23a405,
+ 0x24f10c,
+ 0x24cbc5,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x258403,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x258403,
+ 0x249943,
+ 0x223cc3,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x204a82,
+ 0x201802,
+ 0x22fcc2,
+ 0x202602,
+ 0x203c42,
+ 0x2954c2,
+ 0x4658403,
+ 0x230743,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x231a03,
+ 0x894c8,
+ 0x24c844,
+ 0x2526c7,
+ 0x255683,
+ 0x2c2f44,
+ 0x232283,
+ 0x283f83,
+ 0x2d9d43,
+ 0x200882,
+ 0x123743,
+ 0x5604a82,
+ 0x22fcc2,
+ 0x1104,
+ 0x2016c2,
+ 0xdfdc4,
+ 0x894c8,
+ 0x206043,
+ 0x2c69c3,
+ 0x5e58403,
+ 0x22ec84,
+ 0x6230743,
+ 0x66d9d43,
+ 0x20b9c2,
+ 0x201104,
+ 0x249943,
+ 0x211783,
+ 0x202542,
+ 0x2257c3,
+ 0x21a842,
+ 0x2e6443,
+ 0x203182,
+ 0x200f43,
+ 0x265bc3,
+ 0x203702,
+ 0x894c8,
+ 0x206043,
+ 0x211783,
+ 0x202542,
+ 0x2e6443,
+ 0x203182,
+ 0x200f43,
+ 0x265bc3,
+ 0x203702,
+ 0x2e6443,
+ 0x203182,
+ 0x200f43,
+ 0x265bc3,
+ 0x203702,
+ 0x258403,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x20bb42,
+ 0x219683,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x249943,
+ 0x2257c3,
+ 0x2e82c5,
+ 0x21ce42,
+ 0x200882,
+ 0x894c8,
+ 0x2d9d43,
+ 0x2542c1,
+ 0x20b041,
+ 0x254281,
+ 0x20adc1,
+ 0x24c901,
+ 0x271541,
+ 0x24c8c1,
+ 0x279a81,
+ 0x2f6f01,
+ 0x300281,
+ 0x200141,
+ 0x200001,
+ 0x894c8,
+ 0x200481,
+ 0x200741,
+ 0x200081,
+ 0x201181,
+ 0x2007c1,
+ 0x200901,
+ 0x200041,
+ 0x202b41,
+ 0x2001c1,
+ 0x2000c1,
+ 0x200341,
+ 0x200cc1,
+ 0x200e81,
+ 0x200ac1,
+ 0x219e81,
+ 0x200c01,
+ 0x200241,
+ 0x200a01,
+ 0x2002c1,
+ 0x200281,
+ 0x203701,
+ 0x203fc1,
+ 0x200781,
+ 0x200641,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2016c2,
+ 0x2257c3,
+ 0x63007,
+ 0x1f186,
+ 0x1d84a,
+ 0x87548,
+ 0x4d088,
+ 0x4d447,
+ 0x543c6,
+ 0xceb05,
+ 0x555c5,
+ 0x7e246,
+ 0x152dc6,
+ 0x224284,
+ 0x325607,
+ 0x894c8,
+ 0x2afc84,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x21ce42,
+ 0x2b6a83,
+ 0x21a1c3,
+ 0x262043,
+ 0x202202,
+ 0x245403,
+ 0x203803,
+ 0x202403,
+ 0x200001,
+ 0x207043,
+ 0x271f44,
+ 0x328983,
+ 0x30abc3,
+ 0x219fc3,
+ 0x35c043,
+ 0xa258403,
+ 0x232ec4,
+ 0x219f83,
+ 0x205283,
+ 0x230743,
+ 0x230483,
+ 0x218903,
+ 0x29fa83,
+ 0x30ab43,
+ 0x222303,
+ 0x213543,
+ 0x247a84,
+ 0x237842,
+ 0x24c943,
+ 0x256383,
+ 0x275843,
+ 0x254203,
+ 0x252f83,
+ 0x2d9d43,
+ 0x2e4f03,
+ 0x21bbc3,
+ 0x201103,
+ 0x2148c3,
+ 0x35fbc3,
+ 0x318703,
+ 0x3857c3,
+ 0x200983,
+ 0x230c43,
+ 0x219bc3,
+ 0x20f082,
+ 0x288883,
+ 0x249943,
+ 0x1602883,
+ 0x212b43,
+ 0x231583,
+ 0x22e043,
+ 0x2257c3,
+ 0x31c643,
+ 0x219683,
+ 0x236243,
+ 0x2f6783,
+ 0x2e6603,
+ 0x3b0845,
+ 0x244443,
+ 0x2e6643,
+ 0x2e7a03,
+ 0x20d3c4,
+ 0x259f83,
+ 0x35c0c3,
+ 0x275783,
+ 0x231a03,
+ 0x21ce42,
+ 0x2352c3,
+ 0x2fa644,
+ 0x2d8f44,
+ 0x23fc43,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x2257c3,
+ 0xb658403,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x205842,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x6c2,
+ 0x2034c2,
+ 0x2240c2,
+ 0x894c8,
+ 0x4a82,
+ 0x232502,
+ 0x209082,
+ 0x239642,
+ 0x201142,
+ 0x20ba82,
+ 0x555c5,
+ 0x20d082,
+ 0x202542,
+ 0x202bc2,
+ 0x2019c2,
+ 0x200ac2,
+ 0x384f82,
+ 0x214202,
+ 0x22c942,
+ 0x11880d,
+ 0xe95c9,
+ 0x44f0b,
+ 0xd1308,
+ 0x182cc9,
+ 0x2d9d43,
+ 0x894c8,
+ 0x894c8,
+ 0x4dfc6,
+ 0x200882,
+ 0x224284,
+ 0x204a82,
+ 0x258403,
+ 0x201802,
+ 0x230743,
+ 0x2095c2,
+ 0x2afc84,
+ 0x202503,
+ 0x20a582,
+ 0x249943,
+ 0x2016c2,
+ 0x2257c3,
+ 0x23d1c6,
+ 0x30b94f,
+ 0x6ffb43,
+ 0x894c8,
+ 0x204a82,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x147448b,
+ 0x204a82,
+ 0x258403,
+ 0x2d9d43,
+ 0x249943,
+ 0x200882,
+ 0x207d42,
+ 0x209e42,
+ 0xea58403,
+ 0x239482,
+ 0x230743,
+ 0x244942,
+ 0x221b82,
+ 0x2d9d43,
+ 0x220442,
+ 0x301b82,
+ 0x242f42,
+ 0x204042,
+ 0x28b382,
+ 0x201b02,
+ 0x200902,
+ 0x206dc2,
+ 0x26b682,
+ 0x213f02,
+ 0x2ad182,
+ 0x236bc2,
+ 0x2c8302,
+ 0x255582,
+ 0x219bc3,
+ 0x208f82,
+ 0x249943,
+ 0x23d982,
+ 0x26e742,
+ 0x2257c3,
+ 0x245482,
+ 0x2057c2,
+ 0x205202,
+ 0x200fc2,
+ 0x20acc2,
+ 0x2d1e82,
+ 0x214882,
+ 0x23e2c2,
+ 0x2267c2,
+ 0x301bca,
+ 0x344c4a,
+ 0x37ca0a,
+ 0x3b3682,
+ 0x20d042,
+ 0x23d3c2,
+ 0xef46cc9,
+ 0xf3a490a,
+ 0xf58fb47,
+ 0xad82,
+ 0x1a490a,
+ 0x2054c4,
+ 0xfe58403,
+ 0x230743,
+ 0x2446c4,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x244443,
+ 0x224043,
+ 0x894c8,
+ 0x1454344,
+ 0x53bc5,
+ 0x51eca,
+ 0x107c82,
+ 0x17bac6,
+ 0x153811,
+ 0x10746cc9,
+ 0x153c47,
+ 0x3442,
+ 0x1ac98a,
+ 0xd9547,
+ 0x894c8,
+ 0xfea08,
+ 0xdac7,
+ 0x1181918b,
+ 0x1a382,
+ 0xee987,
+ 0x574a,
+ 0x11030f,
+ 0x6308f,
+ 0x19fc2,
+ 0x4a82,
+ 0x9f9c8,
+ 0xe8d0a,
+ 0x63608,
+ 0x1842,
+ 0x11008f,
+ 0x128a0b,
+ 0x1702c8,
+ 0x7a287,
+ 0xd964a,
+ 0x574cb,
+ 0x10d109,
+ 0x1701c7,
+ 0xf25cc,
+ 0x11cac7,
+ 0xcfd0a,
+ 0x132948,
+ 0xef6ce,
+ 0x4ee8e,
+ 0xd938b,
+ 0x11e14b,
+ 0xe930b,
+ 0x1f189,
+ 0x2158b,
+ 0x23b0d,
+ 0x29c4b,
+ 0x2c38d,
+ 0x57b8d,
+ 0x7d04a,
+ 0xd8d8b,
+ 0xe5e4b,
+ 0x177ac5,
+ 0x108b50,
+ 0x15428f,
+ 0xf584f,
+ 0xec4d,
+ 0x71d50,
+ 0x79c2,
+ 0x11fa9188,
+ 0x62e88,
+ 0x122e0d05,
+ 0x4360b,
+ 0x4a748,
+ 0x11e30a,
+ 0x56c09,
+ 0x5e5c7,
+ 0x5e907,
+ 0x5eac7,
+ 0x5f107,
+ 0x5fb07,
+ 0x60407,
+ 0x60e87,
+ 0x65807,
+ 0x66007,
+ 0x661c7,
+ 0x66c47,
+ 0x66e07,
+ 0x66fc7,
+ 0x67187,
+ 0x67487,
+ 0x678c7,
+ 0x68e87,
+ 0x69547,
+ 0x69d07,
+ 0x6a707,
+ 0x6a8c7,
+ 0x6aec7,
+ 0x6b2c7,
+ 0x6b4c7,
+ 0x6b787,
+ 0x6b947,
+ 0x6bb07,
+ 0x6c6c7,
+ 0x6cf87,
+ 0x6da47,
+ 0x6e147,
+ 0x6e407,
+ 0x6ea47,
+ 0x6ec07,
+ 0x6ef87,
+ 0x6fdc7,
+ 0x70047,
+ 0x70447,
+ 0x70fc7,
+ 0x71187,
+ 0x715c7,
+ 0x72307,
+ 0x72607,
+ 0x72c07,
+ 0x72dc7,
+ 0x73147,
+ 0x73587,
+ 0xcbc2,
+ 0x3f34a,
+ 0xf6a07,
+ 0x124c8f0b,
+ 0x14c8f16,
+ 0x15c11,
+ 0xdce8a,
+ 0x9f84a,
+ 0x4dfc6,
+ 0x18df4b,
+ 0x1a042,
+ 0x187c51,
+ 0x97149,
+ 0x90ec9,
+ 0x6dc2,
+ 0x9d6ca,
+ 0xa3609,
+ 0xa3d0f,
+ 0xa4e8e,
+ 0xa5ec8,
+ 0xd4c2,
+ 0x742c9,
+ 0x8628e,
+ 0xabdcc,
+ 0xd328f,
+ 0x19510e,
+ 0xdf8c,
+ 0x13349,
+ 0x14f51,
+ 0x24dc8,
+ 0x2d892,
+ 0xc7fcd,
+ 0x1a638d,
+ 0x34ecb,
+ 0x3e755,
+ 0x3f209,
+ 0x41d8a,
+ 0x4fb49,
+ 0x56510,
+ 0x6c40b,
+ 0x7708f,
+ 0x7804b,
+ 0x7d90c,
+ 0x7e650,
+ 0x87f0a,
+ 0x8874d,
+ 0x1459ce,
+ 0x17480a,
+ 0x8d54c,
+ 0x93354,
+ 0x96dd1,
+ 0x9b64b,
+ 0x9c8cf,
+ 0xa9f4d,
+ 0xab74e,
+ 0x157a4c,
+ 0xebecc,
+ 0x15774b,
+ 0xe518e,
+ 0xf9050,
+ 0x12c8cb,
+ 0x168e8d,
+ 0xb3d4f,
+ 0xb55cc,
+ 0xb908e,
+ 0xb9891,
+ 0xbb70c,
+ 0x11aa87,
+ 0xc18cd,
+ 0xc2b4c,
+ 0xd2a90,
+ 0xe330d,
+ 0x1361c7,
+ 0xeced0,
+ 0xf1848,
+ 0x11f00b,
+ 0x16fe4f,
+ 0x295c8,
+ 0xdd08d,
+ 0x181490,
+ 0xaf303,
+ 0xa482,
+ 0x57f89,
+ 0x4ec8a,
+ 0xfa686,
+ 0x128d4609,
+ 0x15683,
+ 0x108351,
+ 0x153489,
+ 0xcdc07,
+ 0x11018b,
+ 0xd21d0,
+ 0xd268c,
+ 0xd3a85,
+ 0x1195c8,
+ 0x19c30a,
+ 0x126b87,
+ 0x1a82,
+ 0x54cca,
+ 0xf5b89,
+ 0x32f4a,
+ 0x19ea0f,
+ 0x3bdcb,
+ 0x11068c,
+ 0x110952,
+ 0xadac5,
+ 0x15e60a,
+ 0x12edf6c5,
+ 0x114203,
+ 0x184f82,
+ 0xe6e4a,
+ 0x156288,
+ 0x190c87,
+ 0x3b82,
+ 0xb482,
+ 0x3182,
+ 0x183e90,
+ 0x3e42,
+ 0x1a5c4f,
+ 0x7e246,
+ 0x11e78e,
+ 0xd5e0b,
+ 0x174a08,
+ 0xca189,
+ 0x17a092,
+ 0x3e4d,
+ 0x42b08,
+ 0x44dc9,
+ 0x4594d,
+ 0x47289,
+ 0x48a8b,
+ 0x49388,
+ 0x51d08,
+ 0x55c88,
+ 0x55f09,
+ 0x5610a,
+ 0x5dc4c,
+ 0xe6bca,
+ 0xf67c7,
+ 0x10ecd,
+ 0xea20b,
+ 0x74acc,
+ 0x5f350,
+ 0x35c2,
+ 0x16974d,
+ 0x3d02,
+ 0x2c02,
+ 0xf670a,
+ 0xdcd8a,
+ 0xe428b,
+ 0xe600c,
+ 0xfe78e,
+ 0x18cc0d,
+ 0xea948,
+ 0x6c2,
+ 0x10b2a68e,
+ 0x10d8fb47,
+ 0x1118fb49,
+ 0x10083,
+ 0x1171214c,
+ 0xad82,
+ 0x537d1,
+ 0x12a5d1,
+ 0x140851,
+ 0x165e51,
+ 0x11208f,
+ 0x11eacc,
+ 0x12478d,
+ 0x14824d,
+ 0x159a55,
+ 0xad8c,
+ 0x191050,
+ 0x106d0c,
+ 0x10c84c,
+ 0x4a509,
+ 0xad82,
+ 0x5388e,
+ 0x12a68e,
+ 0x14090e,
+ 0x165f0e,
+ 0x11214c,
+ 0x11eb89,
+ 0xae49,
+ 0x159c4d,
+ 0x106dc9,
+ 0x10c909,
+ 0x133803,
+ 0x95843,
+ 0xad82,
+ 0x153805,
+ 0x1ac984,
+ 0x28c84,
+ 0xe7e44,
+ 0x17b4c4,
+ 0xff504,
+ 0x153c44,
+ 0x141d2c3,
+ 0x1410d83,
+ 0xfe444,
+ 0x79c2,
+ 0x18cc03,
+ 0x200882,
+ 0x204a82,
+ 0x201802,
+ 0x20bc82,
+ 0x2095c2,
+ 0x2016c2,
+ 0x203182,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201103,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x249943,
+ 0x2257c3,
+ 0x4fc3,
+ 0x2d9d43,
+ 0x200882,
+ 0x323743,
+ 0x14a58403,
+ 0x37e0c7,
+ 0x2d9d43,
+ 0x332283,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x24388a,
+ 0x23d1c5,
+ 0x219683,
+ 0x201582,
+ 0x894c8,
+ 0x894c8,
+ 0x4a82,
+ 0x10e102,
+ 0xeeac5,
+ 0x894c8,
+ 0x58403,
+ 0xefa47,
+ 0xc678f,
+ 0xfa704,
+ 0x10d28a,
+ 0xaba07,
+ 0x9560a,
+ 0x18e3ca,
+ 0xfa686,
+ 0x790d,
+ 0x123743,
+ 0x894c8,
+ 0x4a82,
+ 0x446c4,
+ 0x68ac3,
+ 0xe82c5,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x203803,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x291083,
+ 0x224043,
+ 0x203803,
+ 0x224284,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x22f903,
+ 0x258403,
+ 0x230743,
+ 0x20e8c3,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x201104,
+ 0x265743,
+ 0x230c43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x200a43,
+ 0x16e58403,
+ 0x230743,
+ 0x241583,
+ 0x2d9d43,
+ 0x27da43,
+ 0x230c43,
+ 0x2257c3,
+ 0x207443,
+ 0x3284c4,
+ 0x894c8,
+ 0x17658403,
+ 0x230743,
+ 0x2a5f83,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x21ba43,
+ 0x894c8,
+ 0x17e58403,
+ 0x230743,
+ 0x2095c3,
+ 0x202883,
+ 0x2257c3,
+ 0x894c8,
+ 0x158fb47,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0xfbfc4,
+ 0x329345,
+ 0x894c8,
+ 0x742,
+ 0x31f43,
+ 0x355b88,
+ 0x241047,
+ 0x224284,
+ 0x352ac6,
+ 0x359906,
+ 0x894c8,
+ 0x240303,
+ 0x2f5249,
+ 0x2b33d5,
+ 0xb33df,
+ 0x258403,
+ 0x331dd2,
+ 0xff686,
+ 0x138e05,
+ 0x11e30a,
+ 0x56c09,
+ 0x331b8f,
+ 0x2afc84,
+ 0x240a45,
+ 0x35f990,
+ 0x3ad107,
+ 0x202883,
+ 0x251b48,
+ 0x2db58a,
+ 0x23b9c4,
+ 0x2df103,
+ 0x23d1c6,
+ 0x201582,
+ 0x385c4b,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x2e4b43,
+ 0x204a82,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x332283,
+ 0x208f83,
+ 0x2257c3,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x249943,
+ 0x2257c3,
+ 0x200882,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x224284,
+ 0x258403,
+ 0x230743,
+ 0x30ac84,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x21bbc3,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x254943,
+ 0x672c3,
+ 0x132283,
+ 0x249943,
+ 0x2257c3,
+ 0x301bca,
+ 0x31f6c9,
+ 0x33c04b,
+ 0x33c9ca,
+ 0x344c4a,
+ 0x351b4b,
+ 0x36ebca,
+ 0x375c0a,
+ 0x37ca0a,
+ 0x37cc8b,
+ 0x39c049,
+ 0x39de8a,
+ 0x39e3cb,
+ 0x3ab84b,
+ 0x3b1f4a,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x25e5c4,
+ 0x213142,
+ 0x2021c4,
+ 0x275685,
+ 0x203803,
+ 0x224284,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2446c4,
+ 0x2afc84,
+ 0x201104,
+ 0x230c43,
+ 0x249943,
+ 0x2257c3,
+ 0x293485,
+ 0x22f903,
+ 0x219683,
+ 0x25a383,
+ 0x24ccc4,
+ 0x254284,
+ 0x273f45,
+ 0x894c8,
+ 0x2f8cc4,
+ 0x21e046,
+ 0x285844,
+ 0x204a82,
+ 0x3614c7,
+ 0x246c47,
+ 0x242244,
+ 0x255745,
+ 0x2e3505,
+ 0x2a8f85,
+ 0x201104,
+ 0x316e08,
+ 0x362906,
+ 0x2e1a08,
+ 0x2386c5,
+ 0x2cf185,
+ 0x21a5c4,
+ 0x2257c3,
+ 0x2dfdc4,
+ 0x350d06,
+ 0x23d2c3,
+ 0x24ccc4,
+ 0x26be05,
+ 0x232144,
+ 0x38ca84,
+ 0x201582,
+ 0x24d2c6,
+ 0x3924c6,
+ 0x2f3dc5,
+ 0x200882,
+ 0x323743,
+ 0x1d604a82,
+ 0x231c44,
+ 0x2095c2,
+ 0x219bc3,
+ 0x22c902,
+ 0x249943,
+ 0x2016c2,
+ 0x2161c3,
+ 0x224043,
+ 0x894c8,
+ 0x894c8,
+ 0x2d9d43,
+ 0x200882,
+ 0x1e204a82,
+ 0x2d9d43,
+ 0x266f43,
+ 0x265743,
+ 0x320444,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x200882,
+ 0x1ea04a82,
+ 0x258403,
+ 0x249943,
+ 0x2257c3,
+ 0x201382,
+ 0x21ce42,
+ 0x332283,
+ 0x2d8843,
+ 0x200882,
+ 0x894c8,
+ 0x204a82,
+ 0x230743,
+ 0x2446c4,
+ 0x2099c3,
+ 0x2d9d43,
+ 0x21bbc3,
+ 0x219bc3,
+ 0x249943,
+ 0x2174c3,
+ 0x2257c3,
+ 0x21b9c3,
+ 0x127b13,
+ 0x131714,
+ 0x1a206,
+ 0x1f186,
+ 0x4cec7,
+ 0x75009,
+ 0x6208a,
+ 0x8740d,
+ 0x11850c,
+ 0x17c3ca,
+ 0x555c5,
+ 0x18c288,
+ 0x7e246,
+ 0x152dc6,
+ 0x2079c2,
+ 0x1739cc,
+ 0x1acb47,
+ 0x205d1,
+ 0x258403,
+ 0xcfc85,
+ 0xb444,
+ 0x15c06,
+ 0x8f1c6,
+ 0x8b64a,
+ 0xaccc3,
+ 0x74fc5,
+ 0xb983,
+ 0x18e00c,
+ 0x1af108,
+ 0x27bc8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x200882,
+ 0x204a82,
+ 0x2d9d43,
+ 0x20b9c2,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+ 0x35fd8f,
+ 0x36014e,
+ 0x894c8,
+ 0x258403,
+ 0x3df47,
+ 0x230743,
+ 0x2d9d43,
+ 0x202503,
+ 0x249943,
+ 0x2257c3,
+ 0x21b943,
+ 0x265107,
+ 0x203642,
+ 0x29ffc9,
+ 0x200dc2,
+ 0x38418b,
+ 0x28ff8a,
+ 0x291709,
+ 0x201c42,
+ 0x2544c6,
+ 0x250a95,
+ 0x3842d5,
+ 0x25fdd3,
+ 0x384853,
+ 0x204602,
+ 0x204ec5,
+ 0x31d3cc,
+ 0x22518b,
+ 0x26dd85,
+ 0x20e3c2,
+ 0x284d02,
+ 0x372c06,
+ 0x203442,
+ 0x2521c6,
+ 0x332acd,
+ 0x36458c,
+ 0x308bc4,
+ 0x2009c2,
+ 0x21fd82,
+ 0x22dc48,
+ 0x203402,
+ 0x30e986,
+ 0x2aebc4,
+ 0x250c55,
+ 0x25ff53,
+ 0x20ffc3,
+ 0x34634a,
+ 0x31c387,
+ 0x2e44c9,
+ 0x226fc7,
+ 0x252f42,
+ 0x200002,
+ 0x200006,
+ 0x208e82,
+ 0x894c8,
+ 0x20fe02,
+ 0x210842,
+ 0x399887,
+ 0x3aaa47,
+ 0x21a805,
+ 0x21a382,
+ 0x21b987,
+ 0x21bb48,
+ 0x235842,
+ 0x295682,
+ 0x22e142,
+ 0x201742,
+ 0x36d148,
+ 0x217543,
+ 0x268c88,
+ 0x2c780d,
+ 0x21a2c3,
+ 0x2f5fc8,
+ 0x230dcf,
+ 0x23118e,
+ 0x22410a,
+ 0x2a1591,
+ 0x2a1a10,
+ 0x2b218d,
+ 0x2b24cc,
+ 0x20bd07,
+ 0x3464c7,
+ 0x352b89,
+ 0x23d442,
+ 0x2004c2,
+ 0x24e74c,
+ 0x24ea4b,
+ 0x2008c2,
+ 0x357906,
+ 0x205742,
+ 0x211a82,
+ 0x219fc2,
+ 0x204a82,
+ 0x381f44,
+ 0x235b47,
+ 0x207802,
+ 0x23a547,
+ 0x23b7c7,
+ 0x212182,
+ 0x206082,
+ 0x23de05,
+ 0x200682,
+ 0x260fce,
+ 0x278b4d,
+ 0x230743,
+ 0x2842ce,
+ 0x3571cd,
+ 0x227283,
+ 0x204802,
+ 0x281b44,
+ 0x23fac2,
+ 0x2017c2,
+ 0x345485,
+ 0x34cb87,
+ 0x36e142,
+ 0x20bc82,
+ 0x243f47,
+ 0x247ec8,
+ 0x237842,
+ 0x2adb46,
+ 0x24e5cc,
+ 0x24e90b,
+ 0x205642,
+ 0x25a90f,
+ 0x25acd0,
+ 0x25b0cf,
+ 0x25b495,
+ 0x25b9d4,
+ 0x25bece,
+ 0x25c24e,
+ 0x25c5cf,
+ 0x25c98e,
+ 0x25cd14,
+ 0x25d213,
+ 0x25d6cd,
+ 0x273749,
+ 0x288603,
+ 0x2007c2,
+ 0x31b245,
+ 0x2099c6,
+ 0x2095c2,
+ 0x26d887,
+ 0x2d9d43,
+ 0x21a042,
+ 0x22cc08,
+ 0x2a17d1,
+ 0x2a1c10,
+ 0x200942,
+ 0x20f047,
+ 0x203dc2,
+ 0x30f787,
+ 0x20a482,
+ 0x24b949,
+ 0x372bc7,
+ 0x285b08,
+ 0x222446,
+ 0x261e43,
+ 0x261e45,
+ 0x22fdc2,
+ 0x200402,
+ 0x200405,
+ 0x22b385,
+ 0x20a902,
+ 0x2280c3,
+ 0x2321c7,
+ 0x3a3f87,
+ 0x201302,
+ 0x2ff804,
+ 0x23e383,
+ 0x2bfc09,
+ 0x2d9208,
+ 0x2120c2,
+ 0x2067c2,
+ 0x2164c7,
+ 0x21d1c5,
+ 0x2a4008,
+ 0x204b87,
+ 0x2037c3,
+ 0x2a1246,
+ 0x2b200d,
+ 0x2b238c,
+ 0x279346,
+ 0x209082,
+ 0x208a42,
+ 0x202242,
+ 0x230c4f,
+ 0x23104e,
+ 0x2e3587,
+ 0x200342,
+ 0x309445,
+ 0x309446,
+ 0x253702,
+ 0x208f82,
+ 0x212946,
+ 0x2a0203,
+ 0x30f6c6,
+ 0x2c1285,
+ 0x2c128d,
+ 0x2c1dd5,
+ 0x2c258c,
+ 0x2c330d,
+ 0x2c39d2,
+ 0x20dc02,
+ 0x207a42,
+ 0x201082,
+ 0x2e0986,
+ 0x2abc86,
+ 0x201a82,
+ 0x209a46,
+ 0x202bc2,
+ 0x21ff85,
+ 0x203c42,
+ 0x261109,
+ 0x33d40c,
+ 0x33d74b,
+ 0x2016c2,
+ 0x248308,
+ 0x201342,
+ 0x200d82,
+ 0x224f46,
+ 0x366b45,
+ 0x21f387,
+ 0x247485,
+ 0x2a1405,
+ 0x23dfc2,
+ 0x352f42,
+ 0x200ac2,
+ 0x277387,
+ 0x2d004d,
+ 0x2d03cc,
+ 0x234107,
+ 0x2adac2,
+ 0x21d302,
+ 0x22be08,
+ 0x258108,
+ 0x2d4148,
+ 0x2dd044,
+ 0x2e5407,
+ 0x2da3c3,
+ 0x2aed82,
+ 0x2137c2,
+ 0x2dd809,
+ 0x3a3107,
+ 0x205fc2,
+ 0x26f0c5,
+ 0x23c782,
+ 0x2768c2,
+ 0x2768c3,
+ 0x2768c6,
+ 0x2e4b42,
+ 0x2e63c2,
+ 0x2018c2,
+ 0x33e186,
+ 0x30f047,
+ 0x201702,
+ 0x2058c2,
+ 0x268acf,
+ 0x28410d,
+ 0x28668e,
+ 0x35704c,
+ 0x20cb82,
+ 0x2024c2,
+ 0x222285,
+ 0x3b2346,
+ 0x2135c2,
+ 0x20b942,
+ 0x203b82,
+ 0x204b04,
+ 0x2c7684,
+ 0x336d86,
+ 0x203182,
+ 0x277bc7,
+ 0x220a83,
+ 0x222988,
+ 0x2244c8,
+ 0x2c7e47,
+ 0x3a5fc6,
+ 0x21b842,
+ 0x234503,
+ 0x2413c7,
+ 0x2693c6,
+ 0x2e08c5,
+ 0x344808,
+ 0x2063c2,
+ 0x322587,
+ 0x210ec2,
+ 0x2e74c2,
+ 0x203e02,
+ 0x2bab89,
+ 0x200242,
+ 0x200a02,
+ 0x234383,
+ 0x32a387,
+ 0x2013c2,
+ 0x33d58c,
+ 0x33d88b,
+ 0x2793c6,
+ 0x20b8c5,
+ 0x21f6c2,
+ 0x2020c2,
+ 0x2b1d06,
+ 0x22aa83,
+ 0x33f547,
+ 0x242c82,
+ 0x206882,
+ 0x250915,
+ 0x384495,
+ 0x25fc93,
+ 0x3849d3,
+ 0x26bf07,
+ 0x289688,
+ 0x289690,
+ 0x28af0f,
+ 0x28fd53,
+ 0x2914d2,
+ 0x29fb90,
+ 0x2a8bcf,
+ 0x336352,
+ 0x31f291,
+ 0x34ed13,
+ 0x2ba952,
+ 0x2c0ecf,
+ 0x2c944e,
+ 0x2cba12,
+ 0x2cc851,
+ 0x2cd84f,
+ 0x2ce5ce,
+ 0x2fa911,
+ 0x2d9e10,
+ 0x2dd392,
+ 0x2e1dd1,
+ 0x2e25c6,
+ 0x2e4bc7,
+ 0x2f7947,
+ 0x205542,
+ 0x27f985,
+ 0x35a847,
+ 0x21ce42,
+ 0x208d02,
+ 0x228f05,
+ 0x21c743,
+ 0x2741c6,
+ 0x2d020d,
+ 0x2d054c,
+ 0x201502,
+ 0x31d24b,
+ 0x22504a,
+ 0x2eaf4a,
+ 0x2b0fc9,
+ 0x2dbe4b,
+ 0x204ccd,
+ 0x36cb8c,
+ 0x21ce8a,
+ 0x220c0c,
+ 0x33940b,
+ 0x26dbcc,
+ 0x270c0b,
+ 0x33d383,
+ 0x289286,
+ 0x2e7a82,
+ 0x2e7242,
+ 0x21e383,
+ 0x203f02,
+ 0x2047c3,
+ 0x2498c6,
+ 0x25b647,
+ 0x273406,
+ 0x2e8ec8,
+ 0x257e08,
+ 0x2f0646,
+ 0x2049c2,
+ 0x2f378d,
+ 0x2f3acc,
+ 0x2afd47,
+ 0x2f8b87,
+ 0x20c702,
+ 0x236e02,
+ 0x241342,
+ 0x32bd42,
+ 0x204a82,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+ 0x200882,
+ 0x200702,
+ 0x21a8d2c5,
+ 0x21e031c5,
+ 0x22309946,
+ 0x894c8,
+ 0x226ae205,
+ 0x204a82,
+ 0x201802,
+ 0x22b27605,
+ 0x22e7d805,
+ 0x2327ea47,
+ 0x23612609,
+ 0x23a57284,
+ 0x2095c2,
+ 0x21a042,
+ 0x23f808c5,
+ 0x2428f9c9,
+ 0x2470cf88,
+ 0x24aac305,
+ 0x24ebf387,
+ 0x25217788,
+ 0x256d6205,
+ 0x25a03606,
+ 0x25ed7ac9,
+ 0x26373f08,
+ 0x266b96c8,
+ 0x26a97d0a,
+ 0x26e457c4,
+ 0x272c9b05,
+ 0x276b4c88,
+ 0x27a67a05,
+ 0x2175c2,
+ 0x27e00343,
+ 0x282a0b86,
+ 0x28641bc8,
+ 0x28a087c6,
+ 0x28f0f188,
+ 0x2931d706,
+ 0x29701044,
+ 0x201cc2,
+ 0x29a404c7,
+ 0x29ea7ac4,
+ 0x2a276e47,
+ 0x2a79f6c7,
+ 0x2016c2,
+ 0x2aa98a85,
+ 0x2af2df84,
+ 0x2b3744c7,
+ 0x2b61c307,
+ 0x2ba81986,
+ 0x2be2af85,
+ 0x2c292307,
+ 0x2c6d0ec8,
+ 0x2cb1b5c7,
+ 0x2ceab1c9,
+ 0x2d38dd45,
+ 0x2d736047,
+ 0x2da8cb46,
+ 0x2de546c8,
+ 0x2c834d,
+ 0x23e149,
+ 0x2e0e0b,
+ 0x382f0b,
+ 0x27130b,
+ 0x2a380b,
+ 0x3006cb,
+ 0x30098b,
+ 0x300d89,
+ 0x301e4b,
+ 0x30210b,
+ 0x30268b,
+ 0x3035ca,
+ 0x303b0a,
+ 0x30410c,
+ 0x30764b,
+ 0x307a8a,
+ 0x31a60a,
+ 0x32b44e,
+ 0x32c04e,
+ 0x32c3ca,
+ 0x32e54a,
+ 0x32f08b,
+ 0x32f34b,
+ 0x32fecb,
+ 0x34960b,
+ 0x349c0a,
+ 0x34a8cb,
+ 0x34ab8a,
+ 0x34ae0a,
+ 0x34b08a,
+ 0x36fbcb,
+ 0x377d0b,
+ 0x37968e,
+ 0x379a0b,
+ 0x3831cb,
+ 0x38540b,
+ 0x3898ca,
+ 0x389b49,
+ 0x389d8a,
+ 0x38b40a,
+ 0x39cb4b,
+ 0x39e68b,
+ 0x39f00a,
+ 0x3a0e0b,
+ 0x3a590b,
+ 0x3b198b,
+ 0x2e280048,
+ 0x2e686b89,
+ 0x2eb5e8c9,
+ 0x2eecf548,
+ 0x336b05,
+ 0x202c83,
+ 0x20d2c4,
+ 0x2e7d45,
+ 0x256fc6,
+ 0x260585,
+ 0x285d04,
+ 0x26d788,
+ 0x21db45,
+ 0x28e684,
+ 0x201b07,
+ 0x29abca,
+ 0x35504a,
+ 0x35de07,
+ 0x202b87,
+ 0x2f16c7,
+ 0x364307,
+ 0x3af485,
+ 0x30fdc6,
+ 0x3287c7,
+ 0x247004,
+ 0x2fbe46,
+ 0x3a5246,
+ 0x3b31c5,
+ 0x305a04,
+ 0x2b8306,
+ 0x299fc7,
+ 0x2298c6,
+ 0x37c747,
+ 0x27a403,
+ 0x248f46,
+ 0x22f405,
+ 0x27eb47,
+ 0x2bde8a,
+ 0x22cd04,
+ 0x215988,
+ 0x2aac49,
+ 0x2c6c07,
+ 0x24b286,
+ 0x2de448,
+ 0x39fa49,
+ 0x234cc4,
+ 0x35e484,
+ 0x2fa145,
+ 0x2020c8,
+ 0x2be687,
+ 0x2a7149,
+ 0x23dc08,
+ 0x2fd446,
+ 0x23f046,
+ 0x295b48,
+ 0x372286,
+ 0x2031c5,
+ 0x281a46,
+ 0x277548,
+ 0x230b46,
+ 0x24d90b,
+ 0x233346,
+ 0x29788d,
+ 0x399e85,
+ 0x2a7986,
+ 0x2085c5,
+ 0x294709,
+ 0x341c87,
+ 0x37fd88,
+ 0x36d906,
+ 0x296249,
+ 0x2ee4c6,
+ 0x2bde05,
+ 0x27ba86,
+ 0x2a8646,
+ 0x2c4ec9,
+ 0x234ac6,
+ 0x36e4c7,
+ 0x2d71c5,
+ 0x204643,
+ 0x24da85,
+ 0x297b47,
+ 0x3274c6,
+ 0x399d89,
+ 0x309946,
+ 0x281c86,
+ 0x37d649,
+ 0x281449,
+ 0x29dd87,
+ 0x310e88,
+ 0x28f009,
+ 0x27f608,
+ 0x320146,
+ 0x2cd085,
+ 0x30888a,
+ 0x281d06,
+ 0x37df46,
+ 0x2a0d45,
+ 0x387ac8,
+ 0x20e647,
+ 0x3abe8a,
+ 0x244b06,
+ 0x2e7805,
+ 0x365bc6,
+ 0x326787,
+ 0x24b147,
+ 0x2bbb85,
+ 0x2bdfc5,
+ 0x29e106,
+ 0x2acd46,
+ 0x387106,
+ 0x330844,
+ 0x2809c9,
+ 0x287a46,
+ 0x28c58a,
+ 0x2171c8,
+ 0x335d48,
+ 0x35504a,
+ 0x359445,
+ 0x299f05,
+ 0x382a08,
+ 0x2c9888,
+ 0x332447,
+ 0x2023c6,
+ 0x313d48,
+ 0x2e1447,
+ 0x27ec88,
+ 0x368d46,
+ 0x282f08,
+ 0x2b2886,
+ 0x238847,
+ 0x296c86,
+ 0x2b8306,
+ 0x23280a,
+ 0x381fc6,
+ 0x2cd089,
+ 0x2ae506,
+ 0x2d180a,
+ 0x301049,
+ 0x2f0746,
+ 0x386bc4,
+ 0x31b30d,
+ 0x286e07,
+ 0x3163c6,
+ 0x2b9585,
+ 0x2ee545,
+ 0x31b806,
+ 0x26f609,
+ 0x2db807,
+ 0x278606,
+ 0x2c6606,
+ 0x285d89,
+ 0x2bf544,
+ 0x22a504,
+ 0x203888,
+ 0x249c86,
+ 0x26f188,
+ 0x27ba08,
+ 0x282987,
+ 0x200849,
+ 0x387307,
+ 0x2ae0ca,
+ 0x27b3cf,
+ 0x243b4a,
+ 0x222085,
+ 0x277785,
+ 0x214cc5,
+ 0x2aeb07,
+ 0x205d83,
+ 0x311088,
+ 0x2f43c6,
+ 0x2f44c9,
+ 0x2af546,
+ 0x2c3807,
+ 0x296009,
+ 0x37fc88,
+ 0x2a0e07,
+ 0x2ffa83,
+ 0x336b85,
+ 0x205d05,
+ 0x33068b,
+ 0x267ac4,
+ 0x2d2d44,
+ 0x276106,
+ 0x2ffe07,
+ 0x39814a,
+ 0x242487,
+ 0x234d47,
+ 0x27d805,
+ 0x2041c5,
+ 0x224a49,
+ 0x2b8306,
+ 0x24230d,
+ 0x358585,
+ 0x3029c3,
+ 0x206c43,
+ 0x346ec5,
+ 0x34d7c5,
+ 0x2de448,
+ 0x279047,
+ 0x22a286,
+ 0x29b9c6,
+ 0x22a845,
+ 0x230a07,
+ 0x202e07,
+ 0x3627c7,
+ 0x2c9b8a,
+ 0x249008,
+ 0x330844,
+ 0x3876c7,
+ 0x27a547,
+ 0x32ed06,
+ 0x266307,
+ 0x2b1708,
+ 0x35e7c8,
+ 0x26d246,
+ 0x264788,
+ 0x234b44,
+ 0x3287c6,
+ 0x20f646,
+ 0x3658c6,
+ 0x3478c6,
+ 0x29bf44,
+ 0x3643c6,
+ 0x2b8506,
+ 0x294d86,
+ 0x22adc6,
+ 0x206b06,
+ 0x2b1546,
+ 0x22a188,
+ 0x2fbcc8,
+ 0x2ca688,
+ 0x260788,
+ 0x382986,
+ 0x20dd85,
+ 0x277d06,
+ 0x2ac385,
+ 0x388d07,
+ 0x23dcc5,
+ 0x213a43,
+ 0x200e85,
+ 0x22a744,
+ 0x206c45,
+ 0x212b83,
+ 0x2f2b47,
+ 0x31a8c8,
+ 0x37c806,
+ 0x36918d,
+ 0x277746,
+ 0x293ec5,
+ 0x2bab83,
+ 0x2b4649,
+ 0x2bf6c6,
+ 0x2944c6,
+ 0x29d644,
+ 0x243ac7,
+ 0x231e86,
+ 0x387905,
+ 0x2327c3,
+ 0x203d04,
+ 0x27a706,
+ 0x2d2f44,
+ 0x30bc88,
+ 0x397609,
+ 0x342189,
+ 0x29d44a,
+ 0x23ac0d,
+ 0x30ee07,
+ 0x37ddc6,
+ 0x20d9c4,
+ 0x212609,
+ 0x2851c8,
+ 0x286a06,
+ 0x261386,
+ 0x266307,
+ 0x2bff86,
+ 0x21b206,
+ 0x397906,
+ 0x39f74a,
+ 0x217788,
+ 0x22d785,
+ 0x2826c9,
+ 0x27f18a,
+ 0x369508,
+ 0x299548,
+ 0x294448,
+ 0x20320c,
+ 0x2e5085,
+ 0x29bc48,
+ 0x309346,
+ 0x2d1186,
+ 0x375e47,
+ 0x242385,
+ 0x281bc5,
+ 0x342049,
+ 0x212287,
+ 0x2b1bc5,
+ 0x21cc87,
+ 0x206c43,
+ 0x2bebc5,
+ 0x37eb08,
+ 0x2ce187,
+ 0x299409,
+ 0x2d4005,
+ 0x2fb844,
+ 0x29f308,
+ 0x20be47,
+ 0x2a0fc8,
+ 0x329fc8,
+ 0x2ebdc5,
+ 0x240dc6,
+ 0x264e46,
+ 0x2e3109,
+ 0x3145c7,
+ 0x2ac786,
+ 0x31c787,
+ 0x212a03,
+ 0x257284,
+ 0x29b305,
+ 0x257444,
+ 0x33e8c4,
+ 0x248687,
+ 0x206287,
+ 0x2787c4,
+ 0x299250,
+ 0x322187,
+ 0x2041c5,
+ 0x33df0c,
+ 0x2b77c4,
+ 0x2f9648,
+ 0x238749,
+ 0x300546,
+ 0x227d08,
+ 0x259404,
+ 0x259408,
+ 0x388046,
+ 0x22ac48,
+ 0x29b006,
+ 0x2c89cb,
+ 0x204645,
+ 0x2c4a08,
+ 0x216cc4,
+ 0x28074a,
+ 0x299409,
+ 0x227e86,
+ 0x2d6cc8,
+ 0x256405,
+ 0x2ff184,
+ 0x2f9546,
+ 0x362688,
+ 0x280048,
+ 0x344586,
+ 0x325944,
+ 0x308806,
+ 0x387387,
+ 0x276d47,
+ 0x26630f,
+ 0x2074c7,
+ 0x2f0807,
+ 0x2d1045,
+ 0x2ec8c5,
+ 0x29da49,
+ 0x28c246,
+ 0x27e005,
+ 0x281747,
+ 0x2d6f88,
+ 0x294e85,
+ 0x296c86,
+ 0x217008,
+ 0x2087ca,
+ 0x2845c8,
+ 0x3adfc7,
+ 0x27b806,
+ 0x282686,
+ 0x205303,
+ 0x20d383,
+ 0x27f349,
+ 0x28ee89,
+ 0x2ab0c6,
+ 0x2d4005,
+ 0x2a4188,
+ 0x2d6cc8,
+ 0x2b9ec8,
+ 0x39798b,
+ 0x3693c7,
+ 0x2fdd89,
+ 0x266588,
+ 0x338944,
+ 0x2c4fc8,
+ 0x28a9c9,
+ 0x2aca85,
+ 0x2aea07,
+ 0x2f49c5,
+ 0x27ff48,
+ 0x28d14b,
+ 0x292050,
+ 0x2a7785,
+ 0x216c0c,
+ 0x22a445,
+ 0x209203,
+ 0x2a6a46,
+ 0x2b6d84,
+ 0x32e086,
+ 0x299fc7,
+ 0x212a44,
+ 0x23c808,
+ 0x310f4d,
+ 0x2d6b85,
+ 0x23b104,
+ 0x221dc4,
+ 0x282149,
+ 0x2a06c8,
+ 0x3097c7,
+ 0x3880c8,
+ 0x280a88,
+ 0x278905,
+ 0x262a87,
+ 0x278887,
+ 0x2f5007,
+ 0x2bdfc9,
+ 0x231d09,
+ 0x23a6c6,
+ 0x2b26c6,
+ 0x266546,
+ 0x25a505,
+ 0x3b1504,
+ 0x203506,
+ 0x203a86,
+ 0x278948,
+ 0x32644b,
+ 0x267f07,
+ 0x20d9c4,
+ 0x316846,
+ 0x209047,
+ 0x346805,
+ 0x3179c5,
+ 0x204884,
+ 0x231c86,
+ 0x203588,
+ 0x212609,
+ 0x2559c6,
+ 0x284b48,
+ 0x3879c6,
+ 0x32f5c8,
+ 0x2b010c,
+ 0x2787c6,
+ 0x293b8d,
+ 0x29400b,
+ 0x36e585,
+ 0x202f47,
+ 0x234bc6,
+ 0x24b008,
+ 0x23a749,
+ 0x2e2d48,
+ 0x2041c5,
+ 0x2ed607,
+ 0x27f708,
+ 0x3a2509,
+ 0x240686,
+ 0x36e2ca,
+ 0x24ad88,
+ 0x2e2b8b,
+ 0x2cb44c,
+ 0x259508,
+ 0x279e46,
+ 0x262488,
+ 0x207607,
+ 0x231f89,
+ 0x28f8cd,
+ 0x29a486,
+ 0x3a5348,
+ 0x2fbb89,
+ 0x2b5048,
+ 0x283008,
+ 0x2b8dcc,
+ 0x2ba0c7,
+ 0x2badc7,
+ 0x2bde05,
+ 0x2e9d47,
+ 0x2d6e48,
+ 0x2f95c6,
+ 0x25584c,
+ 0x2e22c8,
+ 0x2c5f48,
+ 0x361fc6,
+ 0x205a87,
+ 0x23a8c4,
+ 0x260788,
+ 0x35748c,
+ 0x21f70c,
+ 0x222105,
+ 0x3943c7,
+ 0x3258c6,
+ 0x205a06,
+ 0x2948c8,
+ 0x3a3584,
+ 0x2298cb,
+ 0x22264b,
+ 0x27b806,
+ 0x310dc7,
+ 0x261f45,
+ 0x26e545,
+ 0x229a06,
+ 0x2563c5,
+ 0x267a85,
+ 0x376107,
+ 0x276709,
+ 0x233444,
+ 0x35ec85,
+ 0x2d53c5,
+ 0x24f708,
+ 0x229245,
+ 0x2a6549,
+ 0x2c2f87,
+ 0x2c2f8b,
+ 0x2d0746,
+ 0x229ec9,
+ 0x305948,
+ 0x27e545,
+ 0x2f5108,
+ 0x231d48,
+ 0x218687,
+ 0x282547,
+ 0x248709,
+ 0x22ab87,
+ 0x374bc9,
+ 0x2a910c,
+ 0x2ab0c8,
+ 0x3af2c9,
+ 0x2b5447,
+ 0x280b49,
+ 0x2063c7,
+ 0x2cb548,
+ 0x24fa45,
+ 0x328746,
+ 0x2b95c8,
+ 0x2d4d08,
+ 0x27f049,
+ 0x267ac7,
+ 0x26e605,
+ 0x2112c9,
+ 0x2c0406,
+ 0x28cb44,
+ 0x2e2a06,
+ 0x241a48,
+ 0x244507,
+ 0x326648,
+ 0x264849,
+ 0x361d47,
+ 0x29ad86,
+ 0x203004,
+ 0x200f09,
+ 0x262908,
+ 0x361e87,
+ 0x30fec6,
+ 0x205dc6,
+ 0x37dec4,
+ 0x2a7b86,
+ 0x206bc3,
+ 0x355e89,
+ 0x204606,
+ 0x29f785,
+ 0x29b9c6,
+ 0x2a1105,
+ 0x27fb88,
+ 0x259247,
+ 0x364146,
+ 0x327646,
+ 0x335d48,
+ 0x29dbc7,
+ 0x29a4c5,
+ 0x29bec8,
+ 0x38b7c8,
+ 0x24ad88,
+ 0x22a305,
+ 0x3287c6,
+ 0x341f49,
+ 0x264cc4,
+ 0x37238b,
+ 0x21af0b,
+ 0x22d689,
+ 0x206c43,
+ 0x250645,
+ 0x20dc46,
+ 0x2585c8,
+ 0x27b344,
+ 0x37c806,
+ 0x2c9cc9,
+ 0x2c5d45,
+ 0x376046,
+ 0x20be46,
+ 0x202344,
+ 0x2996ca,
+ 0x29f6c8,
+ 0x2d4d06,
+ 0x24c0c5,
+ 0x20c807,
+ 0x22ff87,
+ 0x240dc4,
+ 0x21b147,
+ 0x23dc84,
+ 0x23dc86,
+ 0x217143,
+ 0x2bdfc5,
+ 0x370105,
+ 0x20c1c8,
+ 0x257385,
+ 0x278509,
+ 0x2605c7,
+ 0x2605cb,
+ 0x2a098c,
+ 0x2a200a,
+ 0x2bf387,
+ 0x201043,
+ 0x2e3688,
+ 0x22a4c5,
+ 0x294f05,
+ 0x336c44,
+ 0x2cb446,
+ 0x238746,
+ 0x2a7bc7,
+ 0x38c5cb,
+ 0x29bf44,
+ 0x37ff04,
+ 0x26d3c4,
+ 0x2c4746,
+ 0x212a44,
+ 0x2021c8,
+ 0x336a45,
+ 0x23b245,
+ 0x2b9e07,
+ 0x203049,
+ 0x34d7c5,
+ 0x371d0a,
+ 0x2d70c9,
+ 0x299b0a,
+ 0x39f889,
+ 0x385304,
+ 0x2c66c5,
+ 0x2c0088,
+ 0x37458b,
+ 0x2fa145,
+ 0x27bb86,
+ 0x21a544,
+ 0x278a46,
+ 0x361bc9,
+ 0x316907,
+ 0x309b08,
+ 0x23af86,
+ 0x387307,
+ 0x280048,
+ 0x38f206,
+ 0x23e684,
+ 0x360487,
+ 0x3458c5,
+ 0x34ba07,
+ 0x203604,
+ 0x234b46,
+ 0x217a08,
+ 0x2941c8,
+ 0x2e9ac7,
+ 0x212a48,
+ 0x2b2945,
+ 0x206a84,
+ 0x354f48,
+ 0x212b44,
+ 0x214c45,
+ 0x2eca04,
+ 0x2e1547,
+ 0x287b07,
+ 0x280c88,
+ 0x2a1146,
+ 0x257305,
+ 0x278308,
+ 0x2847c8,
+ 0x29d389,
+ 0x21b206,
+ 0x3abf08,
+ 0x2805ca,
+ 0x346888,
+ 0x2d6205,
+ 0x277f06,
+ 0x26f4c8,
+ 0x2ed6ca,
+ 0x305b47,
+ 0x2855c5,
+ 0x292848,
+ 0x2ad704,
+ 0x387b46,
+ 0x2bb548,
+ 0x206b06,
+ 0x359748,
+ 0x264b07,
+ 0x201a06,
+ 0x386bc4,
+ 0x37bdc7,
+ 0x2fefc4,
+ 0x361b87,
+ 0x2de18d,
+ 0x22d705,
+ 0x2cdf8b,
+ 0x29b106,
+ 0x248408,
+ 0x23c7c4,
+ 0x275446,
+ 0x27a706,
+ 0x2627c7,
+ 0x29384d,
+ 0x2a9dc7,
+ 0x302908,
+ 0x247705,
+ 0x2a7d08,
+ 0x2be606,
+ 0x2b29c8,
+ 0x211dc6,
+ 0x263f87,
+ 0x281009,
+ 0x339b47,
+ 0x286cc8,
+ 0x271705,
+ 0x21a888,
+ 0x205945,
+ 0x235cc5,
+ 0x358e45,
+ 0x222383,
+ 0x281ac4,
+ 0x2826c5,
+ 0x2d7ac9,
+ 0x324e86,
+ 0x2b1808,
+ 0x3a9485,
+ 0x32c607,
+ 0x246e0a,
+ 0x375f89,
+ 0x2a854a,
+ 0x2ca708,
+ 0x21cacc,
+ 0x2817cd,
+ 0x304983,
+ 0x359648,
+ 0x203cc5,
+ 0x208586,
+ 0x37fb06,
+ 0x2d5d45,
+ 0x31c889,
+ 0x355305,
+ 0x278308,
+ 0x251a46,
+ 0x33a446,
+ 0x29f1c9,
+ 0x38ea07,
+ 0x28d406,
+ 0x246d88,
+ 0x3657c8,
+ 0x2cf747,
+ 0x22adce,
+ 0x2be845,
+ 0x3a2405,
+ 0x206a08,
+ 0x326d87,
+ 0x205e02,
+ 0x2b8944,
+ 0x32df8a,
+ 0x361f48,
+ 0x209146,
+ 0x296148,
+ 0x264e46,
+ 0x323348,
+ 0x2ac788,
+ 0x235c84,
+ 0x3304c5,
+ 0x685844,
+ 0x685844,
+ 0x685844,
+ 0x2031c3,
+ 0x205c46,
+ 0x2787c6,
+ 0x29a74c,
+ 0x201a43,
+ 0x27f186,
+ 0x217104,
+ 0x2bf648,
+ 0x2c9b05,
+ 0x32e086,
+ 0x2b4d88,
+ 0x2cb746,
+ 0x3640c6,
+ 0x323848,
+ 0x29b387,
+ 0x22a949,
+ 0x2c864a,
+ 0x26aa44,
+ 0x23dcc5,
+ 0x2a7105,
+ 0x212406,
+ 0x30ee46,
+ 0x2a4586,
+ 0x2eb986,
+ 0x22aa84,
+ 0x22aa8b,
+ 0x22ff84,
+ 0x20c885,
+ 0x2ab645,
+ 0x282a46,
+ 0x3aae88,
+ 0x281687,
+ 0x3098c4,
+ 0x258903,
+ 0x2ad205,
+ 0x2e28c7,
+ 0x2a2609,
+ 0x28158b,
+ 0x2a7bc7,
+ 0x20c0c7,
+ 0x2b4c88,
+ 0x32c747,
+ 0x2a2846,
+ 0x23e408,
+ 0x2a478b,
+ 0x2e7c86,
+ 0x212d09,
+ 0x2a4905,
+ 0x2ffa83,
+ 0x376046,
+ 0x264a08,
+ 0x211e83,
+ 0x234c83,
+ 0x280046,
+ 0x264e46,
+ 0x38b18a,
+ 0x279e85,
+ 0x27a54b,
+ 0x29b90b,
+ 0x23bf83,
+ 0x21b543,
+ 0x2ae044,
+ 0x2643c7,
+ 0x259504,
+ 0x203204,
+ 0x3091c4,
+ 0x346b88,
+ 0x24c008,
+ 0x31c1c9,
+ 0x38ddc8,
+ 0x39fc07,
+ 0x22adc6,
+ 0x2b144f,
+ 0x2be986,
+ 0x2c9a84,
+ 0x24be4a,
+ 0x2e27c7,
+ 0x3b3246,
+ 0x28cb89,
+ 0x31c145,
+ 0x20c305,
+ 0x31c286,
+ 0x21a9c3,
+ 0x2ad749,
+ 0x217906,
+ 0x264609,
+ 0x398146,
+ 0x2bdfc5,
+ 0x222505,
+ 0x205cc3,
+ 0x264508,
+ 0x228b07,
+ 0x2f43c4,
+ 0x2bf4c8,
+ 0x2b8084,
+ 0x2c6f86,
+ 0x2a6a46,
+ 0x239786,
+ 0x2c48c9,
+ 0x294e85,
+ 0x2b8306,
+ 0x2667c9,
+ 0x3ae786,
+ 0x2b1546,
+ 0x386f46,
+ 0x2104c5,
+ 0x2eca06,
+ 0x263f84,
+ 0x24fa45,
+ 0x2b95c4,
+ 0x3090c6,
+ 0x358544,
+ 0x2064c3,
+ 0x285285,
+ 0x231a48,
+ 0x223947,
+ 0x2b3249,
+ 0x2854c8,
+ 0x295911,
+ 0x20beca,
+ 0x27b747,
+ 0x2edf06,
+ 0x217104,
+ 0x2b96c8,
+ 0x283b88,
+ 0x295aca,
+ 0x2a630d,
+ 0x27ba86,
+ 0x323946,
+ 0x37be86,
+ 0x2bba07,
+ 0x3029c5,
+ 0x254587,
+ 0x2bf585,
+ 0x2c30c4,
+ 0x2a5d46,
+ 0x328607,
+ 0x2ad44d,
+ 0x26f407,
+ 0x26d688,
+ 0x278609,
+ 0x277e06,
+ 0x240605,
+ 0x2145c4,
+ 0x241b46,
+ 0x240cc6,
+ 0x3620c6,
+ 0x298c48,
+ 0x210383,
+ 0x24f943,
+ 0x30fb85,
+ 0x31e686,
+ 0x2ac745,
+ 0x23b188,
+ 0x29a18a,
+ 0x30f304,
+ 0x2bf648,
+ 0x294448,
+ 0x282887,
+ 0x3a9549,
+ 0x2b4988,
+ 0x212687,
+ 0x26c106,
+ 0x206b0a,
+ 0x241bc8,
+ 0x2cb289,
+ 0x2a0788,
+ 0x217f09,
+ 0x2e2e47,
+ 0x2eb385,
+ 0x35ea46,
+ 0x2f9448,
+ 0x323a48,
+ 0x24db48,
+ 0x214d88,
+ 0x20c885,
+ 0x200884,
+ 0x228808,
+ 0x2bcbc4,
+ 0x39f684,
+ 0x2bdfc5,
+ 0x28e6c7,
+ 0x202e09,
+ 0x2625c7,
+ 0x280605,
+ 0x276306,
+ 0x33d146,
+ 0x208944,
+ 0x29f506,
+ 0x387644,
+ 0x283a86,
+ 0x3a3646,
+ 0x213106,
+ 0x2041c5,
+ 0x23b047,
+ 0x201043,
+ 0x33f949,
+ 0x335b48,
+ 0x212504,
+ 0x21250d,
+ 0x2942c8,
+ 0x381ac8,
+ 0x2cb206,
+ 0x281109,
+ 0x375f89,
+ 0x3618c5,
+ 0x29a28a,
+ 0x287cca,
+ 0x34c08c,
+ 0x34c206,
+ 0x276bc6,
+ 0x2beb06,
+ 0x26aa09,
+ 0x2087c6,
+ 0x2545c6,
+ 0x3553c6,
+ 0x260788,
+ 0x212a46,
+ 0x2c4c0b,
+ 0x28e845,
+ 0x23b245,
+ 0x276e45,
+ 0x2028c6,
+ 0x206ac3,
+ 0x239706,
+ 0x26f387,
+ 0x2b9585,
+ 0x23f105,
+ 0x2ee545,
+ 0x344986,
+ 0x30ce84,
+ 0x30ce86,
+ 0x293089,
+ 0x20274c,
+ 0x2c2e08,
+ 0x2931c4,
+ 0x2ec7c6,
+ 0x29b206,
+ 0x264a08,
+ 0x2d6cc8,
+ 0x202649,
+ 0x20c807,
+ 0x2499c9,
+ 0x247c06,
+ 0x22e244,
+ 0x20e304,
+ 0x27fe44,
+ 0x280048,
+ 0x202c4a,
+ 0x34d746,
+ 0x3514c7,
+ 0x22ce47,
+ 0x229fc5,
+ 0x2a70c4,
+ 0x28a986,
+ 0x302a06,
+ 0x231f43,
+ 0x335987,
+ 0x329ec8,
+ 0x361a0a,
+ 0x2cc1c8,
+ 0x30f188,
+ 0x358585,
+ 0x36e685,
+ 0x268005,
+ 0x22a386,
+ 0x37c246,
+ 0x2061c5,
+ 0x3560c9,
+ 0x2a6ecc,
+ 0x2680c7,
+ 0x295b48,
+ 0x2d6085,
+ 0x685844,
+ 0x20a104,
+ 0x2ce2c4,
+ 0x2c1786,
+ 0x29c48e,
+ 0x20c387,
+ 0x2bbc05,
+ 0x264c4c,
+ 0x2b7f47,
+ 0x328587,
+ 0x328f89,
+ 0x215a49,
+ 0x2855c5,
+ 0x335b48,
+ 0x341f49,
+ 0x2ea885,
+ 0x2b94c8,
+ 0x2c51c6,
+ 0x3551c6,
+ 0x301044,
+ 0x2a2408,
+ 0x245603,
+ 0x353b84,
+ 0x2ad285,
+ 0x31b807,
+ 0x209505,
+ 0x280489,
+ 0x38ba8d,
+ 0x2a53c6,
+ 0x35c244,
+ 0x202348,
+ 0x27654a,
+ 0x3b17c7,
+ 0x235385,
+ 0x208d03,
+ 0x29bace,
+ 0x264e4c,
+ 0x2fa487,
+ 0x29c647,
+ 0x203643,
+ 0x208805,
+ 0x2ce2c5,
+ 0x296508,
+ 0x292689,
+ 0x362506,
+ 0x259504,
+ 0x27b686,
+ 0x36558b,
+ 0x2eebcc,
+ 0x262347,
+ 0x2c97c5,
+ 0x38b6c8,
+ 0x2cf505,
+ 0x24be47,
+ 0x2404c7,
+ 0x245605,
+ 0x206ac3,
+ 0x36c2c4,
+ 0x20d285,
+ 0x2ace05,
+ 0x2ace06,
+ 0x2908c8,
+ 0x328607,
+ 0x37fe06,
+ 0x208486,
+ 0x358d86,
+ 0x3262c9,
+ 0x262b87,
+ 0x362386,
+ 0x2eed46,
+ 0x2456c6,
+ 0x2a7a85,
+ 0x20a206,
+ 0x399745,
+ 0x2292c8,
+ 0x291c8b,
+ 0x28a786,
+ 0x22ce84,
+ 0x2ed489,
+ 0x2605c4,
+ 0x2c5148,
+ 0x2f0e87,
+ 0x282f04,
+ 0x2b3b88,
+ 0x2ba684,
+ 0x2a7ac4,
+ 0x3a93c5,
+ 0x2d6bc6,
+ 0x346ac7,
+ 0x23b0c3,
+ 0x29ae45,
+ 0x2f4944,
+ 0x3a2446,
+ 0x361948,
+ 0x323745,
+ 0x28e149,
+ 0x2114c5,
+ 0x2f4bc8,
+ 0x326007,
+ 0x388e48,
+ 0x2b3087,
+ 0x2f08c9,
+ 0x364246,
+ 0x35aec6,
+ 0x28f144,
+ 0x26c045,
+ 0x2f300c,
+ 0x276e47,
+ 0x277647,
+ 0x22cd08,
+ 0x2a53c6,
+ 0x26f2c4,
+ 0x2eae44,
+ 0x248589,
+ 0x2bec06,
+ 0x224ac7,
+ 0x347844,
+ 0x324f86,
+ 0x328185,
+ 0x2a0c87,
+ 0x2c4b86,
+ 0x36e189,
+ 0x34bec7,
+ 0x266307,
+ 0x29f046,
+ 0x23ab05,
+ 0x27de88,
+ 0x217788,
+ 0x237a86,
+ 0x323785,
+ 0x255106,
+ 0x201b83,
+ 0x296389,
+ 0x2a430e,
+ 0x2b1e88,
+ 0x2b8188,
+ 0x23788b,
+ 0x28e386,
+ 0x30eac4,
+ 0x2813c4,
+ 0x2a440a,
+ 0x216b07,
+ 0x362445,
+ 0x212d09,
+ 0x2b85c5,
+ 0x39f6c7,
+ 0x300344,
+ 0x397787,
+ 0x27b908,
+ 0x2c6cc6,
+ 0x3a54c9,
+ 0x2b4a8a,
+ 0x216a86,
+ 0x293e06,
+ 0x2ab5c5,
+ 0x379fc5,
+ 0x333207,
+ 0x23f608,
+ 0x3280c8,
+ 0x235c86,
+ 0x222585,
+ 0x30ebce,
+ 0x330844,
+ 0x237a05,
+ 0x275c89,
+ 0x28c048,
+ 0x3adf06,
+ 0x2988cc,
+ 0x299d90,
+ 0x29c0cf,
+ 0x29d948,
+ 0x2bf387,
+ 0x2041c5,
+ 0x2826c5,
+ 0x346949,
+ 0x292a49,
+ 0x308906,
+ 0x2fa1c7,
+ 0x394345,
+ 0x332449,
+ 0x32ed86,
+ 0x20860d,
+ 0x27fd09,
+ 0x203204,
+ 0x2b1c08,
+ 0x2288c9,
+ 0x34d906,
+ 0x276405,
+ 0x35aec6,
+ 0x3099c9,
+ 0x27c808,
+ 0x20dd85,
+ 0x2806c4,
+ 0x298a8b,
+ 0x34d7c5,
+ 0x258646,
+ 0x281b06,
+ 0x265cc6,
+ 0x397b8b,
+ 0x28e249,
+ 0x206505,
+ 0x388c07,
+ 0x20be46,
+ 0x2de086,
+ 0x280348,
+ 0x26c209,
+ 0x26d44c,
+ 0x2e26c8,
+ 0x34da06,
+ 0x344583,
+ 0x2aec06,
+ 0x282385,
+ 0x27a888,
+ 0x221f86,
+ 0x2a0ec8,
+ 0x242505,
+ 0x212785,
+ 0x2998c8,
+ 0x2300c7,
+ 0x37fa47,
+ 0x2a7bc7,
+ 0x227d08,
+ 0x28cd48,
+ 0x26a386,
+ 0x308f07,
+ 0x257147,
+ 0x28224a,
+ 0x247b03,
+ 0x2028c6,
+ 0x202d85,
+ 0x32df84,
+ 0x278609,
+ 0x2f0844,
+ 0x2239c4,
+ 0x29b084,
+ 0x29c64b,
+ 0x228a47,
+ 0x30ee05,
+ 0x291b08,
+ 0x276306,
+ 0x276308,
+ 0x279dc6,
+ 0x289145,
+ 0x289a85,
+ 0x28b4c6,
+ 0x28c808,
+ 0x28cac8,
+ 0x2787c6,
+ 0x29194f,
+ 0x295e50,
+ 0x399e85,
+ 0x201043,
+ 0x247645,
+ 0x2fdcc8,
+ 0x292949,
+ 0x24ad88,
+ 0x326148,
+ 0x37d988,
+ 0x228b07,
+ 0x275fc9,
+ 0x2a10c8,
+ 0x2d3d44,
+ 0x29af08,
+ 0x24f7c9,
+ 0x30d4c7,
+ 0x297c84,
+ 0x262688,
+ 0x23ae0a,
+ 0x2c45c6,
+ 0x27ba86,
+ 0x21b0c9,
+ 0x299fc7,
+ 0x2c5548,
+ 0x3999c8,
+ 0x3476c8,
+ 0x351005,
+ 0x37af45,
+ 0x23b245,
+ 0x2ce285,
+ 0x32b287,
+ 0x206ac5,
+ 0x2b9585,
+ 0x3a8606,
+ 0x24acc7,
+ 0x3744c7,
+ 0x23b106,
+ 0x2cac45,
+ 0x258646,
+ 0x2592c5,
+ 0x2b7dc8,
+ 0x324e04,
+ 0x3ae806,
+ 0x2e4684,
+ 0x2ff188,
+ 0x3ae90a,
+ 0x27904c,
+ 0x38c7c5,
+ 0x2bbac6,
+ 0x26d606,
+ 0x3297c6,
+ 0x2fdec4,
+ 0x328445,
+ 0x279c07,
+ 0x29a049,
+ 0x2a2707,
+ 0x685844,
+ 0x685844,
+ 0x309745,
+ 0x227084,
+ 0x29828a,
+ 0x276186,
+ 0x2e2b04,
+ 0x3b31c5,
+ 0x2f8f45,
+ 0x302904,
+ 0x281747,
+ 0x211447,
+ 0x2c4748,
+ 0x317c48,
+ 0x20dd89,
+ 0x32ee88,
+ 0x29844b,
+ 0x212404,
+ 0x35e3c5,
+ 0x27e085,
+ 0x2a7b49,
+ 0x26c209,
+ 0x2ed388,
+ 0x23da88,
+ 0x282a44,
+ 0x29b245,
+ 0x202c83,
+ 0x2123c5,
+ 0x2b8386,
+ 0x2924cc,
+ 0x217806,
+ 0x259306,
+ 0x292685,
+ 0x344a08,
+ 0x2eee46,
+ 0x2ee086,
+ 0x27ba86,
+ 0x2260cc,
+ 0x362284,
+ 0x358eca,
+ 0x3ae0c8,
+ 0x292307,
+ 0x23e586,
+ 0x3625c7,
+ 0x2de9c5,
+ 0x30fec6,
+ 0x34fbc6,
+ 0x37f907,
+ 0x223a04,
+ 0x2e1645,
+ 0x275c84,
+ 0x2c3147,
+ 0x275ec8,
+ 0x276a4a,
+ 0x27f587,
+ 0x237c07,
+ 0x2bf307,
+ 0x2cf649,
+ 0x2924ca,
+ 0x22aa43,
+ 0x223905,
+ 0x213143,
+ 0x309209,
+ 0x22e988,
+ 0x2d1047,
+ 0x24ae89,
+ 0x217886,
+ 0x2af648,
+ 0x2f2ac5,
+ 0x2848ca,
+ 0x321f89,
+ 0x26d109,
+ 0x375e47,
+ 0x283c89,
+ 0x213008,
+ 0x2ecb86,
+ 0x2bbc88,
+ 0x2104c7,
+ 0x22ab87,
+ 0x2d70c7,
+ 0x2d0ec8,
+ 0x2ec646,
+ 0x23abc5,
+ 0x279c07,
+ 0x293908,
+ 0x358d04,
+ 0x28c444,
+ 0x28d307,
+ 0x2acb07,
+ 0x341dca,
+ 0x2ecb06,
+ 0x2fa30a,
+ 0x2b8887,
+ 0x330607,
+ 0x235d84,
+ 0x374c84,
+ 0x22c5c6,
+ 0x3558c4,
+ 0x3558cc,
+ 0x3a8d05,
+ 0x214bc9,
+ 0x2f4d44,
+ 0x3029c5,
+ 0x2764c8,
+ 0x28cb85,
+ 0x31b806,
+ 0x20f544,
+ 0x298fca,
+ 0x2d2e46,
+ 0x28ceca,
+ 0x31b5c7,
+ 0x2c8ac5,
+ 0x21a9c5,
+ 0x22a00a,
+ 0x29f605,
+ 0x29d446,
+ 0x2bcbc4,
+ 0x2ae1c6,
+ 0x3332c5,
+ 0x222046,
+ 0x2e9acc,
+ 0x2c56ca,
+ 0x26c104,
+ 0x22adc6,
+ 0x299fc7,
+ 0x2c8e84,
+ 0x260788,
+ 0x38dc46,
+ 0x30ea49,
+ 0x2c2949,
+ 0x2ab1c9,
+ 0x372546,
+ 0x2105c6,
+ 0x2bbdc7,
+ 0x356008,
+ 0x2103c9,
+ 0x228a47,
+ 0x2b27c6,
+ 0x387387,
+ 0x37bd45,
+ 0x330844,
+ 0x2bb987,
+ 0x2f49c5,
+ 0x285fc5,
+ 0x33b2c7,
+ 0x2454c8,
+ 0x38b646,
+ 0x294bcd,
+ 0x29670f,
+ 0x29b90d,
+ 0x20bf44,
+ 0x231b46,
+ 0x2cc508,
+ 0x355385,
+ 0x282408,
+ 0x21854a,
+ 0x203204,
+ 0x3a5686,
+ 0x28bbc7,
+ 0x3a6207,
+ 0x29b449,
+ 0x2bbc45,
+ 0x302904,
+ 0x33040a,
+ 0x2b4549,
+ 0x283d87,
+ 0x269846,
+ 0x34d906,
+ 0x29b186,
+ 0x360546,
+ 0x2cbe8f,
+ 0x2cc3c9,
+ 0x212a46,
+ 0x3a6606,
+ 0x274d09,
+ 0x309007,
+ 0x214603,
+ 0x226246,
+ 0x20d383,
+ 0x2d5c08,
+ 0x3871c7,
+ 0x29db49,
+ 0x2a68c8,
+ 0x37fb88,
+ 0x267c06,
+ 0x240b09,
+ 0x2c7ac5,
+ 0x23e584,
+ 0x2eb447,
+ 0x26aa85,
+ 0x20bf44,
+ 0x30eec8,
+ 0x216dc4,
+ 0x3078c7,
+ 0x31a846,
+ 0x29e1c5,
+ 0x2a0788,
+ 0x34d7cb,
+ 0x336047,
+ 0x22a286,
+ 0x2bea04,
+ 0x31d686,
+ 0x2bdfc5,
+ 0x2f49c5,
+ 0x27dc09,
+ 0x281349,
+ 0x22abc4,
+ 0x22ac05,
+ 0x22ae05,
+ 0x284746,
+ 0x335c48,
+ 0x2b7106,
+ 0x329d0b,
+ 0x3003ca,
+ 0x2ff0c5,
+ 0x289b06,
+ 0x2f40c5,
+ 0x2065c5,
+ 0x2945c7,
+ 0x203888,
+ 0x2499c4,
+ 0x3617c6,
+ 0x28cb46,
+ 0x2131c7,
+ 0x2ffa44,
+ 0x27a706,
+ 0x36d285,
+ 0x36d289,
+ 0x2107c4,
+ 0x2a7249,
+ 0x2787c6,
+ 0x2ba188,
+ 0x22ae05,
+ 0x22cf45,
+ 0x222046,
+ 0x26d349,
+ 0x215a49,
+ 0x259386,
+ 0x28c148,
+ 0x264d48,
+ 0x2f4084,
+ 0x360a44,
+ 0x360a48,
+ 0x3164c8,
+ 0x249ac9,
+ 0x2b8306,
+ 0x27ba86,
+ 0x313c0d,
+ 0x37c806,
+ 0x2affc9,
+ 0x202a85,
+ 0x31c286,
+ 0x2546c8,
+ 0x30cdc5,
+ 0x257184,
+ 0x2bdfc5,
+ 0x280e88,
+ 0x298049,
+ 0x275d44,
+ 0x234b46,
+ 0x2e2f8a,
+ 0x369508,
+ 0x341f49,
+ 0x2de5ca,
+ 0x24ae06,
+ 0x2968c8,
+ 0x24bc05,
+ 0x321e08,
+ 0x2b3185,
+ 0x217749,
+ 0x366f89,
+ 0x228c42,
+ 0x2a4905,
+ 0x26e286,
+ 0x278707,
+ 0x3ace45,
+ 0x2e7706,
+ 0x2f7f88,
+ 0x2a53c6,
+ 0x2bff49,
+ 0x277746,
+ 0x2801c8,
+ 0x2a8885,
+ 0x246546,
+ 0x264088,
+ 0x280048,
+ 0x3a36c8,
+ 0x2fd4c8,
+ 0x20a204,
+ 0x22a803,
+ 0x2c0184,
+ 0x27b606,
+ 0x37bd84,
+ 0x2b80c7,
+ 0x2edf89,
+ 0x2be205,
+ 0x3999c6,
+ 0x226246,
+ 0x29070b,
+ 0x2ff006,
+ 0x317006,
+ 0x2c3688,
+ 0x23f046,
+ 0x2a6603,
+ 0x209fc3,
+ 0x330844,
+ 0x3abe05,
+ 0x387807,
+ 0x275ec8,
+ 0x275ecf,
+ 0x279b0b,
+ 0x335a48,
+ 0x234bc6,
+ 0x335d4e,
+ 0x222043,
+ 0x2db944,
+ 0x2fef85,
+ 0x300c06,
+ 0x28aa8b,
+ 0x28e786,
+ 0x217089,
+ 0x29e1c5,
+ 0x38a288,
+ 0x20d588,
+ 0x21590c,
+ 0x29c686,
+ 0x212406,
+ 0x2d4005,
+ 0x286a88,
+ 0x24b145,
+ 0x338948,
+ 0x29bd4a,
+ 0x35e8c9,
+ 0x685844,
+ 0x2f604a82,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x249943,
+ 0x2257c3,
+ 0x224283,
+ 0x224284,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2afc84,
+ 0x2d9d43,
+ 0x3ad107,
+ 0x219bc3,
+ 0x202883,
+ 0x251b48,
+ 0x2257c3,
+ 0x2db58b,
+ 0x2df103,
+ 0x23d1c6,
+ 0x201582,
+ 0x385c4b,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x2257c3,
+ 0x29ca03,
+ 0x206883,
+ 0x200882,
+ 0x894c8,
+ 0x354045,
+ 0x2d3b88,
+ 0x2d88c8,
+ 0x204a82,
+ 0x365cc5,
+ 0x33f707,
+ 0x200202,
+ 0x23ca07,
+ 0x2095c2,
+ 0x237647,
+ 0x265389,
+ 0x3173c8,
+ 0x347549,
+ 0x331282,
+ 0x2672c7,
+ 0x259104,
+ 0x33f7c7,
+ 0x3002c7,
+ 0x23f402,
+ 0x219bc3,
+ 0x20dc02,
+ 0x201cc2,
+ 0x2016c2,
+ 0x200ac2,
+ 0x2058c2,
+ 0x2057c2,
+ 0x2a8405,
+ 0x20a045,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x481,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x249943,
+ 0x2257c3,
+ 0x20f0c3,
+ 0x3216cdc6,
+ 0x110083,
+ 0x7efc5,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x9f82,
+ 0x894c8,
+ 0x3f5c4,
+ 0xcf905,
+ 0x200882,
+ 0x2bb244,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x268583,
+ 0x2a8f85,
+ 0x202503,
+ 0x332283,
+ 0x249943,
+ 0x209583,
+ 0x2257c3,
+ 0x2161c3,
+ 0x224303,
+ 0x224043,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x204a82,
+ 0x2257c3,
+ 0x894c8,
+ 0x2d9d43,
+ 0x894c8,
+ 0x2c69c3,
+ 0x258403,
+ 0x22ec84,
+ 0x230743,
+ 0x2d9d43,
+ 0x20b9c2,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x20b9c2,
+ 0x230c43,
+ 0x249943,
+ 0x2257c3,
+ 0x2d8843,
+ 0x2161c3,
+ 0x200882,
+ 0x204a82,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x23d1c5,
+ 0xacec6,
+ 0x224284,
+ 0x201582,
+ 0x894c8,
+ 0x200882,
+ 0x1b788,
+ 0x204a82,
+ 0xe386,
+ 0x63604,
+ 0x11bb0b,
+ 0x1d786,
+ 0x63007,
+ 0x230743,
+ 0x2d9d43,
+ 0x158485,
+ 0x127784,
+ 0x262383,
+ 0x47ac7,
+ 0xcdec4,
+ 0x249943,
+ 0x132d84,
+ 0x2257c3,
+ 0x2dfdc4,
+ 0x1473c8,
+ 0x152dc6,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x202883,
+ 0x2257c3,
+ 0x2df103,
+ 0x201582,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201103,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2afc84,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x23d1c6,
+ 0x230743,
+ 0x2d9d43,
+ 0x175583,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x63007,
+ 0x894c8,
+ 0x2d9d43,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x38a58403,
+ 0x230743,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x200882,
+ 0x204a82,
+ 0x258403,
+ 0x2d9d43,
+ 0x249943,
+ 0x2016c2,
+ 0x2257c3,
+ 0x308207,
+ 0x2f538b,
+ 0x206603,
+ 0x22c1c8,
+ 0x355d87,
+ 0x2b76c6,
+ 0x2bc8c5,
+ 0x2f7b09,
+ 0x23cf48,
+ 0x311cc9,
+ 0x311cd0,
+ 0x35a5cb,
+ 0x2e8b89,
+ 0x204903,
+ 0x3a8809,
+ 0x22f786,
+ 0x22f78c,
+ 0x311ec8,
+ 0x3ae5c8,
+ 0x274009,
+ 0x29ce4e,
+ 0x37880b,
+ 0x27c20c,
+ 0x203803,
+ 0x25dfcc,
+ 0x207209,
+ 0x3736c7,
+ 0x23068c,
+ 0x39baca,
+ 0x2054c4,
+ 0x3a398d,
+ 0x25de88,
+ 0x22830d,
+ 0x2692c6,
+ 0x2975cb,
+ 0x3532c9,
+ 0x316ec7,
+ 0x31d846,
+ 0x322309,
+ 0x33264a,
+ 0x301708,
+ 0x2ded04,
+ 0x35eb47,
+ 0x275547,
+ 0x347a44,
+ 0x226d04,
+ 0x2615c9,
+ 0x2e7ac9,
+ 0x3114c8,
+ 0x20ffc5,
+ 0x392805,
+ 0x20cc06,
+ 0x3a3849,
+ 0x2187cd,
+ 0x27bc88,
+ 0x20cb07,
+ 0x2bc948,
+ 0x27d286,
+ 0x3a2044,
+ 0x37b205,
+ 0x204506,
+ 0x206704,
+ 0x207107,
+ 0x209bca,
+ 0x212f44,
+ 0x2169c6,
+ 0x2173c9,
+ 0x2173cf,
+ 0x217c0d,
+ 0x218146,
+ 0x21b390,
+ 0x21b786,
+ 0x21bec7,
+ 0x21c4c7,
+ 0x21c4cf,
+ 0x21dc89,
+ 0x221946,
+ 0x2246c7,
+ 0x2246c8,
+ 0x225449,
+ 0x28e488,
+ 0x2d5747,
+ 0x20b803,
+ 0x375746,
+ 0x2e1788,
+ 0x29d10a,
+ 0x21a2c9,
+ 0x20d883,
+ 0x33f606,
+ 0x36160a,
+ 0x2f6307,
+ 0x37350a,
+ 0x3a9dce,
+ 0x21ddc6,
+ 0x2a4b07,
+ 0x212046,
+ 0x2072c6,
+ 0x37ad4b,
+ 0x3b058a,
+ 0x2232cd,
+ 0x210687,
+ 0x355548,
+ 0x355549,
+ 0x35554f,
+ 0x205e8c,
+ 0x27ab09,
+ 0x3772ce,
+ 0x3ad20a,
+ 0x24c486,
+ 0x2ff406,
+ 0x30238c,
+ 0x3043cc,
+ 0x30e188,
+ 0x339a47,
+ 0x211a85,
+ 0x208a84,
+ 0x2531ce,
+ 0x3328c4,
+ 0x22b747,
+ 0x25f88a,
+ 0x369f14,
+ 0x36f74f,
+ 0x21c688,
+ 0x375608,
+ 0x36becd,
+ 0x36bece,
+ 0x380289,
+ 0x392988,
+ 0x39298f,
+ 0x23038c,
+ 0x23038f,
+ 0x231887,
+ 0x2336ca,
+ 0x21ac8b,
+ 0x235208,
+ 0x236407,
+ 0x259ccd,
+ 0x20ab46,
+ 0x3a3b46,
+ 0x239589,
+ 0x306248,
+ 0x23d548,
+ 0x23d54e,
+ 0x2f5487,
+ 0x2a9985,
+ 0x23ee45,
+ 0x20a884,
+ 0x2b7986,
+ 0x3113c8,
+ 0x2527c3,
+ 0x20524e,
+ 0x25a088,
+ 0x22784b,
+ 0x33fd07,
+ 0x3a3085,
+ 0x25e146,
+ 0x2aa9c7,
+ 0x2e6888,
+ 0x24ab09,
+ 0x292f85,
+ 0x2852c8,
+ 0x218ac6,
+ 0x37b9ca,
+ 0x2530c9,
+ 0x230749,
+ 0x23074b,
+ 0x323fc8,
+ 0x347909,
+ 0x210086,
+ 0x3591ca,
+ 0x2b5b8a,
+ 0x2338cc,
+ 0x340647,
+ 0x2a010a,
+ 0x35ef4b,
+ 0x35ef59,
+ 0x2dc808,
+ 0x23d245,
+ 0x259e86,
+ 0x2d9949,
+ 0x3178c6,
+ 0x2156ca,
+ 0x262e86,
+ 0x213544,
+ 0x2c0bcd,
+ 0x305d07,
+ 0x213549,
+ 0x241585,
+ 0x2416c8,
+ 0x242009,
+ 0x242244,
+ 0x242947,
+ 0x242948,
+ 0x2432c7,
+ 0x265948,
+ 0x2480c7,
+ 0x240845,
+ 0x25118c,
+ 0x251849,
+ 0x35b0ca,
+ 0x38e889,
+ 0x3a8909,
+ 0x26f90c,
+ 0x2587cb,
+ 0x258a88,
+ 0x25a708,
+ 0x25dac4,
+ 0x282bc8,
+ 0x283f49,
+ 0x39bb87,
+ 0x217606,
+ 0x23bb87,
+ 0x377089,
+ 0x34028b,
+ 0x327f47,
+ 0x36c507,
+ 0x2f4dc7,
+ 0x228284,
+ 0x228285,
+ 0x2a7845,
+ 0x3355cb,
+ 0x3989c4,
+ 0x318a88,
+ 0x2a958a,
+ 0x218b87,
+ 0x34d287,
+ 0x28a312,
+ 0x283986,
+ 0x2e0006,
+ 0x32704e,
+ 0x285a46,
+ 0x28f748,
+ 0x29020f,
+ 0x2286c8,
+ 0x286508,
+ 0x2b400a,
+ 0x2b4011,
+ 0x2a038e,
+ 0x23670a,
+ 0x23670c,
+ 0x2348c7,
+ 0x392b90,
+ 0x203b08,
+ 0x2a0585,
+ 0x2aae8a,
+ 0x20674c,
+ 0x2b2b0d,
+ 0x2abb46,
+ 0x2abb47,
+ 0x2abb4c,
+ 0x2f00cc,
+ 0x2d814c,
+ 0x28d70b,
+ 0x284c84,
+ 0x21b244,
+ 0x372689,
+ 0x2daac7,
+ 0x2e58c9,
+ 0x2b59c9,
+ 0x366687,
+ 0x39b946,
+ 0x39b949,
+ 0x3a51c3,
+ 0x2a54ca,
+ 0x208cc7,
+ 0x309ecb,
+ 0x22314a,
+ 0x237784,
+ 0x351606,
+ 0x27f809,
+ 0x31ca44,
+ 0x3a8dca,
+ 0x2e78c5,
+ 0x2b5e05,
+ 0x2b5e0d,
+ 0x2b614e,
+ 0x28f285,
+ 0x315286,
+ 0x23cdc7,
+ 0x2688ca,
+ 0x2e6a86,
+ 0x319bc4,
+ 0x314e87,
+ 0x219a8b,
+ 0x27d347,
+ 0x359404,
+ 0x24fdc6,
+ 0x24fdcd,
+ 0x23478c,
+ 0x325dc6,
+ 0x27be8a,
+ 0x20c646,
+ 0x2146c8,
+ 0x21e447,
+ 0x26834a,
+ 0x37c606,
+ 0x210583,
+ 0x254846,
+ 0x2015c8,
+ 0x29864a,
+ 0x268fc7,
+ 0x268fc8,
+ 0x26e6c4,
+ 0x283187,
+ 0x2c0488,
+ 0x2127c8,
+ 0x3a6708,
+ 0x28810a,
+ 0x2cf185,
+ 0x2c7707,
+ 0x236553,
+ 0x258486,
+ 0x2d2fc8,
+ 0x21fcc9,
+ 0x23c8c8,
+ 0x267c8b,
+ 0x2b8688,
+ 0x219bc4,
+ 0x2999c6,
+ 0x3b23c6,
+ 0x2d6a09,
+ 0x385687,
+ 0x251288,
+ 0x3ae246,
+ 0x21f4c4,
+ 0x2c5405,
+ 0x2bf148,
+ 0x2bfa0a,
+ 0x2c0848,
+ 0x2c5b46,
+ 0x298d4a,
+ 0x2334c8,
+ 0x2c8c88,
+ 0x2ca008,
+ 0x2ca906,
+ 0x2cc706,
+ 0x31dd4c,
+ 0x2ccc90,
+ 0x288885,
+ 0x2284c8,
+ 0x2f8490,
+ 0x2284d0,
+ 0x311b4e,
+ 0x31d9ce,
+ 0x31d9d4,
+ 0x32418f,
+ 0x324546,
+ 0x347e51,
+ 0x306413,
+ 0x306888,
+ 0x31d1c5,
+ 0x3587c8,
+ 0x20e545,
+ 0x228fcc,
+ 0x249d89,
+ 0x22b589,
+ 0x23b907,
+ 0x21a5c9,
+ 0x305f47,
+ 0x3af506,
+ 0x37b007,
+ 0x253945,
+ 0x2e5ac3,
+ 0x252989,
+ 0x223689,
+ 0x375583,
+ 0x3acd44,
+ 0x325a0d,
+ 0x37e40f,
+ 0x33b205,
+ 0x3194c6,
+ 0x213807,
+ 0x3b09c7,
+ 0x287686,
+ 0x28768b,
+ 0x2a21c5,
+ 0x256946,
+ 0x20bb87,
+ 0x26ed49,
+ 0x328c86,
+ 0x200d85,
+ 0x22020b,
+ 0x268606,
+ 0x242fc5,
+ 0x28b888,
+ 0x2b5248,
+ 0x2b66cc,
+ 0x2b66d0,
+ 0x2cae09,
+ 0x2f6b87,
+ 0x2d480b,
+ 0x2d4346,
+ 0x2d560a,
+ 0x2d678b,
+ 0x2d730a,
+ 0x2d7586,
+ 0x2d8705,
+ 0x355c86,
+ 0x277908,
+ 0x23b9ca,
+ 0x36bb5c,
+ 0x2df1cc,
+ 0x2df4c8,
+ 0x23d1c5,
+ 0x2e1c47,
+ 0x29ca86,
+ 0x399805,
+ 0x219e86,
+ 0x287848,
+ 0x2b47c7,
+ 0x29cd48,
+ 0x2a4c0a,
+ 0x32268c,
+ 0x322909,
+ 0x399b47,
+ 0x204b04,
+ 0x23f786,
+ 0x28608a,
+ 0x2b5ac5,
+ 0x364b4c,
+ 0x37d1c8,
+ 0x34bb08,
+ 0x20558c,
+ 0x20f98c,
+ 0x210949,
+ 0x210b87,
+ 0x2af94c,
+ 0x377784,
+ 0x339d4a,
+ 0x31cd4c,
+ 0x27018b,
+ 0x23588b,
+ 0x236286,
+ 0x238247,
+ 0x238dc7,
+ 0x392dcf,
+ 0x2f1211,
+ 0x3b2cd2,
+ 0x238dcd,
+ 0x238dce,
+ 0x23910e,
+ 0x324348,
+ 0x324352,
+ 0x23c4c8,
+ 0x2fb047,
+ 0x245f4a,
+ 0x20d0c8,
+ 0x285a05,
+ 0x32b0ca,
+ 0x21bcc7,
+ 0x2e1944,
+ 0x2636c3,
+ 0x31e545,
+ 0x2b4287,
+ 0x2f2187,
+ 0x2b2d0e,
+ 0x35db8d,
+ 0x36abc9,
+ 0x210ec5,
+ 0x39c543,
+ 0x252106,
+ 0x36aa45,
+ 0x273a48,
+ 0x2b1149,
+ 0x259ec5,
+ 0x259ecf,
+ 0x2d8547,
+ 0x2f7a45,
+ 0x3a058a,
+ 0x39a146,
+ 0x239c09,
+ 0x2e878c,
+ 0x2eab49,
+ 0x203d46,
+ 0x2a938c,
+ 0x2eb806,
+ 0x2eefc8,
+ 0x2ef1c6,
+ 0x2dc986,
+ 0x305a84,
+ 0x258e03,
+ 0x2ec3ca,
+ 0x321651,
+ 0x27acca,
+ 0x3644c5,
+ 0x38e287,
+ 0x24d547,
+ 0x2c0584,
+ 0x2c058b,
+ 0x317248,
+ 0x2b1d06,
+ 0x22cd85,
+ 0x25f344,
+ 0x26bd09,
+ 0x275284,
+ 0x3b0e87,
+ 0x2efd05,
+ 0x2efd07,
+ 0x327285,
+ 0x2a84c3,
+ 0x2faf08,
+ 0x32820a,
+ 0x23b0c3,
+ 0x35408a,
+ 0x26f786,
+ 0x259c4f,
+ 0x356e89,
+ 0x2051d0,
+ 0x2df9c8,
+ 0x2c6049,
+ 0x297e87,
+ 0x24fd4f,
+ 0x24b244,
+ 0x2afd04,
+ 0x21b606,
+ 0x2342c6,
+ 0x314bca,
+ 0x380786,
+ 0x3450c7,
+ 0x2f7148,
+ 0x2f7347,
+ 0x2f7d47,
+ 0x348a4a,
+ 0x2fad4b,
+ 0x27ce85,
+ 0x3b2908,
+ 0x22b843,
+ 0x36d5cc,
+ 0x21180f,
+ 0x26090d,
+ 0x2bc047,
+ 0x36ad09,
+ 0x22ca87,
+ 0x258ec8,
+ 0x36a10c,
+ 0x26b0c8,
+ 0x24cbc8,
+ 0x30a7ce,
+ 0x3202d4,
+ 0x3207e4,
+ 0x33cf0a,
+ 0x35aa0b,
+ 0x306004,
+ 0x306009,
+ 0x3a5708,
+ 0x23f945,
+ 0x2522ca,
+ 0x265247,
+ 0x2ee604,
+ 0x323743,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x219bc3,
+ 0x2ccc86,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x200882,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2d9d43,
+ 0x202503,
+ 0x2ccc86,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x7112c8,
+ 0x201742,
+ 0x200482,
+ 0x204a82,
+ 0x258403,
+ 0x201e02,
+ 0x201042,
+ 0x201104,
+ 0x30ac84,
+ 0x218f82,
+ 0x2021c4,
+ 0x2016c2,
+ 0x2257c3,
+ 0x219683,
+ 0x236286,
+ 0x21ce42,
+ 0x203d02,
+ 0x21a642,
+ 0x3b21fc03,
+ 0x3b606343,
+ 0x4df06,
+ 0x4df06,
+ 0x224284,
+ 0xf5dcc,
+ 0x18ce0c,
+ 0x7edcd,
+ 0xd9547,
+ 0x182c8,
+ 0x1e808,
+ 0x1a7e8a,
+ 0x3c2fc045,
+ 0x11fe09,
+ 0x153b08,
+ 0x19ec8a,
+ 0x190d0e,
+ 0x143c6cb,
+ 0x63604,
+ 0x1702c8,
+ 0x7a287,
+ 0x1a8387,
+ 0x10d109,
+ 0x11cac7,
+ 0x132948,
+ 0x1a2e49,
+ 0x12ba85,
+ 0x53e0e,
+ 0xa88cd,
+ 0x62e88,
+ 0x3c665e86,
+ 0x5ec87,
+ 0x5f747,
+ 0x68787,
+ 0x6df87,
+ 0xcbc2,
+ 0x122d07,
+ 0x1b074c,
+ 0xe94c7,
+ 0x8eb46,
+ 0xa3609,
+ 0xa5ec8,
+ 0xd4c2,
+ 0x1042,
+ 0x17dc0b,
+ 0x13349,
+ 0x3f209,
+ 0x295c8,
+ 0xaf102,
+ 0x40ec9,
+ 0xcd649,
+ 0xce408,
+ 0xce947,
+ 0xcf109,
+ 0xd1dc5,
+ 0xd21d0,
+ 0x19a286,
+ 0x555c5,
+ 0x23ccd,
+ 0x11c686,
+ 0xda487,
+ 0xdfdd8,
+ 0x156288,
+ 0x1a080a,
+ 0x162d0d,
+ 0x3e42,
+ 0x7e246,
+ 0x8ad48,
+ 0x174a08,
+ 0x89389,
+ 0x42b08,
+ 0x4e10e,
+ 0xe6f85,
+ 0x4a148,
+ 0x35c2,
+ 0x152dc6,
+ 0x6c2,
+ 0xc01,
+ 0x3cae0644,
+ 0x3ce91043,
+ 0x141,
+ 0x17d386,
+ 0x141,
+ 0x1,
+ 0x17d386,
+ 0x1564a45,
+ 0x2054c4,
+ 0x258403,
+ 0x2446c4,
+ 0x201104,
+ 0x249943,
+ 0x21fb85,
+ 0x20f0c3,
+ 0x244443,
+ 0x2e82c5,
+ 0x224043,
+ 0x3de58403,
+ 0x230743,
+ 0x2d9d43,
+ 0x200041,
+ 0x219bc3,
+ 0x30ac84,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+ 0x894c8,
+ 0x200882,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x201042,
+ 0x201104,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x224043,
+ 0x894c8,
+ 0x38c082,
+ 0x4a82,
+ 0xf750e,
+ 0x3ee00142,
+ 0x274948,
+ 0x2221c6,
+ 0x2634c6,
+ 0x221b47,
+ 0x3f207d42,
+ 0x3f756d08,
+ 0x20828a,
+ 0x25e708,
+ 0x200dc2,
+ 0x208b09,
+ 0x27cec7,
+ 0x217586,
+ 0x207c49,
+ 0x24f9c4,
+ 0x2b75c6,
+ 0x2d7804,
+ 0x276684,
+ 0x250689,
+ 0x354786,
+ 0x20a105,
+ 0x380a85,
+ 0x383a87,
+ 0x2b8b07,
+ 0x3809c4,
+ 0x221d86,
+ 0x2fe205,
+ 0x2e13c5,
+ 0x2f4005,
+ 0x3925c7,
+ 0x33fb45,
+ 0x307409,
+ 0x30f905,
+ 0x2cfec4,
+ 0x2e69c7,
+ 0x24b3ce,
+ 0x261c49,
+ 0x326f09,
+ 0x346646,
+ 0x33c4c8,
+ 0x2ae2cb,
+ 0x2d158c,
+ 0x25a586,
+ 0x3786c7,
+ 0x20a605,
+ 0x226d0a,
+ 0x31b0c9,
+ 0x24a909,
+ 0x295506,
+ 0x2edd05,
+ 0x34bf85,
+ 0x363c49,
+ 0x2f418b,
+ 0x279f46,
+ 0x330ac6,
+ 0x20cb04,
+ 0x289fc6,
+ 0x2a9a08,
+ 0x201446,
+ 0x3a3e46,
+ 0x209648,
+ 0x209e47,
+ 0x20a389,
+ 0x20b085,
+ 0x894c8,
+ 0x3a8304,
+ 0x37c344,
+ 0x211605,
+ 0x395c09,
+ 0x21ef07,
+ 0x21ef0b,
+ 0x22108a,
+ 0x226a05,
+ 0x3fa0b802,
+ 0x223007,
+ 0x3fe28d88,
+ 0x285007,
+ 0x354ac5,
+ 0x23458a,
+ 0x4a82,
+ 0x3aa0cb,
+ 0x3874ca,
+ 0x21fe86,
+ 0x3a3083,
+ 0x32dc0d,
+ 0x363e4c,
+ 0x3a214d,
+ 0x3828c5,
+ 0x238985,
+ 0x252807,
+ 0x201e09,
+ 0x208186,
+ 0x380605,
+ 0x2f1bc8,
+ 0x289ec3,
+ 0x2d8bc8,
+ 0x289ec8,
+ 0x2bd3c7,
+ 0x3b1588,
+ 0x200b09,
+ 0x232687,
+ 0x2f4f07,
+ 0x2f4688,
+ 0x3a9944,
+ 0x3a9947,
+ 0x2691c8,
+ 0x204786,
+ 0x37e78f,
+ 0x221407,
+ 0x2d58c6,
+ 0x259045,
+ 0x220803,
+ 0x36dd07,
+ 0x368c83,
+ 0x243486,
+ 0x245306,
+ 0x2466c6,
+ 0x28df45,
+ 0x265943,
+ 0x388ac8,
+ 0x36b4c9,
+ 0x37ef8b,
+ 0x246848,
+ 0x247d85,
+ 0x248d45,
+ 0x40237842,
+ 0x37b0c9,
+ 0x201187,
+ 0x2569c5,
+ 0x250587,
+ 0x255b46,
+ 0x360405,
+ 0x36a88b,
+ 0x258a84,
+ 0x25e2c5,
+ 0x25e407,
+ 0x273986,
+ 0x275385,
+ 0x282dc7,
+ 0x283347,
+ 0x26f744,
+ 0x288a8a,
+ 0x288f48,
+ 0x24bc89,
+ 0x2ee845,
+ 0x332f86,
+ 0x2a9bca,
+ 0x3aa306,
+ 0x20c4c7,
+ 0x31754d,
+ 0x22c6c9,
+ 0x24c745,
+ 0x253647,
+ 0x2637c8,
+ 0x263e48,
+ 0x311807,
+ 0x323086,
+ 0x2101c7,
+ 0x244c03,
+ 0x3355c4,
+ 0x35ce05,
+ 0x38d347,
+ 0x391fc9,
+ 0x21a048,
+ 0x22a6c5,
+ 0x2e62c4,
+ 0x36a3c5,
+ 0x23fccd,
+ 0x204042,
+ 0x302d86,
+ 0x27e186,
+ 0x2a3a8a,
+ 0x363386,
+ 0x374405,
+ 0x317d45,
+ 0x317d47,
+ 0x37b80c,
+ 0x271b8a,
+ 0x289c86,
+ 0x208945,
+ 0x289e06,
+ 0x28a147,
+ 0x28bd86,
+ 0x28de4c,
+ 0x207d89,
+ 0x4077d807,
+ 0x2905c5,
+ 0x2905c6,
+ 0x290ac8,
+ 0x2b0c05,
+ 0x2a29c5,
+ 0x2a2c08,
+ 0x2a2e0a,
+ 0x40a6b682,
+ 0x40e0f402,
+ 0x37ff85,
+ 0x2d5843,
+ 0x2e5c88,
+ 0x21d543,
+ 0x2a3084,
+ 0x239d4b,
+ 0x35edc8,
+ 0x2a6d08,
+ 0x41329249,
+ 0x2a8109,
+ 0x2a87c6,
+ 0x2aa648,
+ 0x2aa849,
+ 0x2ab406,
+ 0x2ab585,
+ 0x381146,
+ 0x2ac0c9,
+ 0x31b987,
+ 0x246406,
+ 0x2d9007,
+ 0x208007,
+ 0x240204,
+ 0x416fb349,
+ 0x2c4408,
+ 0x356c08,
+ 0x33e947,
+ 0x2bedc6,
+ 0x33b409,
+ 0x263487,
+ 0x341a8a,
+ 0x381908,
+ 0x3231c7,
+ 0x3330c6,
+ 0x260c0a,
+ 0x2488c8,
+ 0x28bec5,
+ 0x225b85,
+ 0x2d38c7,
+ 0x2d4fc9,
+ 0x2d64cb,
+ 0x2e9908,
+ 0x30f989,
+ 0x246b47,
+ 0x3aef0c,
+ 0x2b07cc,
+ 0x2b0aca,
+ 0x2b0d4c,
+ 0x2bc388,
+ 0x2bc588,
+ 0x2bc784,
+ 0x2bcb49,
+ 0x2bcd89,
+ 0x2bcfca,
+ 0x2bd249,
+ 0x2bd587,
+ 0x20010c,
+ 0x22bd06,
+ 0x273dc8,
+ 0x3aa3c6,
+ 0x386986,
+ 0x24c647,
+ 0x311988,
+ 0x254ecb,
+ 0x284ec7,
+ 0x2ed9c9,
+ 0x244849,
+ 0x250247,
+ 0x2d7a44,
+ 0x3608c7,
+ 0x329b86,
+ 0x216386,
+ 0x27c045,
+ 0x2cd448,
+ 0x20e444,
+ 0x20e446,
+ 0x271a4b,
+ 0x2a57c9,
+ 0x261a86,
+ 0x31bf49,
+ 0x392746,
+ 0x2ff808,
+ 0x23e383,
+ 0x20bac5,
+ 0x3aa509,
+ 0x3b1745,
+ 0x2f2904,
+ 0x272fc6,
+ 0x221805,
+ 0x2da246,
+ 0x2fc947,
+ 0x340546,
+ 0x296a4b,
+ 0x3590c7,
+ 0x2d0846,
+ 0x372806,
+ 0x383b46,
+ 0x380989,
+ 0x26a48a,
+ 0x2b4f05,
+ 0x22634d,
+ 0x2a2f06,
+ 0x3a0a06,
+ 0x2df8c6,
+ 0x214645,
+ 0x2d24c7,
+ 0x29a587,
+ 0x29e54e,
+ 0x219bc3,
+ 0x2bed89,
+ 0x317a89,
+ 0x227107,
+ 0x2794c7,
+ 0x2a4685,
+ 0x30ffc5,
+ 0x41a6170f,
+ 0x2c6287,
+ 0x2c6448,
+ 0x2c6b44,
+ 0x2c6e46,
+ 0x41e272c2,
+ 0x2cab86,
+ 0x2ccc86,
+ 0x25520e,
+ 0x2d8a0a,
+ 0x222b06,
+ 0x3a60ca,
+ 0x201c09,
+ 0x315a85,
+ 0x3941c8,
+ 0x3aedc6,
+ 0x31bd88,
+ 0x321c88,
+ 0x2597cb,
+ 0x221c45,
+ 0x33fbc8,
+ 0x20978c,
+ 0x354987,
+ 0x245e86,
+ 0x27d4c8,
+ 0x2b7848,
+ 0x4220ba82,
+ 0x36480b,
+ 0x2aedc9,
+ 0x365209,
+ 0x20a787,
+ 0x31c4c8,
+ 0x4260b288,
+ 0x3aab8b,
+ 0x229449,
+ 0x21e14d,
+ 0x212b48,
+ 0x29a9c8,
+ 0x42a02542,
+ 0x3a4104,
+ 0x42e05842,
+ 0x2ea586,
+ 0x432011c2,
+ 0x21f50a,
+ 0x352ec6,
+ 0x229a88,
+ 0x33c7c8,
+ 0x2b74c6,
+ 0x385fc6,
+ 0x2e4906,
+ 0x227a05,
+ 0x235b84,
+ 0x436ff784,
+ 0x336c86,
+ 0x26ddc7,
+ 0x43a2e647,
+ 0x2ebbcb,
+ 0x24b789,
+ 0x2389ca,
+ 0x254ac4,
+ 0x317e88,
+ 0x2461cd,
+ 0x2ddb49,
+ 0x2ddd88,
+ 0x2de849,
+ 0x2dfdc4,
+ 0x207b44,
+ 0x26ad85,
+ 0x309c8b,
+ 0x35ed46,
+ 0x336ac5,
+ 0x354c49,
+ 0x221e48,
+ 0x29de04,
+ 0x226e89,
+ 0x2ae605,
+ 0x2b8b48,
+ 0x2f55c7,
+ 0x327308,
+ 0x27fa06,
+ 0x222ec7,
+ 0x28f509,
+ 0x220389,
+ 0x243045,
+ 0x32dec5,
+ 0x43e24902,
+ 0x2e6784,
+ 0x22e005,
+ 0x29fec6,
+ 0x2e7645,
+ 0x248e07,
+ 0x269945,
+ 0x2699c4,
+ 0x346706,
+ 0x380687,
+ 0x23ef06,
+ 0x376fc5,
+ 0x31d008,
+ 0x2223c5,
+ 0x332207,
+ 0x39a449,
+ 0x2a590a,
+ 0x27afc7,
+ 0x27afcc,
+ 0x20a0c6,
+ 0x225649,
+ 0x377bc5,
+ 0x32b8c8,
+ 0x210043,
+ 0x210045,
+ 0x2e5805,
+ 0x251687,
+ 0x442121c2,
+ 0x2385c7,
+ 0x2e0ac6,
+ 0x2fb286,
+ 0x2e7086,
+ 0x2b7786,
+ 0x2d1bc8,
+ 0x358905,
+ 0x2d5987,
+ 0x2d598d,
+ 0x2636c3,
+ 0x3a3485,
+ 0x3a0347,
+ 0x385b08,
+ 0x39ff05,
+ 0x340048,
+ 0x22e446,
+ 0x31ffc7,
+ 0x2be3c5,
+ 0x221cc6,
+ 0x36c205,
+ 0x2bb2ca,
+ 0x2eb286,
+ 0x232a07,
+ 0x2c5e05,
+ 0x2fe5c7,
+ 0x314e04,
+ 0x2f2886,
+ 0x300f85,
+ 0x35458b,
+ 0x329a09,
+ 0x23ebca,
+ 0x2430c8,
+ 0x3312c8,
+ 0x333b0c,
+ 0x334047,
+ 0x335848,
+ 0x3507c8,
+ 0x35adc5,
+ 0x2bd80a,
+ 0x39c549,
+ 0x44601d42,
+ 0x204386,
+ 0x204f44,
+ 0x204f49,
+ 0x220e49,
+ 0x339607,
+ 0x270e07,
+ 0x2b5849,
+ 0x214848,
+ 0x21484f,
+ 0x33dbc6,
+ 0x3535cb,
+ 0x2e8105,
+ 0x2e8107,
+ 0x2e8549,
+ 0x226e06,
+ 0x226e07,
+ 0x3b3045,
+ 0x22f2c4,
+ 0x268206,
+ 0x200c44,
+ 0x30d607,
+ 0x2eb608,
+ 0x44aedc08,
+ 0x2ee205,
+ 0x2ee347,
+ 0x249749,
+ 0x2a4384,
+ 0x3b3308,
+ 0x44f8c408,
+ 0x2c0584,
+ 0x22fc08,
+ 0x31d904,
+ 0x21e9c9,
+ 0x35c705,
+ 0x45201582,
+ 0x33dc05,
+ 0x21bfc5,
+ 0x247888,
+ 0x2316c7,
+ 0x45606882,
+ 0x2c8905,
+ 0x24e446,
+ 0x266ac6,
+ 0x2e6748,
+ 0x32e888,
+ 0x2e7606,
+ 0x2ead46,
+ 0x20ee89,
+ 0x2fb1c6,
+ 0x30564b,
+ 0x24ddc5,
+ 0x20d006,
+ 0x380448,
+ 0x20ac46,
+ 0x292e06,
+ 0x21938a,
+ 0x25784a,
+ 0x23ffc5,
+ 0x3589c7,
+ 0x344786,
+ 0x45a01502,
+ 0x3a0487,
+ 0x22d205,
+ 0x2a9b44,
+ 0x2a9b45,
+ 0x2549c6,
+ 0x272447,
+ 0x204cc5,
+ 0x220f04,
+ 0x2732c8,
+ 0x292ec5,
+ 0x291347,
+ 0x2cefc5,
+ 0x215305,
+ 0x244e84,
+ 0x28d949,
+ 0x2fe048,
+ 0x30f506,
+ 0x3a8546,
+ 0x2c0286,
+ 0x45fa6b08,
+ 0x2f2007,
+ 0x2f234d,
+ 0x2f2d0c,
+ 0x2f3309,
+ 0x2f3549,
+ 0x4634f642,
+ 0x3a4f83,
+ 0x2049c3,
+ 0x329c45,
+ 0x38d44a,
+ 0x319a06,
+ 0x2f98c5,
+ 0x2fce84,
+ 0x2fce8b,
+ 0x30b64c,
+ 0x30be8c,
+ 0x30c195,
+ 0x30cb4d,
+ 0x31244f,
+ 0x312812,
+ 0x312c8f,
+ 0x313052,
+ 0x3134d3,
+ 0x31398d,
+ 0x313f4d,
+ 0x3142ce,
+ 0x31478e,
+ 0x31504c,
+ 0x31540c,
+ 0x31584b,
+ 0x315bce,
+ 0x318c92,
+ 0x3197cc,
+ 0x319d50,
+ 0x32bbd2,
+ 0x32cb8c,
+ 0x32d24d,
+ 0x32d58c,
+ 0x32fa91,
+ 0x330c4d,
+ 0x33420d,
+ 0x33480a,
+ 0x334a8c,
+ 0x33538c,
+ 0x3367cc,
+ 0x33704c,
+ 0x339fd3,
+ 0x33a5d0,
+ 0x33a9d0,
+ 0x33b64d,
+ 0x33bc4c,
+ 0x33cc49,
+ 0x33eb0d,
+ 0x33ee53,
+ 0x340f91,
+ 0x3413d3,
+ 0x3423cf,
+ 0x34278c,
+ 0x342a8f,
+ 0x342e4d,
+ 0x34344f,
+ 0x343810,
+ 0x34428e,
+ 0x34858e,
+ 0x348cd0,
+ 0x3498cd,
+ 0x34a24e,
+ 0x34a5cc,
+ 0x34b593,
+ 0x34d44e,
+ 0x34db90,
+ 0x34df91,
+ 0x34e3cf,
+ 0x34e793,
+ 0x34f1cd,
+ 0x34f50f,
+ 0x34f8ce,
+ 0x350010,
+ 0x350409,
+ 0x351150,
+ 0x35178f,
+ 0x351e0f,
+ 0x3521d2,
+ 0x35650e,
+ 0x357dcd,
+ 0x359f8d,
+ 0x35a2cd,
+ 0x35b34d,
+ 0x35b68d,
+ 0x35b9d0,
+ 0x35bdcb,
+ 0x35cbcc,
+ 0x35cf4c,
+ 0x35d24c,
+ 0x35d54e,
+ 0x36e7d0,
+ 0x370952,
+ 0x370dcb,
+ 0x37138e,
+ 0x37170e,
+ 0x371f8e,
+ 0x37298b,
+ 0x46772f56,
+ 0x37410d,
+ 0x374e14,
+ 0x3758cd,
+ 0x378215,
+ 0x37934d,
+ 0x379ccf,
+ 0x37a50f,
+ 0x37f24f,
+ 0x37f60e,
+ 0x380bcd,
+ 0x382451,
+ 0x38614c,
+ 0x38644c,
+ 0x38674b,
+ 0x386d0c,
+ 0x3882cf,
+ 0x388692,
+ 0x38904d,
+ 0x38a00c,
+ 0x38a48c,
+ 0x38a78d,
+ 0x38aacf,
+ 0x38ae8e,
+ 0x38d10c,
+ 0x38d6cd,
+ 0x38da0b,
+ 0x38e64c,
+ 0x38ebcd,
+ 0x38ef0e,
+ 0x38f389,
+ 0x38fd93,
+ 0x39144d,
+ 0x39178d,
+ 0x391d8c,
+ 0x39220e,
+ 0x39318f,
+ 0x39354c,
+ 0x39384d,
+ 0x393b8f,
+ 0x393f4c,
+ 0x39464c,
+ 0x394acc,
+ 0x394dcc,
+ 0x39548d,
+ 0x3957d2,
+ 0x395e4c,
+ 0x39614c,
+ 0x396451,
+ 0x39688f,
+ 0x396c4f,
+ 0x397013,
+ 0x397e4e,
+ 0x3983cf,
+ 0x39878c,
+ 0x46b98ace,
+ 0x398e4f,
+ 0x399216,
+ 0x39a692,
+ 0x39bd4c,
+ 0x39c78f,
+ 0x39ce0d,
+ 0x39d14f,
+ 0x39d50c,
+ 0x39d80d,
+ 0x39db4d,
+ 0x39f28e,
+ 0x3a10cc,
+ 0x3a13cc,
+ 0x3a16d0,
+ 0x3a4211,
+ 0x3a464b,
+ 0x3a4b8c,
+ 0x3a4e8e,
+ 0x3a7011,
+ 0x3a744e,
+ 0x3a77cd,
+ 0x3aeb8b,
+ 0x3af9cf,
+ 0x3b1054,
+ 0x220442,
+ 0x220442,
+ 0x201cc3,
+ 0x220442,
+ 0x201cc3,
+ 0x220442,
+ 0x204f02,
+ 0x381185,
+ 0x3a6d0c,
+ 0x220442,
+ 0x220442,
+ 0x204f02,
+ 0x220442,
+ 0x291145,
+ 0x2a5905,
+ 0x220442,
+ 0x220442,
+ 0x210842,
+ 0x291145,
+ 0x30d7c9,
+ 0x340c8c,
+ 0x220442,
+ 0x220442,
+ 0x220442,
+ 0x220442,
+ 0x381185,
+ 0x220442,
+ 0x220442,
+ 0x220442,
+ 0x220442,
+ 0x210842,
+ 0x30d7c9,
+ 0x220442,
+ 0x220442,
+ 0x220442,
+ 0x2a5905,
+ 0x220442,
+ 0x2a5905,
+ 0x340c8c,
+ 0x3a6d0c,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x249943,
+ 0x2257c3,
+ 0x105dc8,
+ 0x4e244,
+ 0x1a97c8,
+ 0x200882,
+ 0x47a04a82,
+ 0x23b383,
+ 0x2296c4,
+ 0x2099c3,
+ 0x2d9d44,
+ 0x2e0006,
+ 0x230243,
+ 0x230204,
+ 0x276f85,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x24388a,
+ 0x236286,
+ 0x371a8c,
+ 0x894c8,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x230c43,
+ 0x2ccc86,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x7982,
+ 0xd9547,
+ 0xb6b88,
+ 0xf20e,
+ 0x87092,
+ 0x8e0b,
+ 0x486fc045,
+ 0x48afc04c,
+ 0x40907,
+ 0x11864a,
+ 0x370d0,
+ 0x1702c8,
+ 0x7a287,
+ 0x574cb,
+ 0x10d109,
+ 0x1701c7,
+ 0x11cac7,
+ 0x7a187,
+ 0x1d6c6,
+ 0x132948,
+ 0x4901f186,
+ 0xa88cd,
+ 0x118010,
+ 0x494079c2,
+ 0x62e88,
+ 0x69707,
+ 0xa7e09,
+ 0x4dfc6,
+ 0x90cc8,
+ 0x6dc2,
+ 0x9d6ca,
+ 0xef947,
+ 0xe94c7,
+ 0xa3609,
+ 0xa5ec8,
+ 0x158485,
+ 0xded8e,
+ 0xd74e,
+ 0x11f0f,
+ 0x13349,
+ 0x3f209,
+ 0x6c9cb,
+ 0x81e4f,
+ 0x8db4c,
+ 0xac48b,
+ 0x15b008,
+ 0xebac7,
+ 0xf0ac8,
+ 0x13c2cb,
+ 0x144e8c,
+ 0x14cf8c,
+ 0x14fd0c,
+ 0x16efcd,
+ 0x295c8,
+ 0x40ec9,
+ 0x14934b,
+ 0xbefc6,
+ 0xceb05,
+ 0xd21d0,
+ 0x126a46,
+ 0x555c5,
+ 0xd4b88,
+ 0xda487,
+ 0xdac87,
+ 0x153d47,
+ 0xea3ca,
+ 0xb6a0a,
+ 0x7e246,
+ 0x8e90d,
+ 0x174a08,
+ 0x42b08,
+ 0x44dc9,
+ 0xe9f0c,
+ 0x16f1cb,
+ 0x12af84,
+ 0xf1dc9,
+ 0x126906,
+ 0x3d02,
+ 0x152dc6,
+ 0x6c2,
+ 0xc35c5,
+ 0x481,
+ 0x35f83,
+ 0x48f8b906,
+ 0x91043,
+ 0x95c2,
+ 0x35604,
+ 0xdc2,
+ 0x24284,
+ 0x9c2,
+ 0x7dc2,
+ 0x6442,
+ 0x52f42,
+ 0x1742,
+ 0xfc042,
+ 0x8c2,
+ 0x19fc2,
+ 0x33942,
+ 0x682,
+ 0x1842,
+ 0xb0a82,
+ 0x30743,
+ 0x3f42,
+ 0x202,
+ 0xbc82,
+ 0x5642,
+ 0x1a042,
+ 0x2f542,
+ 0xd4c2,
+ 0x42,
+ 0x4542,
+ 0x1042,
+ 0x2503,
+ 0x3dc2,
+ 0x2602,
+ 0xaf102,
+ 0xa482,
+ 0x120c2,
+ 0x67c2,
+ 0x326c2,
+ 0x8a42,
+ 0x2242,
+ 0x16ecc2,
+ 0x7a42,
+ 0x2c902,
+ 0x49943,
+ 0x1ec2,
+ 0xba82,
+ 0x1a82,
+ 0x10342,
+ 0x42fc5,
+ 0x9d02,
+ 0x3c782,
+ 0x394c3,
+ 0x3b82,
+ 0xb482,
+ 0x3e42,
+ 0x1b842,
+ 0x14202,
+ 0x6882,
+ 0x35c2,
+ 0x3d02,
+ 0x6fb47,
+ 0x2115c3,
+ 0x200882,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x213ac3,
+ 0x230c43,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x291083,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x219bc3,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x200041,
+ 0x219bc3,
+ 0x249943,
+ 0x209583,
+ 0x2257c3,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x262e83,
+ 0x2095c3,
+ 0x31e683,
+ 0x281c83,
+ 0x2a2283,
+ 0x24c2c3,
+ 0x2d9d43,
+ 0x201104,
+ 0x249943,
+ 0x2257c3,
+ 0x224043,
+ 0x262044,
+ 0x223a83,
+ 0x3803,
+ 0x201543,
+ 0x365a88,
+ 0x260c44,
+ 0x316a4a,
+ 0x37db06,
+ 0xdc784,
+ 0x3a2d07,
+ 0x21c7ca,
+ 0x33da89,
+ 0x3b27c7,
+ 0x20054a,
+ 0x323743,
+ 0x38000b,
+ 0x24c209,
+ 0x2c0385,
+ 0x2ca9c7,
+ 0x4a82,
+ 0x258403,
+ 0x333687,
+ 0x20ebc5,
+ 0x2d7909,
+ 0x230743,
+ 0x221a46,
+ 0x2baf83,
+ 0xe0b43,
+ 0xfba46,
+ 0x52b86,
+ 0x106a47,
+ 0x3aa706,
+ 0x216fc5,
+ 0x20b147,
+ 0x336e87,
+ 0x4b6d9d43,
+ 0x32cdc7,
+ 0x3607c3,
+ 0x27cdc5,
+ 0x201104,
+ 0x226808,
+ 0x2add4c,
+ 0x2ad045,
+ 0x364f86,
+ 0x333547,
+ 0x399c07,
+ 0x214247,
+ 0x2167c8,
+ 0x29ec8f,
+ 0x2aed05,
+ 0x23b487,
+ 0x28ba87,
+ 0x2a31ca,
+ 0x2f1a09,
+ 0x2da985,
+ 0x2db14a,
+ 0x274c6,
+ 0x2bb005,
+ 0x371004,
+ 0x2b7406,
+ 0x36d007,
+ 0x236ac7,
+ 0x353008,
+ 0x3a9ac5,
+ 0x20eac6,
+ 0x3a3dc5,
+ 0x2212c5,
+ 0x221744,
+ 0x33c6c7,
+ 0x2d1a0a,
+ 0x38c948,
+ 0x2ecc06,
+ 0x30c43,
+ 0x2cf185,
+ 0x22b9c6,
+ 0x200346,
+ 0x2554c6,
+ 0x219bc3,
+ 0x3892c7,
+ 0x28ba05,
+ 0x249943,
+ 0x3b2a4d,
+ 0x202883,
+ 0x353108,
+ 0x3acdc4,
+ 0x206f05,
+ 0x2a30c6,
+ 0x232c46,
+ 0x20cf07,
+ 0x2a22c7,
+ 0x267785,
+ 0x2257c3,
+ 0x326c87,
+ 0x338ec9,
+ 0x256dc9,
+ 0x269a0a,
+ 0x23dfc2,
+ 0x27cd84,
+ 0x2d5504,
+ 0x219947,
+ 0x238488,
+ 0x2dbac9,
+ 0x3a3349,
+ 0x2dcb07,
+ 0x2ae806,
+ 0xdeb06,
+ 0x2dfdc4,
+ 0x2e03ca,
+ 0x2e40c8,
+ 0x2e47c9,
+ 0x294a46,
+ 0x302a85,
+ 0x38c808,
+ 0x2c094a,
+ 0x25a383,
+ 0x234a06,
+ 0x2dcc07,
+ 0x20f545,
+ 0x3acc85,
+ 0x23d2c3,
+ 0x24ccc4,
+ 0x225b45,
+ 0x283447,
+ 0x2fe185,
+ 0x2e97c6,
+ 0x12e785,
+ 0x222bc3,
+ 0x222bc9,
+ 0x22ba8c,
+ 0x2aa18c,
+ 0x2c7348,
+ 0x2931c7,
+ 0x2ef348,
+ 0x2efeca,
+ 0x2f104b,
+ 0x24c348,
+ 0x365088,
+ 0x362a06,
+ 0x322e85,
+ 0x323dca,
+ 0x216005,
+ 0x201582,
+ 0x2be287,
+ 0x26cc46,
+ 0x350c85,
+ 0x3418c9,
+ 0x27c585,
+ 0x381845,
+ 0x27c989,
+ 0x22b846,
+ 0x36d448,
+ 0x261483,
+ 0x3aa846,
+ 0x272f06,
+ 0x2fed85,
+ 0x2fed89,
+ 0x2dc209,
+ 0x23e307,
+ 0xfec04,
+ 0x2fec07,
+ 0x3a3249,
+ 0x21c9c5,
+ 0x2bf88,
+ 0x352cc5,
+ 0x3529c5,
+ 0x22b209,
+ 0x20e3c2,
+ 0x223884,
+ 0x205e42,
+ 0x203dc2,
+ 0x2953c5,
+ 0x2dc508,
+ 0x377f85,
+ 0x2bd743,
+ 0x2bd745,
+ 0x2cad83,
+ 0x20fcc2,
+ 0x266704,
+ 0x233443,
+ 0x200d82,
+ 0x358c44,
+ 0x2d61c3,
+ 0x2137c2,
+ 0x295443,
+ 0x28acc4,
+ 0x2b51c3,
+ 0x2375c4,
+ 0x203182,
+ 0x270583,
+ 0x22e443,
+ 0x2063c2,
+ 0x2e74c2,
+ 0x2dc049,
+ 0x2030c2,
+ 0x287c04,
+ 0x207a02,
+ 0x38c684,
+ 0x2ae7c4,
+ 0x2e2484,
+ 0x203d02,
+ 0x2397c2,
+ 0x210b03,
+ 0x2f04c3,
+ 0x23aa84,
+ 0x261504,
+ 0x2c5c84,
+ 0x2dc404,
+ 0x2fdc83,
+ 0x346e83,
+ 0x227444,
+ 0x2ffa04,
+ 0x2ffd06,
+ 0x242f82,
+ 0x204a82,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x200882,
+ 0x323743,
+ 0x258403,
+ 0x230743,
+ 0x201d03,
+ 0x2d9d43,
+ 0x201104,
+ 0x2dc304,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x2e0c44,
+ 0x274903,
+ 0x2b2483,
+ 0x341804,
+ 0x352ac6,
+ 0x203403,
+ 0x224f03,
+ 0x218903,
+ 0x2b0703,
+ 0x206f43,
+ 0x230c43,
+ 0x2fc2c5,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x2d91c3,
+ 0x2a3e03,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x202503,
+ 0x249943,
+ 0x231344,
+ 0x2257c3,
+ 0x29ca84,
+ 0x2b7205,
+ 0x204a82,
+ 0x201802,
+ 0x2095c2,
+ 0x201cc2,
+ 0x2016c2,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+ 0x224284,
+ 0x894c8,
+ 0x258403,
+ 0x202883,
+ 0x2054c4,
+ 0x894c8,
+ 0x258403,
+ 0x2446c4,
+ 0x201104,
+ 0x202883,
+ 0x202542,
+ 0x2257c3,
+ 0x244443,
+ 0x2e82c5,
+ 0x201582,
+ 0x2ffb43,
+ 0x200882,
+ 0x894c8,
+ 0x204a82,
+ 0x230743,
+ 0x2d9d43,
+ 0x201042,
+ 0x2257c3,
+ 0x200882,
+ 0x200707,
+ 0x24f9c5,
+ 0x2b9f84,
+ 0x385a06,
+ 0x33fe0b,
+ 0x261209,
+ 0x364ec6,
+ 0x33f2c9,
+ 0x2b1988,
+ 0x207103,
+ 0x894c8,
+ 0x225a07,
+ 0x370548,
+ 0x252f03,
+ 0x337784,
+ 0x33778b,
+ 0x260585,
+ 0x2f4a48,
+ 0x2e7289,
+ 0x258e43,
+ 0x258403,
+ 0x204288,
+ 0x2eddc7,
+ 0x253506,
+ 0x230743,
+ 0x253007,
+ 0x2d9d43,
+ 0x337f06,
+ 0x202503,
+ 0x22e307,
+ 0x233d47,
+ 0x390247,
+ 0x33c645,
+ 0x209e83,
+ 0x206d0b,
+ 0x265588,
+ 0x22c848,
+ 0x339086,
+ 0x264209,
+ 0x3234c7,
+ 0x2f9c05,
+ 0x377284,
+ 0x33dcc8,
+ 0x236c4a,
+ 0x236e89,
+ 0x33d303,
+ 0x26abc5,
+ 0x2aabc3,
+ 0x22a586,
+ 0x2b9dc4,
+ 0x33b0c8,
+ 0x3903cb,
+ 0x33d1c5,
+ 0x2b6f86,
+ 0x2b9cc5,
+ 0x2ba388,
+ 0x2bb147,
+ 0x365407,
+ 0x316647,
+ 0x2127c4,
+ 0x308a07,
+ 0x295cc6,
+ 0x219bc3,
+ 0x2c4208,
+ 0x248e83,
+ 0x2cb048,
+ 0x2d31c5,
+ 0x373d88,
+ 0x230947,
+ 0x249943,
+ 0x23fbc3,
+ 0x2891c4,
+ 0x30fc47,
+ 0x209a43,
+ 0x233e0b,
+ 0x2141c3,
+ 0x248e44,
+ 0x2e8348,
+ 0x2257c3,
+ 0x2ea8c5,
+ 0x31e505,
+ 0x373c86,
+ 0x2102c5,
+ 0x2d3584,
+ 0x20bb42,
+ 0x2e4a83,
+ 0x37108a,
+ 0x3a20c3,
+ 0x39fd49,
+ 0x308706,
+ 0x214008,
+ 0x28a806,
+ 0x220a87,
+ 0x2e4ec8,
+ 0x2ea6c8,
+ 0x319c83,
+ 0x295483,
+ 0x275889,
+ 0x2f3383,
+ 0x344686,
+ 0x24f646,
+ 0x314a86,
+ 0x3a8b09,
+ 0x2eaac4,
+ 0x2112c3,
+ 0x2da885,
+ 0x347249,
+ 0x2249c3,
+ 0x319b44,
+ 0x36cb04,
+ 0x39e084,
+ 0x2b43c6,
+ 0x20b4c3,
+ 0x20b4c8,
+ 0x2513c8,
+ 0x2f8e06,
+ 0x2f9a0b,
+ 0x2f9d48,
+ 0x2f9f4b,
+ 0x2fc689,
+ 0x2fb947,
+ 0x2fcb08,
+ 0x2fd6c3,
+ 0x22e886,
+ 0x20f747,
+ 0x2969c5,
+ 0x348889,
+ 0x263b4d,
+ 0x213e51,
+ 0x232d85,
+ 0x200882,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2afc84,
+ 0x2d9d43,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x230c43,
+ 0x249943,
+ 0x2257c3,
+ 0x29ca83,
+ 0x2161c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x230c43,
+ 0x249943,
+ 0x2257c3,
+ 0x21ce42,
+ 0x200141,
+ 0x200882,
+ 0x200001,
+ 0x312542,
+ 0x894c8,
+ 0x21b385,
+ 0x200481,
+ 0x58403,
+ 0x200741,
+ 0x200081,
+ 0x201181,
+ 0x233302,
+ 0x368c84,
+ 0x381103,
+ 0x2007c1,
+ 0x200901,
+ 0x200041,
+ 0x2001c1,
+ 0x390647,
+ 0x2bda4f,
+ 0x2d0986,
+ 0x2000c1,
+ 0x25a446,
+ 0x200341,
+ 0x200cc1,
+ 0x347b0e,
+ 0x200e81,
+ 0x2257c3,
+ 0x200ac1,
+ 0x26c8c5,
+ 0x20bb42,
+ 0x23d1c5,
+ 0x200c01,
+ 0x200241,
+ 0x200a01,
+ 0x201582,
+ 0x2002c1,
+ 0x203701,
+ 0x203fc1,
+ 0x200781,
+ 0x200641,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x20f0c3,
+ 0x258403,
+ 0x2d9d43,
+ 0x8b2c8,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x14d7f48,
+ 0x894c8,
+ 0x3f5c4,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x249943,
+ 0x2257c3,
+ 0x203803,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2afc84,
+ 0x2257c3,
+ 0x293485,
+ 0x328204,
+ 0x258403,
+ 0x249943,
+ 0x2257c3,
+ 0x27a8a,
+ 0x204a82,
+ 0x258403,
+ 0x22f209,
+ 0x230743,
+ 0x237cc9,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x2dfbc8,
+ 0x214507,
+ 0x2e82c5,
+ 0x200707,
+ 0x33fe0b,
+ 0x37d448,
+ 0x33f2c9,
+ 0x225a07,
+ 0x204288,
+ 0x337f06,
+ 0x233d47,
+ 0x22c848,
+ 0x339086,
+ 0x3234c7,
+ 0x236e89,
+ 0x386ac9,
+ 0x2b6f86,
+ 0x2b8c85,
+ 0x2c4208,
+ 0x248e83,
+ 0x2cb048,
+ 0x230947,
+ 0x209a43,
+ 0x3333c7,
+ 0x2102c5,
+ 0x2daf88,
+ 0x3554c5,
+ 0x295483,
+ 0x2c7b89,
+ 0x2aaa47,
+ 0x319b44,
+ 0x36cb04,
+ 0x2f9a0b,
+ 0x2f9d48,
+ 0x2fb947,
+ 0x258403,
+ 0x230743,
+ 0x2095c3,
+ 0x2257c3,
+ 0x225dc3,
+ 0x2d9d43,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x200882,
+ 0x204a82,
+ 0x2257c3,
+ 0x894c8,
+ 0x200882,
+ 0x204a82,
+ 0x2095c2,
+ 0x201042,
+ 0x200342,
+ 0x249943,
+ 0x2016c2,
+ 0x200882,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2095c2,
+ 0x2d9d43,
+ 0x202503,
+ 0x219bc3,
+ 0x2021c4,
+ 0x249943,
+ 0x2174c3,
+ 0x2257c3,
+ 0x2eaac4,
+ 0x224043,
+ 0x2d9d43,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x202883,
+ 0x2257c3,
+ 0x39c207,
+ 0x258403,
+ 0x251547,
+ 0x2f0c46,
+ 0x219443,
+ 0x20e8c3,
+ 0x2d9d43,
+ 0x21bbc3,
+ 0x201104,
+ 0x286104,
+ 0x2d3646,
+ 0x2284c3,
+ 0x249943,
+ 0x2257c3,
+ 0x293485,
+ 0x20cd04,
+ 0x318b43,
+ 0x223643,
+ 0x2be287,
+ 0x2f5545,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x21fd82,
+ 0x374b43,
+ 0x27bc83,
+ 0x323743,
+ 0x55e58403,
+ 0x201e02,
+ 0x230743,
+ 0x2099c3,
+ 0x2d9d43,
+ 0x201104,
+ 0x265743,
+ 0x2aed03,
+ 0x219bc3,
+ 0x2021c4,
+ 0x56205702,
+ 0x249943,
+ 0x2257c3,
+ 0x22f903,
+ 0x242103,
+ 0x21ce42,
+ 0x224043,
+ 0x894c8,
+ 0x2d9d43,
+ 0x2ee604,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x232ec4,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x30f384,
+ 0x30ac84,
+ 0x2ccc86,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x26cc46,
+ 0x1d94b,
+ 0x1f186,
+ 0x23e8a,
+ 0xfd78a,
+ 0x894c8,
+ 0x3a3d84,
+ 0x258403,
+ 0x323704,
+ 0x230743,
+ 0x244f04,
+ 0x2d9d43,
+ 0x254943,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x32538b,
+ 0x39de8a,
+ 0x3b1c4c,
+ 0x200882,
+ 0x204a82,
+ 0x2095c2,
+ 0x2a8f85,
+ 0x201104,
+ 0x202242,
+ 0x219bc3,
+ 0x30ac84,
+ 0x201cc2,
+ 0x2016c2,
+ 0x2057c2,
+ 0x21ce42,
+ 0x123743,
+ 0x2ec0c9,
+ 0x24f4c8,
+ 0x35c349,
+ 0x233b89,
+ 0x2411ca,
+ 0x24954a,
+ 0x20b782,
+ 0x219fc2,
+ 0x4a82,
+ 0x258403,
+ 0x207802,
+ 0x23b646,
+ 0x351c82,
+ 0x200d02,
+ 0x3a004e,
+ 0x2705ce,
+ 0x27a987,
+ 0x325e87,
+ 0x26fc02,
+ 0x230743,
+ 0x2d9d43,
+ 0x203542,
+ 0x201042,
+ 0x29e90f,
+ 0x214082,
+ 0x2400c7,
+ 0x339287,
+ 0x2503c7,
+ 0x26a14c,
+ 0x27090c,
+ 0x204704,
+ 0x26abca,
+ 0x2953c2,
+ 0x20a482,
+ 0x2b1384,
+ 0x222942,
+ 0x2bc382,
+ 0x270b44,
+ 0x2175c2,
+ 0x2120c2,
+ 0x339107,
+ 0x224945,
+ 0x2326c2,
+ 0x29e884,
+ 0x36ecc2,
+ 0x2cee08,
+ 0x249943,
+ 0x3a9008,
+ 0x208fc2,
+ 0x231c05,
+ 0x3a92c6,
+ 0x2257c3,
+ 0x209d02,
+ 0x2dbd07,
+ 0xbb42,
+ 0x26ff05,
+ 0x394505,
+ 0x203ec2,
+ 0x225742,
+ 0x31710a,
+ 0x26760a,
+ 0x219b82,
+ 0x2fbf44,
+ 0x2013c2,
+ 0x27cc48,
+ 0x20a242,
+ 0x22dec8,
+ 0x2f61c7,
+ 0x2f64c9,
+ 0x26ff82,
+ 0x2fc8c5,
+ 0x24f985,
+ 0x2c154b,
+ 0x2c228c,
+ 0x22e188,
+ 0x2fcc88,
+ 0x242f82,
+ 0x20cfc2,
+ 0x200882,
+ 0x894c8,
+ 0x204a82,
+ 0x258403,
+ 0x2095c2,
+ 0x201cc2,
+ 0x2016c2,
+ 0x2257c3,
+ 0x2057c2,
+ 0x200882,
+ 0x58204a82,
+ 0x586d9d43,
+ 0x332283,
+ 0x202242,
+ 0x249943,
+ 0x39a3c3,
+ 0x2257c3,
+ 0x2d8843,
+ 0x26fc46,
+ 0x16161c3,
+ 0x894c8,
+ 0x555c5,
+ 0x65b07,
+ 0x58e00182,
+ 0x59200dc2,
+ 0x59603442,
+ 0x59a00f82,
+ 0x59e0dec2,
+ 0x5a201742,
+ 0x5a604a82,
+ 0x5aa06082,
+ 0x5ae1dd82,
+ 0x5b201842,
+ 0x2705c3,
+ 0xb444,
+ 0x2017c3,
+ 0x5b616342,
+ 0x5ba022c2,
+ 0x44c07,
+ 0x5be2c282,
+ 0x5c200902,
+ 0x5c60b642,
+ 0x5ca0b9c2,
+ 0x5ce04542,
+ 0x5d201042,
+ 0xba545,
+ 0x222383,
+ 0x31ca44,
+ 0x5d622942,
+ 0x5da34082,
+ 0x5de00102,
+ 0x77a0b,
+ 0x5e200982,
+ 0x5ea0a582,
+ 0x5ee02242,
+ 0x5f200342,
+ 0x5f653702,
+ 0x5fa08f82,
+ 0x5fe0dc02,
+ 0x60207a42,
+ 0x60605702,
+ 0x60a00cc2,
+ 0x60e01cc2,
+ 0x61227982,
+ 0x6160d302,
+ 0x61a3d982,
+ 0x132d84,
+ 0x319c43,
+ 0x61e092c2,
+ 0x62213e02,
+ 0x62601ac2,
+ 0x62a02102,
+ 0x62e016c2,
+ 0x63200d82,
+ 0xda747,
+ 0x63605fc2,
+ 0x63a024c2,
+ 0x63e057c2,
+ 0x64205202,
+ 0xe9f0c,
+ 0x6461f6c2,
+ 0x64a712c2,
+ 0x64e00f02,
+ 0x65201502,
+ 0x656049c2,
+ 0x65a41342,
+ 0x65e03702,
+ 0x6620eb42,
+ 0x66673282,
+ 0x66a736c2,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x5e665743,
+ 0x27da43,
+ 0x2fc344,
+ 0x24f3c6,
+ 0x2e4b43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x200482,
+ 0x200482,
+ 0x265743,
+ 0x27da43,
+ 0x67258403,
+ 0x230743,
+ 0x365d83,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x894c8,
+ 0x204a82,
+ 0x258403,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x2054c4,
+ 0x204a82,
+ 0x258403,
+ 0x356443,
+ 0x230743,
+ 0x2446c4,
+ 0x2095c3,
+ 0x2d9d43,
+ 0x201104,
+ 0x202503,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x244443,
+ 0x2e82c5,
+ 0x27c343,
+ 0x224043,
+ 0x204a82,
+ 0x258403,
+ 0x265743,
+ 0x249943,
+ 0x2257c3,
+ 0x200882,
+ 0x323743,
+ 0x894c8,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x2e0006,
+ 0x201104,
+ 0x202503,
+ 0x2021c4,
+ 0x249943,
+ 0x2257c3,
+ 0x219683,
+ 0x258403,
+ 0x230743,
+ 0x249943,
+ 0x2257c3,
+ 0x258403,
+ 0x1f186,
+ 0x230743,
+ 0x2d9d43,
+ 0xd0d86,
+ 0x249943,
+ 0x2257c3,
+ 0x307288,
+ 0x30a189,
+ 0x31a149,
+ 0x32adc8,
+ 0x37ab88,
+ 0x37ab89,
+ 0x33305,
+ 0x200882,
+ 0x2f5385,
+ 0x22eb43,
+ 0x69e04a82,
+ 0x230743,
+ 0x2d9d43,
+ 0x225847,
+ 0x206f43,
+ 0x219bc3,
+ 0x249943,
+ 0x209583,
+ 0x20dd83,
+ 0x202883,
+ 0x2257c3,
+ 0x236286,
+ 0x201582,
+ 0x224043,
+ 0x894c8,
+ 0x200882,
+ 0x323743,
+ 0x204a82,
+ 0x258403,
+ 0x230743,
+ 0x2d9d43,
+ 0x201104,
+ 0x219bc3,
+ 0x249943,
+ 0x2257c3,
+ 0x2161c3,
+}
+
+// children is the list of nodes' children, the parent's wildcard bit and the
+// parent's node type. If a node has no children then its children index
+// will be in the range [0, 6), depending on the wildcard bit and node type.
+//
+// The layout within the uint32, from MSB to LSB, is:
+// [ 1 bits] unused
+// [ 1 bits] wildcard bit
+// [ 2 bits] node type
+// [14 bits] high nodes index (exclusive) of children
+// [14 bits] low nodes index (inclusive) of children
+var children = [...]uint32{
+ 0x0,
+ 0x10000000,
+ 0x20000000,
+ 0x40000000,
+ 0x50000000,
+ 0x60000000,
+ 0x1858610,
+ 0x185c616,
+ 0x187c617,
+ 0x19d861f,
+ 0x19ec676,
+ 0x1a0067b,
+ 0x1a10680,
+ 0x1a2c684,
+ 0x1a3068b,
+ 0x1a4868c,
+ 0x1a6c692,
+ 0x1a7069b,
+ 0x1a8869c,
+ 0x1a8c6a2,
+ 0x1aa86a3,
+ 0x1aac6aa,
+ 0x1af46ab,
+ 0x1af86bd,
+ 0x1b186be,
+ 0x1b2c6c6,
+ 0x1b306cb,
+ 0x1b606cc,
+ 0x1b7c6d8,
+ 0x1ba46df,
+ 0x1bac6e9,
+ 0x1bb06eb,
+ 0x1c446ec,
+ 0x1c58711,
+ 0x1c6c716,
+ 0x1c9871b,
+ 0x1ca8726,
+ 0x1cbc72a,
+ 0x1ce072f,
+ 0x1df8738,
+ 0x1dfc77e,
+ 0x1e1077f,
+ 0x1e24784,
+ 0x1e2c789,
+ 0x1e3c78b,
+ 0x1e4078f,
+ 0x1e58790,
+ 0x1ea0796,
+ 0x1eb47a8,
+ 0x1eb87ad,
+ 0x1ebc7ae,
+ 0x1ec47af,
+ 0x1f007b1,
+ 0x61f047c0,
+ 0x1f187c1,
+ 0x1f1c7c6,
+ 0x1f2c7c7,
+ 0x1fdc7cb,
+ 0x1fe07f7,
+ 0x21fe87f8,
+ 0x21fec7fa,
+ 0x1ff07fb,
+ 0x20247fc,
+ 0x2028809,
+ 0x245880a,
+ 0x224a8916,
+ 0x224ac92a,
+ 0x24d492b,
+ 0x24dc935,
+ 0x224e0937,
+ 0x24e8938,
+ 0x224f893a,
+ 0x224fc93e,
+ 0x250893f,
+ 0x250c942,
+ 0x22510943,
+ 0x252c944,
+ 0x254494b,
+ 0x2548951,
+ 0x2558952,
+ 0x2560956,
+ 0x22594958,
+ 0x2598965,
+ 0x25a8966,
+ 0x25d496a,
+ 0x25ec975,
+ 0x260097b,
+ 0x2628980,
+ 0x264898a,
+ 0x2678992,
+ 0x26a099e,
+ 0x26a49a8,
+ 0x26c89a9,
+ 0x26cc9b2,
+ 0x26e09b3,
+ 0x26e49b8,
+ 0x26e89b9,
+ 0x27089ba,
+ 0x270c9c2,
+ 0x271c9c3,
+ 0x27909c7,
+ 0x27ac9e4,
+ 0x27b89eb,
+ 0x27cc9ee,
+ 0x27e49f3,
+ 0x27f89f9,
+ 0x28109fe,
+ 0x2828a04,
+ 0x2840a0a,
+ 0x285ca10,
+ 0x2874a17,
+ 0x28d4a1d,
+ 0x28eca35,
+ 0x2900a3b,
+ 0x2944a40,
+ 0x29c4a51,
+ 0x29f0a71,
+ 0x29f4a7c,
+ 0x29fca7d,
+ 0x2a1ca7f,
+ 0x2a20a87,
+ 0x2a3ca88,
+ 0x2a44a8f,
+ 0x2a78a91,
+ 0x2ab0a9e,
+ 0x2ab4aac,
+ 0x2af0aad,
+ 0x2b08abc,
+ 0x2b2cac2,
+ 0x2b4cacb,
+ 0x3110ad3,
+ 0x311cc44,
+ 0x313cc47,
+ 0x32f8c4f,
+ 0x33c8cbe,
+ 0x3438cf2,
+ 0x3490d0e,
+ 0x3578d24,
+ 0x35d0d5e,
+ 0x360cd74,
+ 0x3708d83,
+ 0x37d4dc2,
+ 0x386cdf5,
+ 0x38fce1b,
+ 0x3960e3f,
+ 0x3b98e58,
+ 0x3c50ee6,
+ 0x3d1cf14,
+ 0x3d68f47,
+ 0x3df0f5a,
+ 0x3e2cf7c,
+ 0x3e7cf8b,
+ 0x3ef4f9f,
+ 0x63ef8fbd,
+ 0x63efcfbe,
+ 0x63f00fbf,
+ 0x3f7cfc0,
+ 0x3fe0fdf,
+ 0x405cff8,
+ 0x40d5017,
+ 0x4155035,
+ 0x41c1055,
+ 0x42ed070,
+ 0x43450bb,
+ 0x643490d1,
+ 0x43e10d2,
+ 0x44690f8,
+ 0x44b511a,
+ 0x451d12d,
+ 0x45c5147,
+ 0x468d171,
+ 0x46f51a3,
+ 0x48091bd,
+ 0x6480d202,
+ 0x64811203,
+ 0x486d204,
+ 0x48c921b,
+ 0x4959232,
+ 0x49d5256,
+ 0x4a19275,
+ 0x4afd286,
+ 0x4b312bf,
+ 0x4b912cc,
+ 0x4c052e4,
+ 0x4c8d301,
+ 0x4ccd323,
+ 0x4d3d333,
+ 0x64d4134f,
+ 0x64d45350,
+ 0x24d49351,
+ 0x4d61352,
+ 0x4d7d358,
+ 0x4dc135f,
+ 0x4dd1370,
+ 0x4de9374,
+ 0x4e6137a,
+ 0x4e75398,
+ 0x4e8d39d,
+ 0x4eb13a3,
+ 0x4eb53ac,
+ 0x4ebd3ad,
+ 0x4ed13af,
+ 0x4eed3b4,
+ 0x4ef13bb,
+ 0x4ef93bc,
+ 0x4f353be,
+ 0x4f493cd,
+ 0x4f513d2,
+ 0x4f593d4,
+ 0x4f5d3d6,
+ 0x4f813d7,
+ 0x4fa53e0,
+ 0x4fbd3e9,
+ 0x4fc13ef,
+ 0x4fc93f0,
+ 0x4fcd3f2,
+ 0x50213f3,
+ 0x5045408,
+ 0x5065411,
+ 0x5081419,
+ 0x5091420,
+ 0x50a5424,
+ 0x50a9429,
+ 0x50b142a,
+ 0x50c542c,
+ 0x50d5431,
+ 0x50d9435,
+ 0x50f5436,
+ 0x598543d,
+ 0x59bd661,
+ 0x59e966f,
+ 0x5a0167a,
+ 0x5a21680,
+ 0x65a25688,
+ 0x5a69689,
+ 0x5a7169a,
+ 0x25a7569c,
+ 0x25a7969d,
+ 0x5a7d69e,
+ 0x5b9d69f,
+ 0x25ba16e7,
+ 0x25ba96e8,
+ 0x25bb16ea,
+ 0x25bbd6ec,
+ 0x5bc16ef,
+ 0x5be96f0,
+ 0x5c116fa,
+ 0x5c15704,
+ 0x25c4d705,
+ 0x5c5d713,
+ 0x67b5717,
+ 0x67b99ed,
+ 0x67bd9ee,
+ 0x267c19ef,
+ 0x67c59f0,
+ 0x267c99f1,
+ 0x67cd9f2,
+ 0x267d99f3,
+ 0x67dd9f6,
+ 0x67e19f7,
+ 0x267e59f8,
+ 0x67e99f9,
+ 0x267f19fa,
+ 0x67f59fc,
+ 0x67f99fd,
+ 0x268099fe,
+ 0x680da02,
+ 0x6811a03,
+ 0x6815a04,
+ 0x6819a05,
+ 0x2681da06,
+ 0x6821a07,
+ 0x6825a08,
+ 0x6829a09,
+ 0x682da0a,
+ 0x26835a0b,
+ 0x6839a0d,
+ 0x683da0e,
+ 0x6841a0f,
+ 0x26845a10,
+ 0x6849a11,
+ 0x26851a12,
+ 0x26855a14,
+ 0x6871a15,
+ 0x687da1c,
+ 0x68bda1f,
+ 0x68c1a2f,
+ 0x68e5a30,
+ 0x6a29a39,
+ 0x26a31a8a,
+ 0x26a35a8c,
+ 0x26a39a8d,
+ 0x6a41a8e,
+ 0x6b1da90,
+ 0x6b21ac7,
+ 0x6b4dac8,
+ 0x6b6dad3,
+ 0x6b79adb,
+ 0x6b99ade,
+ 0x6bd1ae6,
+ 0x6e69af4,
+ 0x6f25b9a,
+ 0x6f39bc9,
+ 0x6f6dbce,
+ 0x6f99bdb,
+ 0x6fb5be6,
+ 0x6fd9bed,
+ 0x6ff1bf6,
+ 0x700dbfc,
+ 0x7031c03,
+ 0x7041c0c,
+ 0x7071c10,
+ 0x708dc1c,
+ 0x7299c23,
+ 0x72bdca6,
+ 0x72ddcaf,
+ 0x72f1cb7,
+ 0x7305cbc,
+ 0x7325cc1,
+ 0x73c9cc9,
+ 0x73e5cf2,
+ 0x7401cf9,
+ 0x7405d00,
+ 0x7409d01,
+ 0x740dd02,
+ 0x7421d03,
+ 0x7441d08,
+ 0x744dd10,
+ 0x7451d13,
+ 0x7481d14,
+ 0x7501d20,
+ 0x7515d40,
+ 0x7519d45,
+ 0x7531d46,
+ 0x753dd4c,
+ 0x7541d4f,
+ 0x755dd50,
+ 0x7599d57,
+ 0x759dd66,
+ 0x75bdd67,
+ 0x760dd6f,
+ 0x7625d83,
+ 0x7679d89,
+ 0x767dd9e,
+ 0x7681d9f,
+ 0x76c5da0,
+ 0x76d5db1,
+ 0x770ddb5,
+ 0x773ddc3,
+ 0x7879dcf,
+ 0x789de1e,
+ 0x78c9e27,
+ 0x78d1e32,
+ 0x78d5e34,
+ 0x79e1e35,
+ 0x79ede78,
+ 0x79f9e7b,
+ 0x7a05e7e,
+ 0x7a11e81,
+ 0x7a1de84,
+ 0x7a29e87,
+ 0x7a35e8a,
+ 0x7a41e8d,
+ 0x7a4de90,
+ 0x7a59e93,
+ 0x7a65e96,
+ 0x7a71e99,
+ 0x7a7de9c,
+ 0x7a85e9f,
+ 0x7a91ea1,
+ 0x7a9dea4,
+ 0x7aa9ea7,
+ 0x7ab5eaa,
+ 0x7ac1ead,
+ 0x7acdeb0,
+ 0x7ad9eb3,
+ 0x7ae5eb6,
+ 0x7af1eb9,
+ 0x7afdebc,
+ 0x7b09ebf,
+ 0x7b15ec2,
+ 0x7b21ec5,
+ 0x7b2dec8,
+ 0x7b39ecb,
+ 0x7b45ece,
+ 0x7b51ed1,
+ 0x7b59ed4,
+ 0x7b65ed6,
+ 0x7b71ed9,
+ 0x7b7dedc,
+ 0x7b89edf,
+ 0x7b95ee2,
+ 0x7ba1ee5,
+ 0x7badee8,
+ 0x7bb9eeb,
+ 0x7bc5eee,
+ 0x7bd1ef1,
+ 0x7bddef4,
+ 0x7be9ef7,
+ 0x7bf5efa,
+ 0x7bfdefd,
+ 0x7c09eff,
+ 0x7c15f02,
+ 0x7c21f05,
+ 0x7c2df08,
+ 0x7c39f0b,
+ 0x7c45f0e,
+ 0x7c51f11,
+ 0x7c5df14,
+ 0x7c61f17,
+ 0x7c6df18,
+ 0x7c85f1b,
+ 0x7c89f21,
+ 0x7c99f22,
+ 0x7cb1f26,
+ 0x7cf5f2c,
+ 0x7d09f3d,
+ 0x7d3df42,
+ 0x7d4df4f,
+ 0x7d69f53,
+ 0x7d81f5a,
+ 0x7d85f60,
+ 0x27dc9f61,
+ 0x7dcdf72,
+ 0x7df9f73,
+}
+
+// max children 424 (capacity 511)
+// max text offset 27866 (capacity 32767)
+// max text length 36 (capacity 63)
+// max hi 8062 (capacity 16383)
+// max lo 8051 (capacity 16383)
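
[Editor's note: the layout comment in table.go above describes how each children entry packs, from MSB to LSB, an unused bit, a wildcard bit, a two-bit node type, and a 14-bit high / 14-bit low pair giving the half-open [lo, hi) range of child nodes. As a reading aid only — this sketch is not part of the vendored file, the constant and function names are illustrative, and the actual decoding lives elsewhere in the package — unpacking one entry could look like this:

package main

import "fmt"

// Bit widths taken from the layout comment in table.go: from MSB to LSB the
// uint32 holds 1 unused bit, 1 wildcard bit, 2 node-type bits, a 14-bit high
// node index (exclusive) and a 14-bit low node index (inclusive).
const (
	childrenBitsLo       = 14
	childrenBitsHi       = 14
	childrenBitsNodeType = 2
)

// decodeChildren is an illustrative helper (not part of the generated table)
// that unpacks one children entry into its four fields.
func decodeChildren(c uint32) (lo, hi, nodeType uint32, wildcard bool) {
	lo = c & (1<<childrenBitsLo - 1)
	hi = (c >> childrenBitsLo) & (1<<childrenBitsHi - 1)
	nodeType = (c >> (childrenBitsLo + childrenBitsHi)) & (1<<childrenBitsNodeType - 1)
	wildcard = (c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType))&1 == 1
	return lo, hi, nodeType, wildcard
}

func main() {
	// 0x10000000 is the second sentinel entry in the children table above:
	// a node with no children (lo == hi == 0), node type 1, and no wildcard,
	// which is why childless nodes get a children index in the range [0, 6).
	lo, hi, nodeType, wildcard := decodeChildren(0x10000000)
	fmt.Printf("lo=%d hi=%d type=%d wildcard=%v\n", lo, hi, nodeType, wildcard)
}

End of editor's note.]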
diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go
new file mode 100644
index 000000000..9e921e718
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/table_test.go
@@ -0,0 +1,16101 @@
+// generated by go run gen.go; DO NOT EDIT
+
+package publicsuffix
+
+var rules = [...]string{
+ "ac",
+ "com.ac",
+ "edu.ac",
+ "gov.ac",
+ "net.ac",
+ "mil.ac",
+ "org.ac",
+ "ad",
+ "nom.ad",
+ "ae",
+ "co.ae",
+ "net.ae",
+ "org.ae",
+ "sch.ae",
+ "ac.ae",
+ "gov.ae",
+ "mil.ae",
+ "aero",
+ "accident-investigation.aero",
+ "accident-prevention.aero",
+ "aerobatic.aero",
+ "aeroclub.aero",
+ "aerodrome.aero",
+ "agents.aero",
+ "aircraft.aero",
+ "airline.aero",
+ "airport.aero",
+ "air-surveillance.aero",
+ "airtraffic.aero",
+ "air-traffic-control.aero",
+ "ambulance.aero",
+ "amusement.aero",
+ "association.aero",
+ "author.aero",
+ "ballooning.aero",
+ "broker.aero",
+ "caa.aero",
+ "cargo.aero",
+ "catering.aero",
+ "certification.aero",
+ "championship.aero",
+ "charter.aero",
+ "civilaviation.aero",
+ "club.aero",
+ "conference.aero",
+ "consultant.aero",
+ "consulting.aero",
+ "control.aero",
+ "council.aero",
+ "crew.aero",
+ "design.aero",
+ "dgca.aero",
+ "educator.aero",
+ "emergency.aero",
+ "engine.aero",
+ "engineer.aero",
+ "entertainment.aero",
+ "equipment.aero",
+ "exchange.aero",
+ "express.aero",
+ "federation.aero",
+ "flight.aero",
+ "freight.aero",
+ "fuel.aero",
+ "gliding.aero",
+ "government.aero",
+ "groundhandling.aero",
+ "group.aero",
+ "hanggliding.aero",
+ "homebuilt.aero",
+ "insurance.aero",
+ "journal.aero",
+ "journalist.aero",
+ "leasing.aero",
+ "logistics.aero",
+ "magazine.aero",
+ "maintenance.aero",
+ "media.aero",
+ "microlight.aero",
+ "modelling.aero",
+ "navigation.aero",
+ "parachuting.aero",
+ "paragliding.aero",
+ "passenger-association.aero",
+ "pilot.aero",
+ "press.aero",
+ "production.aero",
+ "recreation.aero",
+ "repbody.aero",
+ "res.aero",
+ "research.aero",
+ "rotorcraft.aero",
+ "safety.aero",
+ "scientist.aero",
+ "services.aero",
+ "show.aero",
+ "skydiving.aero",
+ "software.aero",
+ "student.aero",
+ "trader.aero",
+ "trading.aero",
+ "trainer.aero",
+ "union.aero",
+ "workinggroup.aero",
+ "works.aero",
+ "af",
+ "gov.af",
+ "com.af",
+ "org.af",
+ "net.af",
+ "edu.af",
+ "ag",
+ "com.ag",
+ "org.ag",
+ "net.ag",
+ "co.ag",
+ "nom.ag",
+ "ai",
+ "off.ai",
+ "com.ai",
+ "net.ai",
+ "org.ai",
+ "al",
+ "com.al",
+ "edu.al",
+ "gov.al",
+ "mil.al",
+ "net.al",
+ "org.al",
+ "am",
+ "ao",
+ "ed.ao",
+ "gv.ao",
+ "og.ao",
+ "co.ao",
+ "pb.ao",
+ "it.ao",
+ "aq",
+ "ar",
+ "com.ar",
+ "edu.ar",
+ "gob.ar",
+ "gov.ar",
+ "int.ar",
+ "mil.ar",
+ "net.ar",
+ "org.ar",
+ "tur.ar",
+ "arpa",
+ "e164.arpa",
+ "in-addr.arpa",
+ "ip6.arpa",
+ "iris.arpa",
+ "uri.arpa",
+ "urn.arpa",
+ "as",
+ "gov.as",
+ "asia",
+ "at",
+ "ac.at",
+ "co.at",
+ "gv.at",
+ "or.at",
+ "au",
+ "com.au",
+ "net.au",
+ "org.au",
+ "edu.au",
+ "gov.au",
+ "asn.au",
+ "id.au",
+ "info.au",
+ "conf.au",
+ "oz.au",
+ "act.au",
+ "nsw.au",
+ "nt.au",
+ "qld.au",
+ "sa.au",
+ "tas.au",
+ "vic.au",
+ "wa.au",
+ "act.edu.au",
+ "nsw.edu.au",
+ "nt.edu.au",
+ "qld.edu.au",
+ "sa.edu.au",
+ "tas.edu.au",
+ "vic.edu.au",
+ "wa.edu.au",
+ "qld.gov.au",
+ "sa.gov.au",
+ "tas.gov.au",
+ "vic.gov.au",
+ "wa.gov.au",
+ "aw",
+ "com.aw",
+ "ax",
+ "az",
+ "com.az",
+ "net.az",
+ "int.az",
+ "gov.az",
+ "org.az",
+ "edu.az",
+ "info.az",
+ "pp.az",
+ "mil.az",
+ "name.az",
+ "pro.az",
+ "biz.az",
+ "ba",
+ "com.ba",
+ "edu.ba",
+ "gov.ba",
+ "mil.ba",
+ "net.ba",
+ "org.ba",
+ "bb",
+ "biz.bb",
+ "co.bb",
+ "com.bb",
+ "edu.bb",
+ "gov.bb",
+ "info.bb",
+ "net.bb",
+ "org.bb",
+ "store.bb",
+ "tv.bb",
+ "*.bd",
+ "be",
+ "ac.be",
+ "bf",
+ "gov.bf",
+ "bg",
+ "a.bg",
+ "b.bg",
+ "c.bg",
+ "d.bg",
+ "e.bg",
+ "f.bg",
+ "g.bg",
+ "h.bg",
+ "i.bg",
+ "j.bg",
+ "k.bg",
+ "l.bg",
+ "m.bg",
+ "n.bg",
+ "o.bg",
+ "p.bg",
+ "q.bg",
+ "r.bg",
+ "s.bg",
+ "t.bg",
+ "u.bg",
+ "v.bg",
+ "w.bg",
+ "x.bg",
+ "y.bg",
+ "z.bg",
+ "0.bg",
+ "1.bg",
+ "2.bg",
+ "3.bg",
+ "4.bg",
+ "5.bg",
+ "6.bg",
+ "7.bg",
+ "8.bg",
+ "9.bg",
+ "bh",
+ "com.bh",
+ "edu.bh",
+ "net.bh",
+ "org.bh",
+ "gov.bh",
+ "bi",
+ "co.bi",
+ "com.bi",
+ "edu.bi",
+ "or.bi",
+ "org.bi",
+ "biz",
+ "bj",
+ "asso.bj",
+ "barreau.bj",
+ "gouv.bj",
+ "bm",
+ "com.bm",
+ "edu.bm",
+ "gov.bm",
+ "net.bm",
+ "org.bm",
+ "*.bn",
+ "bo",
+ "com.bo",
+ "edu.bo",
+ "gov.bo",
+ "gob.bo",
+ "int.bo",
+ "org.bo",
+ "net.bo",
+ "mil.bo",
+ "tv.bo",
+ "br",
+ "adm.br",
+ "adv.br",
+ "agr.br",
+ "am.br",
+ "arq.br",
+ "art.br",
+ "ato.br",
+ "b.br",
+ "bio.br",
+ "blog.br",
+ "bmd.br",
+ "cim.br",
+ "cng.br",
+ "cnt.br",
+ "com.br",
+ "coop.br",
+ "ecn.br",
+ "eco.br",
+ "edu.br",
+ "emp.br",
+ "eng.br",
+ "esp.br",
+ "etc.br",
+ "eti.br",
+ "far.br",
+ "flog.br",
+ "fm.br",
+ "fnd.br",
+ "fot.br",
+ "fst.br",
+ "g12.br",
+ "ggf.br",
+ "gov.br",
+ "imb.br",
+ "ind.br",
+ "inf.br",
+ "jor.br",
+ "jus.br",
+ "leg.br",
+ "lel.br",
+ "mat.br",
+ "med.br",
+ "mil.br",
+ "mp.br",
+ "mus.br",
+ "net.br",
+ "*.nom.br",
+ "not.br",
+ "ntr.br",
+ "odo.br",
+ "org.br",
+ "ppg.br",
+ "pro.br",
+ "psc.br",
+ "psi.br",
+ "qsl.br",
+ "radio.br",
+ "rec.br",
+ "slg.br",
+ "srv.br",
+ "taxi.br",
+ "teo.br",
+ "tmp.br",
+ "trd.br",
+ "tur.br",
+ "tv.br",
+ "vet.br",
+ "vlog.br",
+ "wiki.br",
+ "zlg.br",
+ "bs",
+ "com.bs",
+ "net.bs",
+ "org.bs",
+ "edu.bs",
+ "gov.bs",
+ "bt",
+ "com.bt",
+ "edu.bt",
+ "gov.bt",
+ "net.bt",
+ "org.bt",
+ "bv",
+ "bw",
+ "co.bw",
+ "org.bw",
+ "by",
+ "gov.by",
+ "mil.by",
+ "com.by",
+ "of.by",
+ "bz",
+ "com.bz",
+ "net.bz",
+ "org.bz",
+ "edu.bz",
+ "gov.bz",
+ "ca",
+ "ab.ca",
+ "bc.ca",
+ "mb.ca",
+ "nb.ca",
+ "nf.ca",
+ "nl.ca",
+ "ns.ca",
+ "nt.ca",
+ "nu.ca",
+ "on.ca",
+ "pe.ca",
+ "qc.ca",
+ "sk.ca",
+ "yk.ca",
+ "gc.ca",
+ "cat",
+ "cc",
+ "cd",
+ "gov.cd",
+ "cf",
+ "cg",
+ "ch",
+ "ci",
+ "org.ci",
+ "or.ci",
+ "com.ci",
+ "co.ci",
+ "edu.ci",
+ "ed.ci",
+ "ac.ci",
+ "net.ci",
+ "go.ci",
+ "asso.ci",
+ "xn--aroport-bya.ci",
+ "int.ci",
+ "presse.ci",
+ "md.ci",
+ "gouv.ci",
+ "*.ck",
+ "!www.ck",
+ "cl",
+ "gov.cl",
+ "gob.cl",
+ "co.cl",
+ "mil.cl",
+ "cm",
+ "co.cm",
+ "com.cm",
+ "gov.cm",
+ "net.cm",
+ "cn",
+ "ac.cn",
+ "com.cn",
+ "edu.cn",
+ "gov.cn",
+ "net.cn",
+ "org.cn",
+ "mil.cn",
+ "xn--55qx5d.cn",
+ "xn--io0a7i.cn",
+ "xn--od0alg.cn",
+ "ah.cn",
+ "bj.cn",
+ "cq.cn",
+ "fj.cn",
+ "gd.cn",
+ "gs.cn",
+ "gz.cn",
+ "gx.cn",
+ "ha.cn",
+ "hb.cn",
+ "he.cn",
+ "hi.cn",
+ "hl.cn",
+ "hn.cn",
+ "jl.cn",
+ "js.cn",
+ "jx.cn",
+ "ln.cn",
+ "nm.cn",
+ "nx.cn",
+ "qh.cn",
+ "sc.cn",
+ "sd.cn",
+ "sh.cn",
+ "sn.cn",
+ "sx.cn",
+ "tj.cn",
+ "xj.cn",
+ "xz.cn",
+ "yn.cn",
+ "zj.cn",
+ "hk.cn",
+ "mo.cn",
+ "tw.cn",
+ "co",
+ "arts.co",
+ "com.co",
+ "edu.co",
+ "firm.co",
+ "gov.co",
+ "info.co",
+ "int.co",
+ "mil.co",
+ "net.co",
+ "nom.co",
+ "org.co",
+ "rec.co",
+ "web.co",
+ "com",
+ "coop",
+ "cr",
+ "ac.cr",
+ "co.cr",
+ "ed.cr",
+ "fi.cr",
+ "go.cr",
+ "or.cr",
+ "sa.cr",
+ "cu",
+ "com.cu",
+ "edu.cu",
+ "org.cu",
+ "net.cu",
+ "gov.cu",
+ "inf.cu",
+ "cv",
+ "cw",
+ "com.cw",
+ "edu.cw",
+ "net.cw",
+ "org.cw",
+ "cx",
+ "gov.cx",
+ "ac.cy",
+ "biz.cy",
+ "com.cy",
+ "ekloges.cy",
+ "gov.cy",
+ "ltd.cy",
+ "name.cy",
+ "net.cy",
+ "org.cy",
+ "parliament.cy",
+ "press.cy",
+ "pro.cy",
+ "tm.cy",
+ "cz",
+ "de",
+ "dj",
+ "dk",
+ "dm",
+ "com.dm",
+ "net.dm",
+ "org.dm",
+ "edu.dm",
+ "gov.dm",
+ "do",
+ "art.do",
+ "com.do",
+ "edu.do",
+ "gob.do",
+ "gov.do",
+ "mil.do",
+ "net.do",
+ "org.do",
+ "sld.do",
+ "web.do",
+ "dz",
+ "com.dz",
+ "org.dz",
+ "net.dz",
+ "gov.dz",
+ "edu.dz",
+ "asso.dz",
+ "pol.dz",
+ "art.dz",
+ "ec",
+ "com.ec",
+ "info.ec",
+ "net.ec",
+ "fin.ec",
+ "k12.ec",
+ "med.ec",
+ "pro.ec",
+ "org.ec",
+ "edu.ec",
+ "gov.ec",
+ "gob.ec",
+ "mil.ec",
+ "edu",
+ "ee",
+ "edu.ee",
+ "gov.ee",
+ "riik.ee",
+ "lib.ee",
+ "med.ee",
+ "com.ee",
+ "pri.ee",
+ "aip.ee",
+ "org.ee",
+ "fie.ee",
+ "eg",
+ "com.eg",
+ "edu.eg",
+ "eun.eg",
+ "gov.eg",
+ "mil.eg",
+ "name.eg",
+ "net.eg",
+ "org.eg",
+ "sci.eg",
+ "*.er",
+ "es",
+ "com.es",
+ "nom.es",
+ "org.es",
+ "gob.es",
+ "edu.es",
+ "et",
+ "com.et",
+ "gov.et",
+ "org.et",
+ "edu.et",
+ "biz.et",
+ "name.et",
+ "info.et",
+ "net.et",
+ "eu",
+ "fi",
+ "aland.fi",
+ "*.fj",
+ "*.fk",
+ "fm",
+ "fo",
+ "fr",
+ "com.fr",
+ "asso.fr",
+ "nom.fr",
+ "prd.fr",
+ "presse.fr",
+ "tm.fr",
+ "aeroport.fr",
+ "assedic.fr",
+ "avocat.fr",
+ "avoues.fr",
+ "cci.fr",
+ "chambagri.fr",
+ "chirurgiens-dentistes.fr",
+ "experts-comptables.fr",
+ "geometre-expert.fr",
+ "gouv.fr",
+ "greta.fr",
+ "huissier-justice.fr",
+ "medecin.fr",
+ "notaires.fr",
+ "pharmacien.fr",
+ "port.fr",
+ "veterinaire.fr",
+ "ga",
+ "gb",
+ "gd",
+ "ge",
+ "com.ge",
+ "edu.ge",
+ "gov.ge",
+ "org.ge",
+ "mil.ge",
+ "net.ge",
+ "pvt.ge",
+ "gf",
+ "gg",
+ "co.gg",
+ "net.gg",
+ "org.gg",
+ "gh",
+ "com.gh",
+ "edu.gh",
+ "gov.gh",
+ "org.gh",
+ "mil.gh",
+ "gi",
+ "com.gi",
+ "ltd.gi",
+ "gov.gi",
+ "mod.gi",
+ "edu.gi",
+ "org.gi",
+ "gl",
+ "co.gl",
+ "com.gl",
+ "edu.gl",
+ "net.gl",
+ "org.gl",
+ "gm",
+ "gn",
+ "ac.gn",
+ "com.gn",
+ "edu.gn",
+ "gov.gn",
+ "org.gn",
+ "net.gn",
+ "gov",
+ "gp",
+ "com.gp",
+ "net.gp",
+ "mobi.gp",
+ "edu.gp",
+ "org.gp",
+ "asso.gp",
+ "gq",
+ "gr",
+ "com.gr",
+ "edu.gr",
+ "net.gr",
+ "org.gr",
+ "gov.gr",
+ "gs",
+ "gt",
+ "com.gt",
+ "edu.gt",
+ "gob.gt",
+ "ind.gt",
+ "mil.gt",
+ "net.gt",
+ "org.gt",
+ "*.gu",
+ "gw",
+ "gy",
+ "co.gy",
+ "com.gy",
+ "edu.gy",
+ "gov.gy",
+ "net.gy",
+ "org.gy",
+ "hk",
+ "com.hk",
+ "edu.hk",
+ "gov.hk",
+ "idv.hk",
+ "net.hk",
+ "org.hk",
+ "xn--55qx5d.hk",
+ "xn--wcvs22d.hk",
+ "xn--lcvr32d.hk",
+ "xn--mxtq1m.hk",
+ "xn--gmqw5a.hk",
+ "xn--ciqpn.hk",
+ "xn--gmq050i.hk",
+ "xn--zf0avx.hk",
+ "xn--io0a7i.hk",
+ "xn--mk0axi.hk",
+ "xn--od0alg.hk",
+ "xn--od0aq3b.hk",
+ "xn--tn0ag.hk",
+ "xn--uc0atv.hk",
+ "xn--uc0ay4a.hk",
+ "hm",
+ "hn",
+ "com.hn",
+ "edu.hn",
+ "org.hn",
+ "net.hn",
+ "mil.hn",
+ "gob.hn",
+ "hr",
+ "iz.hr",
+ "from.hr",
+ "name.hr",
+ "com.hr",
+ "ht",
+ "com.ht",
+ "shop.ht",
+ "firm.ht",
+ "info.ht",
+ "adult.ht",
+ "net.ht",
+ "pro.ht",
+ "org.ht",
+ "med.ht",
+ "art.ht",
+ "coop.ht",
+ "pol.ht",
+ "asso.ht",
+ "edu.ht",
+ "rel.ht",
+ "gouv.ht",
+ "perso.ht",
+ "hu",
+ "co.hu",
+ "info.hu",
+ "org.hu",
+ "priv.hu",
+ "sport.hu",
+ "tm.hu",
+ "2000.hu",
+ "agrar.hu",
+ "bolt.hu",
+ "casino.hu",
+ "city.hu",
+ "erotica.hu",
+ "erotika.hu",
+ "film.hu",
+ "forum.hu",
+ "games.hu",
+ "hotel.hu",
+ "ingatlan.hu",
+ "jogasz.hu",
+ "konyvelo.hu",
+ "lakas.hu",
+ "media.hu",
+ "news.hu",
+ "reklam.hu",
+ "sex.hu",
+ "shop.hu",
+ "suli.hu",
+ "szex.hu",
+ "tozsde.hu",
+ "utazas.hu",
+ "video.hu",
+ "id",
+ "ac.id",
+ "biz.id",
+ "co.id",
+ "desa.id",
+ "go.id",
+ "mil.id",
+ "my.id",
+ "net.id",
+ "or.id",
+ "sch.id",
+ "web.id",
+ "ie",
+ "gov.ie",
+ "il",
+ "ac.il",
+ "co.il",
+ "gov.il",
+ "idf.il",
+ "k12.il",
+ "muni.il",
+ "net.il",
+ "org.il",
+ "im",
+ "ac.im",
+ "co.im",
+ "com.im",
+ "ltd.co.im",
+ "net.im",
+ "org.im",
+ "plc.co.im",
+ "tt.im",
+ "tv.im",
+ "in",
+ "co.in",
+ "firm.in",
+ "net.in",
+ "org.in",
+ "gen.in",
+ "ind.in",
+ "nic.in",
+ "ac.in",
+ "edu.in",
+ "res.in",
+ "gov.in",
+ "mil.in",
+ "info",
+ "int",
+ "eu.int",
+ "io",
+ "com.io",
+ "iq",
+ "gov.iq",
+ "edu.iq",
+ "mil.iq",
+ "com.iq",
+ "org.iq",
+ "net.iq",
+ "ir",
+ "ac.ir",
+ "co.ir",
+ "gov.ir",
+ "id.ir",
+ "net.ir",
+ "org.ir",
+ "sch.ir",
+ "xn--mgba3a4f16a.ir",
+ "xn--mgba3a4fra.ir",
+ "is",
+ "net.is",
+ "com.is",
+ "edu.is",
+ "gov.is",
+ "org.is",
+ "int.is",
+ "it",
+ "gov.it",
+ "edu.it",
+ "abr.it",
+ "abruzzo.it",
+ "aosta-valley.it",
+ "aostavalley.it",
+ "bas.it",
+ "basilicata.it",
+ "cal.it",
+ "calabria.it",
+ "cam.it",
+ "campania.it",
+ "emilia-romagna.it",
+ "emiliaromagna.it",
+ "emr.it",
+ "friuli-v-giulia.it",
+ "friuli-ve-giulia.it",
+ "friuli-vegiulia.it",
+ "friuli-venezia-giulia.it",
+ "friuli-veneziagiulia.it",
+ "friuli-vgiulia.it",
+ "friuliv-giulia.it",
+ "friulive-giulia.it",
+ "friulivegiulia.it",
+ "friulivenezia-giulia.it",
+ "friuliveneziagiulia.it",
+ "friulivgiulia.it",
+ "fvg.it",
+ "laz.it",
+ "lazio.it",
+ "lig.it",
+ "liguria.it",
+ "lom.it",
+ "lombardia.it",
+ "lombardy.it",
+ "lucania.it",
+ "mar.it",
+ "marche.it",
+ "mol.it",
+ "molise.it",
+ "piedmont.it",
+ "piemonte.it",
+ "pmn.it",
+ "pug.it",
+ "puglia.it",
+ "sar.it",
+ "sardegna.it",
+ "sardinia.it",
+ "sic.it",
+ "sicilia.it",
+ "sicily.it",
+ "taa.it",
+ "tos.it",
+ "toscana.it",
+ "trentino-a-adige.it",
+ "trentino-aadige.it",
+ "trentino-alto-adige.it",
+ "trentino-altoadige.it",
+ "trentino-s-tirol.it",
+ "trentino-stirol.it",
+ "trentino-sud-tirol.it",
+ "trentino-sudtirol.it",
+ "trentino-sued-tirol.it",
+ "trentino-suedtirol.it",
+ "trentinoa-adige.it",
+ "trentinoaadige.it",
+ "trentinoalto-adige.it",
+ "trentinoaltoadige.it",
+ "trentinos-tirol.it",
+ "trentinostirol.it",
+ "trentinosud-tirol.it",
+ "trentinosudtirol.it",
+ "trentinosued-tirol.it",
+ "trentinosuedtirol.it",
+ "tuscany.it",
+ "umb.it",
+ "umbria.it",
+ "val-d-aosta.it",
+ "val-daosta.it",
+ "vald-aosta.it",
+ "valdaosta.it",
+ "valle-aosta.it",
+ "valle-d-aosta.it",
+ "valle-daosta.it",
+ "valleaosta.it",
+ "valled-aosta.it",
+ "valledaosta.it",
+ "vallee-aoste.it",
+ "valleeaoste.it",
+ "vao.it",
+ "vda.it",
+ "ven.it",
+ "veneto.it",
+ "ag.it",
+ "agrigento.it",
+ "al.it",
+ "alessandria.it",
+ "alto-adige.it",
+ "altoadige.it",
+ "an.it",
+ "ancona.it",
+ "andria-barletta-trani.it",
+ "andria-trani-barletta.it",
+ "andriabarlettatrani.it",
+ "andriatranibarletta.it",
+ "ao.it",
+ "aosta.it",
+ "aoste.it",
+ "ap.it",
+ "aq.it",
+ "aquila.it",
+ "ar.it",
+ "arezzo.it",
+ "ascoli-piceno.it",
+ "ascolipiceno.it",
+ "asti.it",
+ "at.it",
+ "av.it",
+ "avellino.it",
+ "ba.it",
+ "balsan.it",
+ "bari.it",
+ "barletta-trani-andria.it",
+ "barlettatraniandria.it",
+ "belluno.it",
+ "benevento.it",
+ "bergamo.it",
+ "bg.it",
+ "bi.it",
+ "biella.it",
+ "bl.it",
+ "bn.it",
+ "bo.it",
+ "bologna.it",
+ "bolzano.it",
+ "bozen.it",
+ "br.it",
+ "brescia.it",
+ "brindisi.it",
+ "bs.it",
+ "bt.it",
+ "bz.it",
+ "ca.it",
+ "cagliari.it",
+ "caltanissetta.it",
+ "campidano-medio.it",
+ "campidanomedio.it",
+ "campobasso.it",
+ "carbonia-iglesias.it",
+ "carboniaiglesias.it",
+ "carrara-massa.it",
+ "carraramassa.it",
+ "caserta.it",
+ "catania.it",
+ "catanzaro.it",
+ "cb.it",
+ "ce.it",
+ "cesena-forli.it",
+ "cesenaforli.it",
+ "ch.it",
+ "chieti.it",
+ "ci.it",
+ "cl.it",
+ "cn.it",
+ "co.it",
+ "como.it",
+ "cosenza.it",
+ "cr.it",
+ "cremona.it",
+ "crotone.it",
+ "cs.it",
+ "ct.it",
+ "cuneo.it",
+ "cz.it",
+ "dell-ogliastra.it",
+ "dellogliastra.it",
+ "en.it",
+ "enna.it",
+ "fc.it",
+ "fe.it",
+ "fermo.it",
+ "ferrara.it",
+ "fg.it",
+ "fi.it",
+ "firenze.it",
+ "florence.it",
+ "fm.it",
+ "foggia.it",
+ "forli-cesena.it",
+ "forlicesena.it",
+ "fr.it",
+ "frosinone.it",
+ "ge.it",
+ "genoa.it",
+ "genova.it",
+ "go.it",
+ "gorizia.it",
+ "gr.it",
+ "grosseto.it",
+ "iglesias-carbonia.it",
+ "iglesiascarbonia.it",
+ "im.it",
+ "imperia.it",
+ "is.it",
+ "isernia.it",
+ "kr.it",
+ "la-spezia.it",
+ "laquila.it",
+ "laspezia.it",
+ "latina.it",
+ "lc.it",
+ "le.it",
+ "lecce.it",
+ "lecco.it",
+ "li.it",
+ "livorno.it",
+ "lo.it",
+ "lodi.it",
+ "lt.it",
+ "lu.it",
+ "lucca.it",
+ "macerata.it",
+ "mantova.it",
+ "massa-carrara.it",
+ "massacarrara.it",
+ "matera.it",
+ "mb.it",
+ "mc.it",
+ "me.it",
+ "medio-campidano.it",
+ "mediocampidano.it",
+ "messina.it",
+ "mi.it",
+ "milan.it",
+ "milano.it",
+ "mn.it",
+ "mo.it",
+ "modena.it",
+ "monza-brianza.it",
+ "monza-e-della-brianza.it",
+ "monza.it",
+ "monzabrianza.it",
+ "monzaebrianza.it",
+ "monzaedellabrianza.it",
+ "ms.it",
+ "mt.it",
+ "na.it",
+ "naples.it",
+ "napoli.it",
+ "no.it",
+ "novara.it",
+ "nu.it",
+ "nuoro.it",
+ "og.it",
+ "ogliastra.it",
+ "olbia-tempio.it",
+ "olbiatempio.it",
+ "or.it",
+ "oristano.it",
+ "ot.it",
+ "pa.it",
+ "padova.it",
+ "padua.it",
+ "palermo.it",
+ "parma.it",
+ "pavia.it",
+ "pc.it",
+ "pd.it",
+ "pe.it",
+ "perugia.it",
+ "pesaro-urbino.it",
+ "pesarourbino.it",
+ "pescara.it",
+ "pg.it",
+ "pi.it",
+ "piacenza.it",
+ "pisa.it",
+ "pistoia.it",
+ "pn.it",
+ "po.it",
+ "pordenone.it",
+ "potenza.it",
+ "pr.it",
+ "prato.it",
+ "pt.it",
+ "pu.it",
+ "pv.it",
+ "pz.it",
+ "ra.it",
+ "ragusa.it",
+ "ravenna.it",
+ "rc.it",
+ "re.it",
+ "reggio-calabria.it",
+ "reggio-emilia.it",
+ "reggiocalabria.it",
+ "reggioemilia.it",
+ "rg.it",
+ "ri.it",
+ "rieti.it",
+ "rimini.it",
+ "rm.it",
+ "rn.it",
+ "ro.it",
+ "roma.it",
+ "rome.it",
+ "rovigo.it",
+ "sa.it",
+ "salerno.it",
+ "sassari.it",
+ "savona.it",
+ "si.it",
+ "siena.it",
+ "siracusa.it",
+ "so.it",
+ "sondrio.it",
+ "sp.it",
+ "sr.it",
+ "ss.it",
+ "suedtirol.it",
+ "sv.it",
+ "ta.it",
+ "taranto.it",
+ "te.it",
+ "tempio-olbia.it",
+ "tempioolbia.it",
+ "teramo.it",
+ "terni.it",
+ "tn.it",
+ "to.it",
+ "torino.it",
+ "tp.it",
+ "tr.it",
+ "trani-andria-barletta.it",
+ "trani-barletta-andria.it",
+ "traniandriabarletta.it",
+ "tranibarlettaandria.it",
+ "trapani.it",
+ "trentino.it",
+ "trento.it",
+ "treviso.it",
+ "trieste.it",
+ "ts.it",
+ "turin.it",
+ "tv.it",
+ "ud.it",
+ "udine.it",
+ "urbino-pesaro.it",
+ "urbinopesaro.it",
+ "va.it",
+ "varese.it",
+ "vb.it",
+ "vc.it",
+ "ve.it",
+ "venezia.it",
+ "venice.it",
+ "verbania.it",
+ "vercelli.it",
+ "verona.it",
+ "vi.it",
+ "vibo-valentia.it",
+ "vibovalentia.it",
+ "vicenza.it",
+ "viterbo.it",
+ "vr.it",
+ "vs.it",
+ "vt.it",
+ "vv.it",
+ "je",
+ "co.je",
+ "net.je",
+ "org.je",
+ "*.jm",
+ "jo",
+ "com.jo",
+ "org.jo",
+ "net.jo",
+ "edu.jo",
+ "sch.jo",
+ "gov.jo",
+ "mil.jo",
+ "name.jo",
+ "jobs",
+ "jp",
+ "ac.jp",
+ "ad.jp",
+ "co.jp",
+ "ed.jp",
+ "go.jp",
+ "gr.jp",
+ "lg.jp",
+ "ne.jp",
+ "or.jp",
+ "aichi.jp",
+ "akita.jp",
+ "aomori.jp",
+ "chiba.jp",
+ "ehime.jp",
+ "fukui.jp",
+ "fukuoka.jp",
+ "fukushima.jp",
+ "gifu.jp",
+ "gunma.jp",
+ "hiroshima.jp",
+ "hokkaido.jp",
+ "hyogo.jp",
+ "ibaraki.jp",
+ "ishikawa.jp",
+ "iwate.jp",
+ "kagawa.jp",
+ "kagoshima.jp",
+ "kanagawa.jp",
+ "kochi.jp",
+ "kumamoto.jp",
+ "kyoto.jp",
+ "mie.jp",
+ "miyagi.jp",
+ "miyazaki.jp",
+ "nagano.jp",
+ "nagasaki.jp",
+ "nara.jp",
+ "niigata.jp",
+ "oita.jp",
+ "okayama.jp",
+ "okinawa.jp",
+ "osaka.jp",
+ "saga.jp",
+ "saitama.jp",
+ "shiga.jp",
+ "shimane.jp",
+ "shizuoka.jp",
+ "tochigi.jp",
+ "tokushima.jp",
+ "tokyo.jp",
+ "tottori.jp",
+ "toyama.jp",
+ "wakayama.jp",
+ "yamagata.jp",
+ "yamaguchi.jp",
+ "yamanashi.jp",
+ "xn--4pvxs.jp",
+ "xn--vgu402c.jp",
+ "xn--c3s14m.jp",
+ "xn--f6qx53a.jp",
+ "xn--8pvr4u.jp",
+ "xn--uist22h.jp",
+ "xn--djrs72d6uy.jp",
+ "xn--mkru45i.jp",
+ "xn--0trq7p7nn.jp",
+ "xn--8ltr62k.jp",
+ "xn--2m4a15e.jp",
+ "xn--efvn9s.jp",
+ "xn--32vp30h.jp",
+ "xn--4it797k.jp",
+ "xn--1lqs71d.jp",
+ "xn--5rtp49c.jp",
+ "xn--5js045d.jp",
+ "xn--ehqz56n.jp",
+ "xn--1lqs03n.jp",
+ "xn--qqqt11m.jp",
+ "xn--kbrq7o.jp",
+ "xn--pssu33l.jp",
+ "xn--ntsq17g.jp",
+ "xn--uisz3g.jp",
+ "xn--6btw5a.jp",
+ "xn--1ctwo.jp",
+ "xn--6orx2r.jp",
+ "xn--rht61e.jp",
+ "xn--rht27z.jp",
+ "xn--djty4k.jp",
+ "xn--nit225k.jp",
+ "xn--rht3d.jp",
+ "xn--klty5x.jp",
+ "xn--kltx9a.jp",
+ "xn--kltp7d.jp",
+ "xn--uuwu58a.jp",
+ "xn--zbx025d.jp",
+ "xn--ntso0iqx3a.jp",
+ "xn--elqq16h.jp",
+ "xn--4it168d.jp",
+ "xn--klt787d.jp",
+ "xn--rny31h.jp",
+ "xn--7t0a264c.jp",
+ "xn--5rtq34k.jp",
+ "xn--k7yn95e.jp",
+ "xn--tor131o.jp",
+ "xn--d5qv7z876c.jp",
+ "*.kawasaki.jp",
+ "*.kitakyushu.jp",
+ "*.kobe.jp",
+ "*.nagoya.jp",
+ "*.sapporo.jp",
+ "*.sendai.jp",
+ "*.yokohama.jp",
+ "!city.kawasaki.jp",
+ "!city.kitakyushu.jp",
+ "!city.kobe.jp",
+ "!city.nagoya.jp",
+ "!city.sapporo.jp",
+ "!city.sendai.jp",
+ "!city.yokohama.jp",
+ "aisai.aichi.jp",
+ "ama.aichi.jp",
+ "anjo.aichi.jp",
+ "asuke.aichi.jp",
+ "chiryu.aichi.jp",
+ "chita.aichi.jp",
+ "fuso.aichi.jp",
+ "gamagori.aichi.jp",
+ "handa.aichi.jp",
+ "hazu.aichi.jp",
+ "hekinan.aichi.jp",
+ "higashiura.aichi.jp",
+ "ichinomiya.aichi.jp",
+ "inazawa.aichi.jp",
+ "inuyama.aichi.jp",
+ "isshiki.aichi.jp",
+ "iwakura.aichi.jp",
+ "kanie.aichi.jp",
+ "kariya.aichi.jp",
+ "kasugai.aichi.jp",
+ "kira.aichi.jp",
+ "kiyosu.aichi.jp",
+ "komaki.aichi.jp",
+ "konan.aichi.jp",
+ "kota.aichi.jp",
+ "mihama.aichi.jp",
+ "miyoshi.aichi.jp",
+ "nishio.aichi.jp",
+ "nisshin.aichi.jp",
+ "obu.aichi.jp",
+ "oguchi.aichi.jp",
+ "oharu.aichi.jp",
+ "okazaki.aichi.jp",
+ "owariasahi.aichi.jp",
+ "seto.aichi.jp",
+ "shikatsu.aichi.jp",
+ "shinshiro.aichi.jp",
+ "shitara.aichi.jp",
+ "tahara.aichi.jp",
+ "takahama.aichi.jp",
+ "tobishima.aichi.jp",
+ "toei.aichi.jp",
+ "togo.aichi.jp",
+ "tokai.aichi.jp",
+ "tokoname.aichi.jp",
+ "toyoake.aichi.jp",
+ "toyohashi.aichi.jp",
+ "toyokawa.aichi.jp",
+ "toyone.aichi.jp",
+ "toyota.aichi.jp",
+ "tsushima.aichi.jp",
+ "yatomi.aichi.jp",
+ "akita.akita.jp",
+ "daisen.akita.jp",
+ "fujisato.akita.jp",
+ "gojome.akita.jp",
+ "hachirogata.akita.jp",
+ "happou.akita.jp",
+ "higashinaruse.akita.jp",
+ "honjo.akita.jp",
+ "honjyo.akita.jp",
+ "ikawa.akita.jp",
+ "kamikoani.akita.jp",
+ "kamioka.akita.jp",
+ "katagami.akita.jp",
+ "kazuno.akita.jp",
+ "kitaakita.akita.jp",
+ "kosaka.akita.jp",
+ "kyowa.akita.jp",
+ "misato.akita.jp",
+ "mitane.akita.jp",
+ "moriyoshi.akita.jp",
+ "nikaho.akita.jp",
+ "noshiro.akita.jp",
+ "odate.akita.jp",
+ "oga.akita.jp",
+ "ogata.akita.jp",
+ "semboku.akita.jp",
+ "yokote.akita.jp",
+ "yurihonjo.akita.jp",
+ "aomori.aomori.jp",
+ "gonohe.aomori.jp",
+ "hachinohe.aomori.jp",
+ "hashikami.aomori.jp",
+ "hiranai.aomori.jp",
+ "hirosaki.aomori.jp",
+ "itayanagi.aomori.jp",
+ "kuroishi.aomori.jp",
+ "misawa.aomori.jp",
+ "mutsu.aomori.jp",
+ "nakadomari.aomori.jp",
+ "noheji.aomori.jp",
+ "oirase.aomori.jp",
+ "owani.aomori.jp",
+ "rokunohe.aomori.jp",
+ "sannohe.aomori.jp",
+ "shichinohe.aomori.jp",
+ "shingo.aomori.jp",
+ "takko.aomori.jp",
+ "towada.aomori.jp",
+ "tsugaru.aomori.jp",
+ "tsuruta.aomori.jp",
+ "abiko.chiba.jp",
+ "asahi.chiba.jp",
+ "chonan.chiba.jp",
+ "chosei.chiba.jp",
+ "choshi.chiba.jp",
+ "chuo.chiba.jp",
+ "funabashi.chiba.jp",
+ "futtsu.chiba.jp",
+ "hanamigawa.chiba.jp",
+ "ichihara.chiba.jp",
+ "ichikawa.chiba.jp",
+ "ichinomiya.chiba.jp",
+ "inzai.chiba.jp",
+ "isumi.chiba.jp",
+ "kamagaya.chiba.jp",
+ "kamogawa.chiba.jp",
+ "kashiwa.chiba.jp",
+ "katori.chiba.jp",
+ "katsuura.chiba.jp",
+ "kimitsu.chiba.jp",
+ "kisarazu.chiba.jp",
+ "kozaki.chiba.jp",
+ "kujukuri.chiba.jp",
+ "kyonan.chiba.jp",
+ "matsudo.chiba.jp",
+ "midori.chiba.jp",
+ "mihama.chiba.jp",
+ "minamiboso.chiba.jp",
+ "mobara.chiba.jp",
+ "mutsuzawa.chiba.jp",
+ "nagara.chiba.jp",
+ "nagareyama.chiba.jp",
+ "narashino.chiba.jp",
+ "narita.chiba.jp",
+ "noda.chiba.jp",
+ "oamishirasato.chiba.jp",
+ "omigawa.chiba.jp",
+ "onjuku.chiba.jp",
+ "otaki.chiba.jp",
+ "sakae.chiba.jp",
+ "sakura.chiba.jp",
+ "shimofusa.chiba.jp",
+ "shirako.chiba.jp",
+ "shiroi.chiba.jp",
+ "shisui.chiba.jp",
+ "sodegaura.chiba.jp",
+ "sosa.chiba.jp",
+ "tako.chiba.jp",
+ "tateyama.chiba.jp",
+ "togane.chiba.jp",
+ "tohnosho.chiba.jp",
+ "tomisato.chiba.jp",
+ "urayasu.chiba.jp",
+ "yachimata.chiba.jp",
+ "yachiyo.chiba.jp",
+ "yokaichiba.chiba.jp",
+ "yokoshibahikari.chiba.jp",
+ "yotsukaido.chiba.jp",
+ "ainan.ehime.jp",
+ "honai.ehime.jp",
+ "ikata.ehime.jp",
+ "imabari.ehime.jp",
+ "iyo.ehime.jp",
+ "kamijima.ehime.jp",
+ "kihoku.ehime.jp",
+ "kumakogen.ehime.jp",
+ "masaki.ehime.jp",
+ "matsuno.ehime.jp",
+ "matsuyama.ehime.jp",
+ "namikata.ehime.jp",
+ "niihama.ehime.jp",
+ "ozu.ehime.jp",
+ "saijo.ehime.jp",
+ "seiyo.ehime.jp",
+ "shikokuchuo.ehime.jp",
+ "tobe.ehime.jp",
+ "toon.ehime.jp",
+ "uchiko.ehime.jp",
+ "uwajima.ehime.jp",
+ "yawatahama.ehime.jp",
+ "echizen.fukui.jp",
+ "eiheiji.fukui.jp",
+ "fukui.fukui.jp",
+ "ikeda.fukui.jp",
+ "katsuyama.fukui.jp",
+ "mihama.fukui.jp",
+ "minamiechizen.fukui.jp",
+ "obama.fukui.jp",
+ "ohi.fukui.jp",
+ "ono.fukui.jp",
+ "sabae.fukui.jp",
+ "sakai.fukui.jp",
+ "takahama.fukui.jp",
+ "tsuruga.fukui.jp",
+ "wakasa.fukui.jp",
+ "ashiya.fukuoka.jp",
+ "buzen.fukuoka.jp",
+ "chikugo.fukuoka.jp",
+ "chikuho.fukuoka.jp",
+ "chikujo.fukuoka.jp",
+ "chikushino.fukuoka.jp",
+ "chikuzen.fukuoka.jp",
+ "chuo.fukuoka.jp",
+ "dazaifu.fukuoka.jp",
+ "fukuchi.fukuoka.jp",
+ "hakata.fukuoka.jp",
+ "higashi.fukuoka.jp",
+ "hirokawa.fukuoka.jp",
+ "hisayama.fukuoka.jp",
+ "iizuka.fukuoka.jp",
+ "inatsuki.fukuoka.jp",
+ "kaho.fukuoka.jp",
+ "kasuga.fukuoka.jp",
+ "kasuya.fukuoka.jp",
+ "kawara.fukuoka.jp",
+ "keisen.fukuoka.jp",
+ "koga.fukuoka.jp",
+ "kurate.fukuoka.jp",
+ "kurogi.fukuoka.jp",
+ "kurume.fukuoka.jp",
+ "minami.fukuoka.jp",
+ "miyako.fukuoka.jp",
+ "miyama.fukuoka.jp",
+ "miyawaka.fukuoka.jp",
+ "mizumaki.fukuoka.jp",
+ "munakata.fukuoka.jp",
+ "nakagawa.fukuoka.jp",
+ "nakama.fukuoka.jp",
+ "nishi.fukuoka.jp",
+ "nogata.fukuoka.jp",
+ "ogori.fukuoka.jp",
+ "okagaki.fukuoka.jp",
+ "okawa.fukuoka.jp",
+ "oki.fukuoka.jp",
+ "omuta.fukuoka.jp",
+ "onga.fukuoka.jp",
+ "onojo.fukuoka.jp",
+ "oto.fukuoka.jp",
+ "saigawa.fukuoka.jp",
+ "sasaguri.fukuoka.jp",
+ "shingu.fukuoka.jp",
+ "shinyoshitomi.fukuoka.jp",
+ "shonai.fukuoka.jp",
+ "soeda.fukuoka.jp",
+ "sue.fukuoka.jp",
+ "tachiarai.fukuoka.jp",
+ "tagawa.fukuoka.jp",
+ "takata.fukuoka.jp",
+ "toho.fukuoka.jp",
+ "toyotsu.fukuoka.jp",
+ "tsuiki.fukuoka.jp",
+ "ukiha.fukuoka.jp",
+ "umi.fukuoka.jp",
+ "usui.fukuoka.jp",
+ "yamada.fukuoka.jp",
+ "yame.fukuoka.jp",
+ "yanagawa.fukuoka.jp",
+ "yukuhashi.fukuoka.jp",
+ "aizubange.fukushima.jp",
+ "aizumisato.fukushima.jp",
+ "aizuwakamatsu.fukushima.jp",
+ "asakawa.fukushima.jp",
+ "bandai.fukushima.jp",
+ "date.fukushima.jp",
+ "fukushima.fukushima.jp",
+ "furudono.fukushima.jp",
+ "futaba.fukushima.jp",
+ "hanawa.fukushima.jp",
+ "higashi.fukushima.jp",
+ "hirata.fukushima.jp",
+ "hirono.fukushima.jp",
+ "iitate.fukushima.jp",
+ "inawashiro.fukushima.jp",
+ "ishikawa.fukushima.jp",
+ "iwaki.fukushima.jp",
+ "izumizaki.fukushima.jp",
+ "kagamiishi.fukushima.jp",
+ "kaneyama.fukushima.jp",
+ "kawamata.fukushima.jp",
+ "kitakata.fukushima.jp",
+ "kitashiobara.fukushima.jp",
+ "koori.fukushima.jp",
+ "koriyama.fukushima.jp",
+ "kunimi.fukushima.jp",
+ "miharu.fukushima.jp",
+ "mishima.fukushima.jp",
+ "namie.fukushima.jp",
+ "nango.fukushima.jp",
+ "nishiaizu.fukushima.jp",
+ "nishigo.fukushima.jp",
+ "okuma.fukushima.jp",
+ "omotego.fukushima.jp",
+ "ono.fukushima.jp",
+ "otama.fukushima.jp",
+ "samegawa.fukushima.jp",
+ "shimogo.fukushima.jp",
+ "shirakawa.fukushima.jp",
+ "showa.fukushima.jp",
+ "soma.fukushima.jp",
+ "sukagawa.fukushima.jp",
+ "taishin.fukushima.jp",
+ "tamakawa.fukushima.jp",
+ "tanagura.fukushima.jp",
+ "tenei.fukushima.jp",
+ "yabuki.fukushima.jp",
+ "yamato.fukushima.jp",
+ "yamatsuri.fukushima.jp",
+ "yanaizu.fukushima.jp",
+ "yugawa.fukushima.jp",
+ "anpachi.gifu.jp",
+ "ena.gifu.jp",
+ "gifu.gifu.jp",
+ "ginan.gifu.jp",
+ "godo.gifu.jp",
+ "gujo.gifu.jp",
+ "hashima.gifu.jp",
+ "hichiso.gifu.jp",
+ "hida.gifu.jp",
+ "higashishirakawa.gifu.jp",
+ "ibigawa.gifu.jp",
+ "ikeda.gifu.jp",
+ "kakamigahara.gifu.jp",
+ "kani.gifu.jp",
+ "kasahara.gifu.jp",
+ "kasamatsu.gifu.jp",
+ "kawaue.gifu.jp",
+ "kitagata.gifu.jp",
+ "mino.gifu.jp",
+ "minokamo.gifu.jp",
+ "mitake.gifu.jp",
+ "mizunami.gifu.jp",
+ "motosu.gifu.jp",
+ "nakatsugawa.gifu.jp",
+ "ogaki.gifu.jp",
+ "sakahogi.gifu.jp",
+ "seki.gifu.jp",
+ "sekigahara.gifu.jp",
+ "shirakawa.gifu.jp",
+ "tajimi.gifu.jp",
+ "takayama.gifu.jp",
+ "tarui.gifu.jp",
+ "toki.gifu.jp",
+ "tomika.gifu.jp",
+ "wanouchi.gifu.jp",
+ "yamagata.gifu.jp",
+ "yaotsu.gifu.jp",
+ "yoro.gifu.jp",
+ "annaka.gunma.jp",
+ "chiyoda.gunma.jp",
+ "fujioka.gunma.jp",
+ "higashiagatsuma.gunma.jp",
+ "isesaki.gunma.jp",
+ "itakura.gunma.jp",
+ "kanna.gunma.jp",
+ "kanra.gunma.jp",
+ "katashina.gunma.jp",
+ "kawaba.gunma.jp",
+ "kiryu.gunma.jp",
+ "kusatsu.gunma.jp",
+ "maebashi.gunma.jp",
+ "meiwa.gunma.jp",
+ "midori.gunma.jp",
+ "minakami.gunma.jp",
+ "naganohara.gunma.jp",
+ "nakanojo.gunma.jp",
+ "nanmoku.gunma.jp",
+ "numata.gunma.jp",
+ "oizumi.gunma.jp",
+ "ora.gunma.jp",
+ "ota.gunma.jp",
+ "shibukawa.gunma.jp",
+ "shimonita.gunma.jp",
+ "shinto.gunma.jp",
+ "showa.gunma.jp",
+ "takasaki.gunma.jp",
+ "takayama.gunma.jp",
+ "tamamura.gunma.jp",
+ "tatebayashi.gunma.jp",
+ "tomioka.gunma.jp",
+ "tsukiyono.gunma.jp",
+ "tsumagoi.gunma.jp",
+ "ueno.gunma.jp",
+ "yoshioka.gunma.jp",
+ "asaminami.hiroshima.jp",
+ "daiwa.hiroshima.jp",
+ "etajima.hiroshima.jp",
+ "fuchu.hiroshima.jp",
+ "fukuyama.hiroshima.jp",
+ "hatsukaichi.hiroshima.jp",
+ "higashihiroshima.hiroshima.jp",
+ "hongo.hiroshima.jp",
+ "jinsekikogen.hiroshima.jp",
+ "kaita.hiroshima.jp",
+ "kui.hiroshima.jp",
+ "kumano.hiroshima.jp",
+ "kure.hiroshima.jp",
+ "mihara.hiroshima.jp",
+ "miyoshi.hiroshima.jp",
+ "naka.hiroshima.jp",
+ "onomichi.hiroshima.jp",
+ "osakikamijima.hiroshima.jp",
+ "otake.hiroshima.jp",
+ "saka.hiroshima.jp",
+ "sera.hiroshima.jp",
+ "seranishi.hiroshima.jp",
+ "shinichi.hiroshima.jp",
+ "shobara.hiroshima.jp",
+ "takehara.hiroshima.jp",
+ "abashiri.hokkaido.jp",
+ "abira.hokkaido.jp",
+ "aibetsu.hokkaido.jp",
+ "akabira.hokkaido.jp",
+ "akkeshi.hokkaido.jp",
+ "asahikawa.hokkaido.jp",
+ "ashibetsu.hokkaido.jp",
+ "ashoro.hokkaido.jp",
+ "assabu.hokkaido.jp",
+ "atsuma.hokkaido.jp",
+ "bibai.hokkaido.jp",
+ "biei.hokkaido.jp",
+ "bifuka.hokkaido.jp",
+ "bihoro.hokkaido.jp",
+ "biratori.hokkaido.jp",
+ "chippubetsu.hokkaido.jp",
+ "chitose.hokkaido.jp",
+ "date.hokkaido.jp",
+ "ebetsu.hokkaido.jp",
+ "embetsu.hokkaido.jp",
+ "eniwa.hokkaido.jp",
+ "erimo.hokkaido.jp",
+ "esan.hokkaido.jp",
+ "esashi.hokkaido.jp",
+ "fukagawa.hokkaido.jp",
+ "fukushima.hokkaido.jp",
+ "furano.hokkaido.jp",
+ "furubira.hokkaido.jp",
+ "haboro.hokkaido.jp",
+ "hakodate.hokkaido.jp",
+ "hamatonbetsu.hokkaido.jp",
+ "hidaka.hokkaido.jp",
+ "higashikagura.hokkaido.jp",
+ "higashikawa.hokkaido.jp",
+ "hiroo.hokkaido.jp",
+ "hokuryu.hokkaido.jp",
+ "hokuto.hokkaido.jp",
+ "honbetsu.hokkaido.jp",
+ "horokanai.hokkaido.jp",
+ "horonobe.hokkaido.jp",
+ "ikeda.hokkaido.jp",
+ "imakane.hokkaido.jp",
+ "ishikari.hokkaido.jp",
+ "iwamizawa.hokkaido.jp",
+ "iwanai.hokkaido.jp",
+ "kamifurano.hokkaido.jp",
+ "kamikawa.hokkaido.jp",
+ "kamishihoro.hokkaido.jp",
+ "kamisunagawa.hokkaido.jp",
+ "kamoenai.hokkaido.jp",
+ "kayabe.hokkaido.jp",
+ "kembuchi.hokkaido.jp",
+ "kikonai.hokkaido.jp",
+ "kimobetsu.hokkaido.jp",
+ "kitahiroshima.hokkaido.jp",
+ "kitami.hokkaido.jp",
+ "kiyosato.hokkaido.jp",
+ "koshimizu.hokkaido.jp",
+ "kunneppu.hokkaido.jp",
+ "kuriyama.hokkaido.jp",
+ "kuromatsunai.hokkaido.jp",
+ "kushiro.hokkaido.jp",
+ "kutchan.hokkaido.jp",
+ "kyowa.hokkaido.jp",
+ "mashike.hokkaido.jp",
+ "matsumae.hokkaido.jp",
+ "mikasa.hokkaido.jp",
+ "minamifurano.hokkaido.jp",
+ "mombetsu.hokkaido.jp",
+ "moseushi.hokkaido.jp",
+ "mukawa.hokkaido.jp",
+ "muroran.hokkaido.jp",
+ "naie.hokkaido.jp",
+ "nakagawa.hokkaido.jp",
+ "nakasatsunai.hokkaido.jp",
+ "nakatombetsu.hokkaido.jp",
+ "nanae.hokkaido.jp",
+ "nanporo.hokkaido.jp",
+ "nayoro.hokkaido.jp",
+ "nemuro.hokkaido.jp",
+ "niikappu.hokkaido.jp",
+ "niki.hokkaido.jp",
+ "nishiokoppe.hokkaido.jp",
+ "noboribetsu.hokkaido.jp",
+ "numata.hokkaido.jp",
+ "obihiro.hokkaido.jp",
+ "obira.hokkaido.jp",
+ "oketo.hokkaido.jp",
+ "okoppe.hokkaido.jp",
+ "otaru.hokkaido.jp",
+ "otobe.hokkaido.jp",
+ "otofuke.hokkaido.jp",
+ "otoineppu.hokkaido.jp",
+ "oumu.hokkaido.jp",
+ "ozora.hokkaido.jp",
+ "pippu.hokkaido.jp",
+ "rankoshi.hokkaido.jp",
+ "rebun.hokkaido.jp",
+ "rikubetsu.hokkaido.jp",
+ "rishiri.hokkaido.jp",
+ "rishirifuji.hokkaido.jp",
+ "saroma.hokkaido.jp",
+ "sarufutsu.hokkaido.jp",
+ "shakotan.hokkaido.jp",
+ "shari.hokkaido.jp",
+ "shibecha.hokkaido.jp",
+ "shibetsu.hokkaido.jp",
+ "shikabe.hokkaido.jp",
+ "shikaoi.hokkaido.jp",
+ "shimamaki.hokkaido.jp",
+ "shimizu.hokkaido.jp",
+ "shimokawa.hokkaido.jp",
+ "shinshinotsu.hokkaido.jp",
+ "shintoku.hokkaido.jp",
+ "shiranuka.hokkaido.jp",
+ "shiraoi.hokkaido.jp",
+ "shiriuchi.hokkaido.jp",
+ "sobetsu.hokkaido.jp",
+ "sunagawa.hokkaido.jp",
+ "taiki.hokkaido.jp",
+ "takasu.hokkaido.jp",
+ "takikawa.hokkaido.jp",
+ "takinoue.hokkaido.jp",
+ "teshikaga.hokkaido.jp",
+ "tobetsu.hokkaido.jp",
+ "tohma.hokkaido.jp",
+ "tomakomai.hokkaido.jp",
+ "tomari.hokkaido.jp",
+ "toya.hokkaido.jp",
+ "toyako.hokkaido.jp",
+ "toyotomi.hokkaido.jp",
+ "toyoura.hokkaido.jp",
+ "tsubetsu.hokkaido.jp",
+ "tsukigata.hokkaido.jp",
+ "urakawa.hokkaido.jp",
+ "urausu.hokkaido.jp",
+ "uryu.hokkaido.jp",
+ "utashinai.hokkaido.jp",
+ "wakkanai.hokkaido.jp",
+ "wassamu.hokkaido.jp",
+ "yakumo.hokkaido.jp",
+ "yoichi.hokkaido.jp",
+ "aioi.hyogo.jp",
+ "akashi.hyogo.jp",
+ "ako.hyogo.jp",
+ "amagasaki.hyogo.jp",
+ "aogaki.hyogo.jp",
+ "asago.hyogo.jp",
+ "ashiya.hyogo.jp",
+ "awaji.hyogo.jp",
+ "fukusaki.hyogo.jp",
+ "goshiki.hyogo.jp",
+ "harima.hyogo.jp",
+ "himeji.hyogo.jp",
+ "ichikawa.hyogo.jp",
+ "inagawa.hyogo.jp",
+ "itami.hyogo.jp",
+ "kakogawa.hyogo.jp",
+ "kamigori.hyogo.jp",
+ "kamikawa.hyogo.jp",
+ "kasai.hyogo.jp",
+ "kasuga.hyogo.jp",
+ "kawanishi.hyogo.jp",
+ "miki.hyogo.jp",
+ "minamiawaji.hyogo.jp",
+ "nishinomiya.hyogo.jp",
+ "nishiwaki.hyogo.jp",
+ "ono.hyogo.jp",
+ "sanda.hyogo.jp",
+ "sannan.hyogo.jp",
+ "sasayama.hyogo.jp",
+ "sayo.hyogo.jp",
+ "shingu.hyogo.jp",
+ "shinonsen.hyogo.jp",
+ "shiso.hyogo.jp",
+ "sumoto.hyogo.jp",
+ "taishi.hyogo.jp",
+ "taka.hyogo.jp",
+ "takarazuka.hyogo.jp",
+ "takasago.hyogo.jp",
+ "takino.hyogo.jp",
+ "tamba.hyogo.jp",
+ "tatsuno.hyogo.jp",
+ "toyooka.hyogo.jp",
+ "yabu.hyogo.jp",
+ "yashiro.hyogo.jp",
+ "yoka.hyogo.jp",
+ "yokawa.hyogo.jp",
+ "ami.ibaraki.jp",
+ "asahi.ibaraki.jp",
+ "bando.ibaraki.jp",
+ "chikusei.ibaraki.jp",
+ "daigo.ibaraki.jp",
+ "fujishiro.ibaraki.jp",
+ "hitachi.ibaraki.jp",
+ "hitachinaka.ibaraki.jp",
+ "hitachiomiya.ibaraki.jp",
+ "hitachiota.ibaraki.jp",
+ "ibaraki.ibaraki.jp",
+ "ina.ibaraki.jp",
+ "inashiki.ibaraki.jp",
+ "itako.ibaraki.jp",
+ "iwama.ibaraki.jp",
+ "joso.ibaraki.jp",
+ "kamisu.ibaraki.jp",
+ "kasama.ibaraki.jp",
+ "kashima.ibaraki.jp",
+ "kasumigaura.ibaraki.jp",
+ "koga.ibaraki.jp",
+ "miho.ibaraki.jp",
+ "mito.ibaraki.jp",
+ "moriya.ibaraki.jp",
+ "naka.ibaraki.jp",
+ "namegata.ibaraki.jp",
+ "oarai.ibaraki.jp",
+ "ogawa.ibaraki.jp",
+ "omitama.ibaraki.jp",
+ "ryugasaki.ibaraki.jp",
+ "sakai.ibaraki.jp",
+ "sakuragawa.ibaraki.jp",
+ "shimodate.ibaraki.jp",
+ "shimotsuma.ibaraki.jp",
+ "shirosato.ibaraki.jp",
+ "sowa.ibaraki.jp",
+ "suifu.ibaraki.jp",
+ "takahagi.ibaraki.jp",
+ "tamatsukuri.ibaraki.jp",
+ "tokai.ibaraki.jp",
+ "tomobe.ibaraki.jp",
+ "tone.ibaraki.jp",
+ "toride.ibaraki.jp",
+ "tsuchiura.ibaraki.jp",
+ "tsukuba.ibaraki.jp",
+ "uchihara.ibaraki.jp",
+ "ushiku.ibaraki.jp",
+ "yachiyo.ibaraki.jp",
+ "yamagata.ibaraki.jp",
+ "yawara.ibaraki.jp",
+ "yuki.ibaraki.jp",
+ "anamizu.ishikawa.jp",
+ "hakui.ishikawa.jp",
+ "hakusan.ishikawa.jp",
+ "kaga.ishikawa.jp",
+ "kahoku.ishikawa.jp",
+ "kanazawa.ishikawa.jp",
+ "kawakita.ishikawa.jp",
+ "komatsu.ishikawa.jp",
+ "nakanoto.ishikawa.jp",
+ "nanao.ishikawa.jp",
+ "nomi.ishikawa.jp",
+ "nonoichi.ishikawa.jp",
+ "noto.ishikawa.jp",
+ "shika.ishikawa.jp",
+ "suzu.ishikawa.jp",
+ "tsubata.ishikawa.jp",
+ "tsurugi.ishikawa.jp",
+ "uchinada.ishikawa.jp",
+ "wajima.ishikawa.jp",
+ "fudai.iwate.jp",
+ "fujisawa.iwate.jp",
+ "hanamaki.iwate.jp",
+ "hiraizumi.iwate.jp",
+ "hirono.iwate.jp",
+ "ichinohe.iwate.jp",
+ "ichinoseki.iwate.jp",
+ "iwaizumi.iwate.jp",
+ "iwate.iwate.jp",
+ "joboji.iwate.jp",
+ "kamaishi.iwate.jp",
+ "kanegasaki.iwate.jp",
+ "karumai.iwate.jp",
+ "kawai.iwate.jp",
+ "kitakami.iwate.jp",
+ "kuji.iwate.jp",
+ "kunohe.iwate.jp",
+ "kuzumaki.iwate.jp",
+ "miyako.iwate.jp",
+ "mizusawa.iwate.jp",
+ "morioka.iwate.jp",
+ "ninohe.iwate.jp",
+ "noda.iwate.jp",
+ "ofunato.iwate.jp",
+ "oshu.iwate.jp",
+ "otsuchi.iwate.jp",
+ "rikuzentakata.iwate.jp",
+ "shiwa.iwate.jp",
+ "shizukuishi.iwate.jp",
+ "sumita.iwate.jp",
+ "tanohata.iwate.jp",
+ "tono.iwate.jp",
+ "yahaba.iwate.jp",
+ "yamada.iwate.jp",
+ "ayagawa.kagawa.jp",
+ "higashikagawa.kagawa.jp",
+ "kanonji.kagawa.jp",
+ "kotohira.kagawa.jp",
+ "manno.kagawa.jp",
+ "marugame.kagawa.jp",
+ "mitoyo.kagawa.jp",
+ "naoshima.kagawa.jp",
+ "sanuki.kagawa.jp",
+ "tadotsu.kagawa.jp",
+ "takamatsu.kagawa.jp",
+ "tonosho.kagawa.jp",
+ "uchinomi.kagawa.jp",
+ "utazu.kagawa.jp",
+ "zentsuji.kagawa.jp",
+ "akune.kagoshima.jp",
+ "amami.kagoshima.jp",
+ "hioki.kagoshima.jp",
+ "isa.kagoshima.jp",
+ "isen.kagoshima.jp",
+ "izumi.kagoshima.jp",
+ "kagoshima.kagoshima.jp",
+ "kanoya.kagoshima.jp",
+ "kawanabe.kagoshima.jp",
+ "kinko.kagoshima.jp",
+ "kouyama.kagoshima.jp",
+ "makurazaki.kagoshima.jp",
+ "matsumoto.kagoshima.jp",
+ "minamitane.kagoshima.jp",
+ "nakatane.kagoshima.jp",
+ "nishinoomote.kagoshima.jp",
+ "satsumasendai.kagoshima.jp",
+ "soo.kagoshima.jp",
+ "tarumizu.kagoshima.jp",
+ "yusui.kagoshima.jp",
+ "aikawa.kanagawa.jp",
+ "atsugi.kanagawa.jp",
+ "ayase.kanagawa.jp",
+ "chigasaki.kanagawa.jp",
+ "ebina.kanagawa.jp",
+ "fujisawa.kanagawa.jp",
+ "hadano.kanagawa.jp",
+ "hakone.kanagawa.jp",
+ "hiratsuka.kanagawa.jp",
+ "isehara.kanagawa.jp",
+ "kaisei.kanagawa.jp",
+ "kamakura.kanagawa.jp",
+ "kiyokawa.kanagawa.jp",
+ "matsuda.kanagawa.jp",
+ "minamiashigara.kanagawa.jp",
+ "miura.kanagawa.jp",
+ "nakai.kanagawa.jp",
+ "ninomiya.kanagawa.jp",
+ "odawara.kanagawa.jp",
+ "oi.kanagawa.jp",
+ "oiso.kanagawa.jp",
+ "sagamihara.kanagawa.jp",
+ "samukawa.kanagawa.jp",
+ "tsukui.kanagawa.jp",
+ "yamakita.kanagawa.jp",
+ "yamato.kanagawa.jp",
+ "yokosuka.kanagawa.jp",
+ "yugawara.kanagawa.jp",
+ "zama.kanagawa.jp",
+ "zushi.kanagawa.jp",
+ "aki.kochi.jp",
+ "geisei.kochi.jp",
+ "hidaka.kochi.jp",
+ "higashitsuno.kochi.jp",
+ "ino.kochi.jp",
+ "kagami.kochi.jp",
+ "kami.kochi.jp",
+ "kitagawa.kochi.jp",
+ "kochi.kochi.jp",
+ "mihara.kochi.jp",
+ "motoyama.kochi.jp",
+ "muroto.kochi.jp",
+ "nahari.kochi.jp",
+ "nakamura.kochi.jp",
+ "nankoku.kochi.jp",
+ "nishitosa.kochi.jp",
+ "niyodogawa.kochi.jp",
+ "ochi.kochi.jp",
+ "okawa.kochi.jp",
+ "otoyo.kochi.jp",
+ "otsuki.kochi.jp",
+ "sakawa.kochi.jp",
+ "sukumo.kochi.jp",
+ "susaki.kochi.jp",
+ "tosa.kochi.jp",
+ "tosashimizu.kochi.jp",
+ "toyo.kochi.jp",
+ "tsuno.kochi.jp",
+ "umaji.kochi.jp",
+ "yasuda.kochi.jp",
+ "yusuhara.kochi.jp",
+ "amakusa.kumamoto.jp",
+ "arao.kumamoto.jp",
+ "aso.kumamoto.jp",
+ "choyo.kumamoto.jp",
+ "gyokuto.kumamoto.jp",
+ "hitoyoshi.kumamoto.jp",
+ "kamiamakusa.kumamoto.jp",
+ "kashima.kumamoto.jp",
+ "kikuchi.kumamoto.jp",
+ "kumamoto.kumamoto.jp",
+ "mashiki.kumamoto.jp",
+ "mifune.kumamoto.jp",
+ "minamata.kumamoto.jp",
+ "minamioguni.kumamoto.jp",
+ "nagasu.kumamoto.jp",
+ "nishihara.kumamoto.jp",
+ "oguni.kumamoto.jp",
+ "ozu.kumamoto.jp",
+ "sumoto.kumamoto.jp",
+ "takamori.kumamoto.jp",
+ "uki.kumamoto.jp",
+ "uto.kumamoto.jp",
+ "yamaga.kumamoto.jp",
+ "yamato.kumamoto.jp",
+ "yatsushiro.kumamoto.jp",
+ "ayabe.kyoto.jp",
+ "fukuchiyama.kyoto.jp",
+ "higashiyama.kyoto.jp",
+ "ide.kyoto.jp",
+ "ine.kyoto.jp",
+ "joyo.kyoto.jp",
+ "kameoka.kyoto.jp",
+ "kamo.kyoto.jp",
+ "kita.kyoto.jp",
+ "kizu.kyoto.jp",
+ "kumiyama.kyoto.jp",
+ "kyotamba.kyoto.jp",
+ "kyotanabe.kyoto.jp",
+ "kyotango.kyoto.jp",
+ "maizuru.kyoto.jp",
+ "minami.kyoto.jp",
+ "minamiyamashiro.kyoto.jp",
+ "miyazu.kyoto.jp",
+ "muko.kyoto.jp",
+ "nagaokakyo.kyoto.jp",
+ "nakagyo.kyoto.jp",
+ "nantan.kyoto.jp",
+ "oyamazaki.kyoto.jp",
+ "sakyo.kyoto.jp",
+ "seika.kyoto.jp",
+ "tanabe.kyoto.jp",
+ "uji.kyoto.jp",
+ "ujitawara.kyoto.jp",
+ "wazuka.kyoto.jp",
+ "yamashina.kyoto.jp",
+ "yawata.kyoto.jp",
+ "asahi.mie.jp",
+ "inabe.mie.jp",
+ "ise.mie.jp",
+ "kameyama.mie.jp",
+ "kawagoe.mie.jp",
+ "kiho.mie.jp",
+ "kisosaki.mie.jp",
+ "kiwa.mie.jp",
+ "komono.mie.jp",
+ "kumano.mie.jp",
+ "kuwana.mie.jp",
+ "matsusaka.mie.jp",
+ "meiwa.mie.jp",
+ "mihama.mie.jp",
+ "minamiise.mie.jp",
+ "misugi.mie.jp",
+ "miyama.mie.jp",
+ "nabari.mie.jp",
+ "shima.mie.jp",
+ "suzuka.mie.jp",
+ "tado.mie.jp",
+ "taiki.mie.jp",
+ "taki.mie.jp",
+ "tamaki.mie.jp",
+ "toba.mie.jp",
+ "tsu.mie.jp",
+ "udono.mie.jp",
+ "ureshino.mie.jp",
+ "watarai.mie.jp",
+ "yokkaichi.mie.jp",
+ "furukawa.miyagi.jp",
+ "higashimatsushima.miyagi.jp",
+ "ishinomaki.miyagi.jp",
+ "iwanuma.miyagi.jp",
+ "kakuda.miyagi.jp",
+ "kami.miyagi.jp",
+ "kawasaki.miyagi.jp",
+ "marumori.miyagi.jp",
+ "matsushima.miyagi.jp",
+ "minamisanriku.miyagi.jp",
+ "misato.miyagi.jp",
+ "murata.miyagi.jp",
+ "natori.miyagi.jp",
+ "ogawara.miyagi.jp",
+ "ohira.miyagi.jp",
+ "onagawa.miyagi.jp",
+ "osaki.miyagi.jp",
+ "rifu.miyagi.jp",
+ "semine.miyagi.jp",
+ "shibata.miyagi.jp",
+ "shichikashuku.miyagi.jp",
+ "shikama.miyagi.jp",
+ "shiogama.miyagi.jp",
+ "shiroishi.miyagi.jp",
+ "tagajo.miyagi.jp",
+ "taiwa.miyagi.jp",
+ "tome.miyagi.jp",
+ "tomiya.miyagi.jp",
+ "wakuya.miyagi.jp",
+ "watari.miyagi.jp",
+ "yamamoto.miyagi.jp",
+ "zao.miyagi.jp",
+ "aya.miyazaki.jp",
+ "ebino.miyazaki.jp",
+ "gokase.miyazaki.jp",
+ "hyuga.miyazaki.jp",
+ "kadogawa.miyazaki.jp",
+ "kawaminami.miyazaki.jp",
+ "kijo.miyazaki.jp",
+ "kitagawa.miyazaki.jp",
+ "kitakata.miyazaki.jp",
+ "kitaura.miyazaki.jp",
+ "kobayashi.miyazaki.jp",
+ "kunitomi.miyazaki.jp",
+ "kushima.miyazaki.jp",
+ "mimata.miyazaki.jp",
+ "miyakonojo.miyazaki.jp",
+ "miyazaki.miyazaki.jp",
+ "morotsuka.miyazaki.jp",
+ "nichinan.miyazaki.jp",
+ "nishimera.miyazaki.jp",
+ "nobeoka.miyazaki.jp",
+ "saito.miyazaki.jp",
+ "shiiba.miyazaki.jp",
+ "shintomi.miyazaki.jp",
+ "takaharu.miyazaki.jp",
+ "takanabe.miyazaki.jp",
+ "takazaki.miyazaki.jp",
+ "tsuno.miyazaki.jp",
+ "achi.nagano.jp",
+ "agematsu.nagano.jp",
+ "anan.nagano.jp",
+ "aoki.nagano.jp",
+ "asahi.nagano.jp",
+ "azumino.nagano.jp",
+ "chikuhoku.nagano.jp",
+ "chikuma.nagano.jp",
+ "chino.nagano.jp",
+ "fujimi.nagano.jp",
+ "hakuba.nagano.jp",
+ "hara.nagano.jp",
+ "hiraya.nagano.jp",
+ "iida.nagano.jp",
+ "iijima.nagano.jp",
+ "iiyama.nagano.jp",
+ "iizuna.nagano.jp",
+ "ikeda.nagano.jp",
+ "ikusaka.nagano.jp",
+ "ina.nagano.jp",
+ "karuizawa.nagano.jp",
+ "kawakami.nagano.jp",
+ "kiso.nagano.jp",
+ "kisofukushima.nagano.jp",
+ "kitaaiki.nagano.jp",
+ "komagane.nagano.jp",
+ "komoro.nagano.jp",
+ "matsukawa.nagano.jp",
+ "matsumoto.nagano.jp",
+ "miasa.nagano.jp",
+ "minamiaiki.nagano.jp",
+ "minamimaki.nagano.jp",
+ "minamiminowa.nagano.jp",
+ "minowa.nagano.jp",
+ "miyada.nagano.jp",
+ "miyota.nagano.jp",
+ "mochizuki.nagano.jp",
+ "nagano.nagano.jp",
+ "nagawa.nagano.jp",
+ "nagiso.nagano.jp",
+ "nakagawa.nagano.jp",
+ "nakano.nagano.jp",
+ "nozawaonsen.nagano.jp",
+ "obuse.nagano.jp",
+ "ogawa.nagano.jp",
+ "okaya.nagano.jp",
+ "omachi.nagano.jp",
+ "omi.nagano.jp",
+ "ookuwa.nagano.jp",
+ "ooshika.nagano.jp",
+ "otaki.nagano.jp",
+ "otari.nagano.jp",
+ "sakae.nagano.jp",
+ "sakaki.nagano.jp",
+ "saku.nagano.jp",
+ "sakuho.nagano.jp",
+ "shimosuwa.nagano.jp",
+ "shinanomachi.nagano.jp",
+ "shiojiri.nagano.jp",
+ "suwa.nagano.jp",
+ "suzaka.nagano.jp",
+ "takagi.nagano.jp",
+ "takamori.nagano.jp",
+ "takayama.nagano.jp",
+ "tateshina.nagano.jp",
+ "tatsuno.nagano.jp",
+ "togakushi.nagano.jp",
+ "togura.nagano.jp",
+ "tomi.nagano.jp",
+ "ueda.nagano.jp",
+ "wada.nagano.jp",
+ "yamagata.nagano.jp",
+ "yamanouchi.nagano.jp",
+ "yasaka.nagano.jp",
+ "yasuoka.nagano.jp",
+ "chijiwa.nagasaki.jp",
+ "futsu.nagasaki.jp",
+ "goto.nagasaki.jp",
+ "hasami.nagasaki.jp",
+ "hirado.nagasaki.jp",
+ "iki.nagasaki.jp",
+ "isahaya.nagasaki.jp",
+ "kawatana.nagasaki.jp",
+ "kuchinotsu.nagasaki.jp",
+ "matsuura.nagasaki.jp",
+ "nagasaki.nagasaki.jp",
+ "obama.nagasaki.jp",
+ "omura.nagasaki.jp",
+ "oseto.nagasaki.jp",
+ "saikai.nagasaki.jp",
+ "sasebo.nagasaki.jp",
+ "seihi.nagasaki.jp",
+ "shimabara.nagasaki.jp",
+ "shinkamigoto.nagasaki.jp",
+ "togitsu.nagasaki.jp",
+ "tsushima.nagasaki.jp",
+ "unzen.nagasaki.jp",
+ "ando.nara.jp",
+ "gose.nara.jp",
+ "heguri.nara.jp",
+ "higashiyoshino.nara.jp",
+ "ikaruga.nara.jp",
+ "ikoma.nara.jp",
+ "kamikitayama.nara.jp",
+ "kanmaki.nara.jp",
+ "kashiba.nara.jp",
+ "kashihara.nara.jp",
+ "katsuragi.nara.jp",
+ "kawai.nara.jp",
+ "kawakami.nara.jp",
+ "kawanishi.nara.jp",
+ "koryo.nara.jp",
+ "kurotaki.nara.jp",
+ "mitsue.nara.jp",
+ "miyake.nara.jp",
+ "nara.nara.jp",
+ "nosegawa.nara.jp",
+ "oji.nara.jp",
+ "ouda.nara.jp",
+ "oyodo.nara.jp",
+ "sakurai.nara.jp",
+ "sango.nara.jp",
+ "shimoichi.nara.jp",
+ "shimokitayama.nara.jp",
+ "shinjo.nara.jp",
+ "soni.nara.jp",
+ "takatori.nara.jp",
+ "tawaramoto.nara.jp",
+ "tenkawa.nara.jp",
+ "tenri.nara.jp",
+ "uda.nara.jp",
+ "yamatokoriyama.nara.jp",
+ "yamatotakada.nara.jp",
+ "yamazoe.nara.jp",
+ "yoshino.nara.jp",
+ "aga.niigata.jp",
+ "agano.niigata.jp",
+ "gosen.niigata.jp",
+ "itoigawa.niigata.jp",
+ "izumozaki.niigata.jp",
+ "joetsu.niigata.jp",
+ "kamo.niigata.jp",
+ "kariwa.niigata.jp",
+ "kashiwazaki.niigata.jp",
+ "minamiuonuma.niigata.jp",
+ "mitsuke.niigata.jp",
+ "muika.niigata.jp",
+ "murakami.niigata.jp",
+ "myoko.niigata.jp",
+ "nagaoka.niigata.jp",
+ "niigata.niigata.jp",
+ "ojiya.niigata.jp",
+ "omi.niigata.jp",
+ "sado.niigata.jp",
+ "sanjo.niigata.jp",
+ "seiro.niigata.jp",
+ "seirou.niigata.jp",
+ "sekikawa.niigata.jp",
+ "shibata.niigata.jp",
+ "tagami.niigata.jp",
+ "tainai.niigata.jp",
+ "tochio.niigata.jp",
+ "tokamachi.niigata.jp",
+ "tsubame.niigata.jp",
+ "tsunan.niigata.jp",
+ "uonuma.niigata.jp",
+ "yahiko.niigata.jp",
+ "yoita.niigata.jp",
+ "yuzawa.niigata.jp",
+ "beppu.oita.jp",
+ "bungoono.oita.jp",
+ "bungotakada.oita.jp",
+ "hasama.oita.jp",
+ "hiji.oita.jp",
+ "himeshima.oita.jp",
+ "hita.oita.jp",
+ "kamitsue.oita.jp",
+ "kokonoe.oita.jp",
+ "kuju.oita.jp",
+ "kunisaki.oita.jp",
+ "kusu.oita.jp",
+ "oita.oita.jp",
+ "saiki.oita.jp",
+ "taketa.oita.jp",
+ "tsukumi.oita.jp",
+ "usa.oita.jp",
+ "usuki.oita.jp",
+ "yufu.oita.jp",
+ "akaiwa.okayama.jp",
+ "asakuchi.okayama.jp",
+ "bizen.okayama.jp",
+ "hayashima.okayama.jp",
+ "ibara.okayama.jp",
+ "kagamino.okayama.jp",
+ "kasaoka.okayama.jp",
+ "kibichuo.okayama.jp",
+ "kumenan.okayama.jp",
+ "kurashiki.okayama.jp",
+ "maniwa.okayama.jp",
+ "misaki.okayama.jp",
+ "nagi.okayama.jp",
+ "niimi.okayama.jp",
+ "nishiawakura.okayama.jp",
+ "okayama.okayama.jp",
+ "satosho.okayama.jp",
+ "setouchi.okayama.jp",
+ "shinjo.okayama.jp",
+ "shoo.okayama.jp",
+ "soja.okayama.jp",
+ "takahashi.okayama.jp",
+ "tamano.okayama.jp",
+ "tsuyama.okayama.jp",
+ "wake.okayama.jp",
+ "yakage.okayama.jp",
+ "aguni.okinawa.jp",
+ "ginowan.okinawa.jp",
+ "ginoza.okinawa.jp",
+ "gushikami.okinawa.jp",
+ "haebaru.okinawa.jp",
+ "higashi.okinawa.jp",
+ "hirara.okinawa.jp",
+ "iheya.okinawa.jp",
+ "ishigaki.okinawa.jp",
+ "ishikawa.okinawa.jp",
+ "itoman.okinawa.jp",
+ "izena.okinawa.jp",
+ "kadena.okinawa.jp",
+ "kin.okinawa.jp",
+ "kitadaito.okinawa.jp",
+ "kitanakagusuku.okinawa.jp",
+ "kumejima.okinawa.jp",
+ "kunigami.okinawa.jp",
+ "minamidaito.okinawa.jp",
+ "motobu.okinawa.jp",
+ "nago.okinawa.jp",
+ "naha.okinawa.jp",
+ "nakagusuku.okinawa.jp",
+ "nakijin.okinawa.jp",
+ "nanjo.okinawa.jp",
+ "nishihara.okinawa.jp",
+ "ogimi.okinawa.jp",
+ "okinawa.okinawa.jp",
+ "onna.okinawa.jp",
+ "shimoji.okinawa.jp",
+ "taketomi.okinawa.jp",
+ "tarama.okinawa.jp",
+ "tokashiki.okinawa.jp",
+ "tomigusuku.okinawa.jp",
+ "tonaki.okinawa.jp",
+ "urasoe.okinawa.jp",
+ "uruma.okinawa.jp",
+ "yaese.okinawa.jp",
+ "yomitan.okinawa.jp",
+ "yonabaru.okinawa.jp",
+ "yonaguni.okinawa.jp",
+ "zamami.okinawa.jp",
+ "abeno.osaka.jp",
+ "chihayaakasaka.osaka.jp",
+ "chuo.osaka.jp",
+ "daito.osaka.jp",
+ "fujiidera.osaka.jp",
+ "habikino.osaka.jp",
+ "hannan.osaka.jp",
+ "higashiosaka.osaka.jp",
+ "higashisumiyoshi.osaka.jp",
+ "higashiyodogawa.osaka.jp",
+ "hirakata.osaka.jp",
+ "ibaraki.osaka.jp",
+ "ikeda.osaka.jp",
+ "izumi.osaka.jp",
+ "izumiotsu.osaka.jp",
+ "izumisano.osaka.jp",
+ "kadoma.osaka.jp",
+ "kaizuka.osaka.jp",
+ "kanan.osaka.jp",
+ "kashiwara.osaka.jp",
+ "katano.osaka.jp",
+ "kawachinagano.osaka.jp",
+ "kishiwada.osaka.jp",
+ "kita.osaka.jp",
+ "kumatori.osaka.jp",
+ "matsubara.osaka.jp",
+ "minato.osaka.jp",
+ "minoh.osaka.jp",
+ "misaki.osaka.jp",
+ "moriguchi.osaka.jp",
+ "neyagawa.osaka.jp",
+ "nishi.osaka.jp",
+ "nose.osaka.jp",
+ "osakasayama.osaka.jp",
+ "sakai.osaka.jp",
+ "sayama.osaka.jp",
+ "sennan.osaka.jp",
+ "settsu.osaka.jp",
+ "shijonawate.osaka.jp",
+ "shimamoto.osaka.jp",
+ "suita.osaka.jp",
+ "tadaoka.osaka.jp",
+ "taishi.osaka.jp",
+ "tajiri.osaka.jp",
+ "takaishi.osaka.jp",
+ "takatsuki.osaka.jp",
+ "tondabayashi.osaka.jp",
+ "toyonaka.osaka.jp",
+ "toyono.osaka.jp",
+ "yao.osaka.jp",
+ "ariake.saga.jp",
+ "arita.saga.jp",
+ "fukudomi.saga.jp",
+ "genkai.saga.jp",
+ "hamatama.saga.jp",
+ "hizen.saga.jp",
+ "imari.saga.jp",
+ "kamimine.saga.jp",
+ "kanzaki.saga.jp",
+ "karatsu.saga.jp",
+ "kashima.saga.jp",
+ "kitagata.saga.jp",
+ "kitahata.saga.jp",
+ "kiyama.saga.jp",
+ "kouhoku.saga.jp",
+ "kyuragi.saga.jp",
+ "nishiarita.saga.jp",
+ "ogi.saga.jp",
+ "omachi.saga.jp",
+ "ouchi.saga.jp",
+ "saga.saga.jp",
+ "shiroishi.saga.jp",
+ "taku.saga.jp",
+ "tara.saga.jp",
+ "tosu.saga.jp",
+ "yoshinogari.saga.jp",
+ "arakawa.saitama.jp",
+ "asaka.saitama.jp",
+ "chichibu.saitama.jp",
+ "fujimi.saitama.jp",
+ "fujimino.saitama.jp",
+ "fukaya.saitama.jp",
+ "hanno.saitama.jp",
+ "hanyu.saitama.jp",
+ "hasuda.saitama.jp",
+ "hatogaya.saitama.jp",
+ "hatoyama.saitama.jp",
+ "hidaka.saitama.jp",
+ "higashichichibu.saitama.jp",
+ "higashimatsuyama.saitama.jp",
+ "honjo.saitama.jp",
+ "ina.saitama.jp",
+ "iruma.saitama.jp",
+ "iwatsuki.saitama.jp",
+ "kamiizumi.saitama.jp",
+ "kamikawa.saitama.jp",
+ "kamisato.saitama.jp",
+ "kasukabe.saitama.jp",
+ "kawagoe.saitama.jp",
+ "kawaguchi.saitama.jp",
+ "kawajima.saitama.jp",
+ "kazo.saitama.jp",
+ "kitamoto.saitama.jp",
+ "koshigaya.saitama.jp",
+ "kounosu.saitama.jp",
+ "kuki.saitama.jp",
+ "kumagaya.saitama.jp",
+ "matsubushi.saitama.jp",
+ "minano.saitama.jp",
+ "misato.saitama.jp",
+ "miyashiro.saitama.jp",
+ "miyoshi.saitama.jp",
+ "moroyama.saitama.jp",
+ "nagatoro.saitama.jp",
+ "namegawa.saitama.jp",
+ "niiza.saitama.jp",
+ "ogano.saitama.jp",
+ "ogawa.saitama.jp",
+ "ogose.saitama.jp",
+ "okegawa.saitama.jp",
+ "omiya.saitama.jp",
+ "otaki.saitama.jp",
+ "ranzan.saitama.jp",
+ "ryokami.saitama.jp",
+ "saitama.saitama.jp",
+ "sakado.saitama.jp",
+ "satte.saitama.jp",
+ "sayama.saitama.jp",
+ "shiki.saitama.jp",
+ "shiraoka.saitama.jp",
+ "soka.saitama.jp",
+ "sugito.saitama.jp",
+ "toda.saitama.jp",
+ "tokigawa.saitama.jp",
+ "tokorozawa.saitama.jp",
+ "tsurugashima.saitama.jp",
+ "urawa.saitama.jp",
+ "warabi.saitama.jp",
+ "yashio.saitama.jp",
+ "yokoze.saitama.jp",
+ "yono.saitama.jp",
+ "yorii.saitama.jp",
+ "yoshida.saitama.jp",
+ "yoshikawa.saitama.jp",
+ "yoshimi.saitama.jp",
+ "aisho.shiga.jp",
+ "gamo.shiga.jp",
+ "higashiomi.shiga.jp",
+ "hikone.shiga.jp",
+ "koka.shiga.jp",
+ "konan.shiga.jp",
+ "kosei.shiga.jp",
+ "koto.shiga.jp",
+ "kusatsu.shiga.jp",
+ "maibara.shiga.jp",
+ "moriyama.shiga.jp",
+ "nagahama.shiga.jp",
+ "nishiazai.shiga.jp",
+ "notogawa.shiga.jp",
+ "omihachiman.shiga.jp",
+ "otsu.shiga.jp",
+ "ritto.shiga.jp",
+ "ryuoh.shiga.jp",
+ "takashima.shiga.jp",
+ "takatsuki.shiga.jp",
+ "torahime.shiga.jp",
+ "toyosato.shiga.jp",
+ "yasu.shiga.jp",
+ "akagi.shimane.jp",
+ "ama.shimane.jp",
+ "gotsu.shimane.jp",
+ "hamada.shimane.jp",
+ "higashiizumo.shimane.jp",
+ "hikawa.shimane.jp",
+ "hikimi.shimane.jp",
+ "izumo.shimane.jp",
+ "kakinoki.shimane.jp",
+ "masuda.shimane.jp",
+ "matsue.shimane.jp",
+ "misato.shimane.jp",
+ "nishinoshima.shimane.jp",
+ "ohda.shimane.jp",
+ "okinoshima.shimane.jp",
+ "okuizumo.shimane.jp",
+ "shimane.shimane.jp",
+ "tamayu.shimane.jp",
+ "tsuwano.shimane.jp",
+ "unnan.shimane.jp",
+ "yakumo.shimane.jp",
+ "yasugi.shimane.jp",
+ "yatsuka.shimane.jp",
+ "arai.shizuoka.jp",
+ "atami.shizuoka.jp",
+ "fuji.shizuoka.jp",
+ "fujieda.shizuoka.jp",
+ "fujikawa.shizuoka.jp",
+ "fujinomiya.shizuoka.jp",
+ "fukuroi.shizuoka.jp",
+ "gotemba.shizuoka.jp",
+ "haibara.shizuoka.jp",
+ "hamamatsu.shizuoka.jp",
+ "higashiizu.shizuoka.jp",
+ "ito.shizuoka.jp",
+ "iwata.shizuoka.jp",
+ "izu.shizuoka.jp",
+ "izunokuni.shizuoka.jp",
+ "kakegawa.shizuoka.jp",
+ "kannami.shizuoka.jp",
+ "kawanehon.shizuoka.jp",
+ "kawazu.shizuoka.jp",
+ "kikugawa.shizuoka.jp",
+ "kosai.shizuoka.jp",
+ "makinohara.shizuoka.jp",
+ "matsuzaki.shizuoka.jp",
+ "minamiizu.shizuoka.jp",
+ "mishima.shizuoka.jp",
+ "morimachi.shizuoka.jp",
+ "nishiizu.shizuoka.jp",
+ "numazu.shizuoka.jp",
+ "omaezaki.shizuoka.jp",
+ "shimada.shizuoka.jp",
+ "shimizu.shizuoka.jp",
+ "shimoda.shizuoka.jp",
+ "shizuoka.shizuoka.jp",
+ "susono.shizuoka.jp",
+ "yaizu.shizuoka.jp",
+ "yoshida.shizuoka.jp",
+ "ashikaga.tochigi.jp",
+ "bato.tochigi.jp",
+ "haga.tochigi.jp",
+ "ichikai.tochigi.jp",
+ "iwafune.tochigi.jp",
+ "kaminokawa.tochigi.jp",
+ "kanuma.tochigi.jp",
+ "karasuyama.tochigi.jp",
+ "kuroiso.tochigi.jp",
+ "mashiko.tochigi.jp",
+ "mibu.tochigi.jp",
+ "moka.tochigi.jp",
+ "motegi.tochigi.jp",
+ "nasu.tochigi.jp",
+ "nasushiobara.tochigi.jp",
+ "nikko.tochigi.jp",
+ "nishikata.tochigi.jp",
+ "nogi.tochigi.jp",
+ "ohira.tochigi.jp",
+ "ohtawara.tochigi.jp",
+ "oyama.tochigi.jp",
+ "sakura.tochigi.jp",
+ "sano.tochigi.jp",
+ "shimotsuke.tochigi.jp",
+ "shioya.tochigi.jp",
+ "takanezawa.tochigi.jp",
+ "tochigi.tochigi.jp",
+ "tsuga.tochigi.jp",
+ "ujiie.tochigi.jp",
+ "utsunomiya.tochigi.jp",
+ "yaita.tochigi.jp",
+ "aizumi.tokushima.jp",
+ "anan.tokushima.jp",
+ "ichiba.tokushima.jp",
+ "itano.tokushima.jp",
+ "kainan.tokushima.jp",
+ "komatsushima.tokushima.jp",
+ "matsushige.tokushima.jp",
+ "mima.tokushima.jp",
+ "minami.tokushima.jp",
+ "miyoshi.tokushima.jp",
+ "mugi.tokushima.jp",
+ "nakagawa.tokushima.jp",
+ "naruto.tokushima.jp",
+ "sanagochi.tokushima.jp",
+ "shishikui.tokushima.jp",
+ "tokushima.tokushima.jp",
+ "wajiki.tokushima.jp",
+ "adachi.tokyo.jp",
+ "akiruno.tokyo.jp",
+ "akishima.tokyo.jp",
+ "aogashima.tokyo.jp",
+ "arakawa.tokyo.jp",
+ "bunkyo.tokyo.jp",
+ "chiyoda.tokyo.jp",
+ "chofu.tokyo.jp",
+ "chuo.tokyo.jp",
+ "edogawa.tokyo.jp",
+ "fuchu.tokyo.jp",
+ "fussa.tokyo.jp",
+ "hachijo.tokyo.jp",
+ "hachioji.tokyo.jp",
+ "hamura.tokyo.jp",
+ "higashikurume.tokyo.jp",
+ "higashimurayama.tokyo.jp",
+ "higashiyamato.tokyo.jp",
+ "hino.tokyo.jp",
+ "hinode.tokyo.jp",
+ "hinohara.tokyo.jp",
+ "inagi.tokyo.jp",
+ "itabashi.tokyo.jp",
+ "katsushika.tokyo.jp",
+ "kita.tokyo.jp",
+ "kiyose.tokyo.jp",
+ "kodaira.tokyo.jp",
+ "koganei.tokyo.jp",
+ "kokubunji.tokyo.jp",
+ "komae.tokyo.jp",
+ "koto.tokyo.jp",
+ "kouzushima.tokyo.jp",
+ "kunitachi.tokyo.jp",
+ "machida.tokyo.jp",
+ "meguro.tokyo.jp",
+ "minato.tokyo.jp",
+ "mitaka.tokyo.jp",
+ "mizuho.tokyo.jp",
+ "musashimurayama.tokyo.jp",
+ "musashino.tokyo.jp",
+ "nakano.tokyo.jp",
+ "nerima.tokyo.jp",
+ "ogasawara.tokyo.jp",
+ "okutama.tokyo.jp",
+ "ome.tokyo.jp",
+ "oshima.tokyo.jp",
+ "ota.tokyo.jp",
+ "setagaya.tokyo.jp",
+ "shibuya.tokyo.jp",
+ "shinagawa.tokyo.jp",
+ "shinjuku.tokyo.jp",
+ "suginami.tokyo.jp",
+ "sumida.tokyo.jp",
+ "tachikawa.tokyo.jp",
+ "taito.tokyo.jp",
+ "tama.tokyo.jp",
+ "toshima.tokyo.jp",
+ "chizu.tottori.jp",
+ "hino.tottori.jp",
+ "kawahara.tottori.jp",
+ "koge.tottori.jp",
+ "kotoura.tottori.jp",
+ "misasa.tottori.jp",
+ "nanbu.tottori.jp",
+ "nichinan.tottori.jp",
+ "sakaiminato.tottori.jp",
+ "tottori.tottori.jp",
+ "wakasa.tottori.jp",
+ "yazu.tottori.jp",
+ "yonago.tottori.jp",
+ "asahi.toyama.jp",
+ "fuchu.toyama.jp",
+ "fukumitsu.toyama.jp",
+ "funahashi.toyama.jp",
+ "himi.toyama.jp",
+ "imizu.toyama.jp",
+ "inami.toyama.jp",
+ "johana.toyama.jp",
+ "kamiichi.toyama.jp",
+ "kurobe.toyama.jp",
+ "nakaniikawa.toyama.jp",
+ "namerikawa.toyama.jp",
+ "nanto.toyama.jp",
+ "nyuzen.toyama.jp",
+ "oyabe.toyama.jp",
+ "taira.toyama.jp",
+ "takaoka.toyama.jp",
+ "tateyama.toyama.jp",
+ "toga.toyama.jp",
+ "tonami.toyama.jp",
+ "toyama.toyama.jp",
+ "unazuki.toyama.jp",
+ "uozu.toyama.jp",
+ "yamada.toyama.jp",
+ "arida.wakayama.jp",
+ "aridagawa.wakayama.jp",
+ "gobo.wakayama.jp",
+ "hashimoto.wakayama.jp",
+ "hidaka.wakayama.jp",
+ "hirogawa.wakayama.jp",
+ "inami.wakayama.jp",
+ "iwade.wakayama.jp",
+ "kainan.wakayama.jp",
+ "kamitonda.wakayama.jp",
+ "katsuragi.wakayama.jp",
+ "kimino.wakayama.jp",
+ "kinokawa.wakayama.jp",
+ "kitayama.wakayama.jp",
+ "koya.wakayama.jp",
+ "koza.wakayama.jp",
+ "kozagawa.wakayama.jp",
+ "kudoyama.wakayama.jp",
+ "kushimoto.wakayama.jp",
+ "mihama.wakayama.jp",
+ "misato.wakayama.jp",
+ "nachikatsuura.wakayama.jp",
+ "shingu.wakayama.jp",
+ "shirahama.wakayama.jp",
+ "taiji.wakayama.jp",
+ "tanabe.wakayama.jp",
+ "wakayama.wakayama.jp",
+ "yuasa.wakayama.jp",
+ "yura.wakayama.jp",
+ "asahi.yamagata.jp",
+ "funagata.yamagata.jp",
+ "higashine.yamagata.jp",
+ "iide.yamagata.jp",
+ "kahoku.yamagata.jp",
+ "kaminoyama.yamagata.jp",
+ "kaneyama.yamagata.jp",
+ "kawanishi.yamagata.jp",
+ "mamurogawa.yamagata.jp",
+ "mikawa.yamagata.jp",
+ "murayama.yamagata.jp",
+ "nagai.yamagata.jp",
+ "nakayama.yamagata.jp",
+ "nanyo.yamagata.jp",
+ "nishikawa.yamagata.jp",
+ "obanazawa.yamagata.jp",
+ "oe.yamagata.jp",
+ "oguni.yamagata.jp",
+ "ohkura.yamagata.jp",
+ "oishida.yamagata.jp",
+ "sagae.yamagata.jp",
+ "sakata.yamagata.jp",
+ "sakegawa.yamagata.jp",
+ "shinjo.yamagata.jp",
+ "shirataka.yamagata.jp",
+ "shonai.yamagata.jp",
+ "takahata.yamagata.jp",
+ "tendo.yamagata.jp",
+ "tozawa.yamagata.jp",
+ "tsuruoka.yamagata.jp",
+ "yamagata.yamagata.jp",
+ "yamanobe.yamagata.jp",
+ "yonezawa.yamagata.jp",
+ "yuza.yamagata.jp",
+ "abu.yamaguchi.jp",
+ "hagi.yamaguchi.jp",
+ "hikari.yamaguchi.jp",
+ "hofu.yamaguchi.jp",
+ "iwakuni.yamaguchi.jp",
+ "kudamatsu.yamaguchi.jp",
+ "mitou.yamaguchi.jp",
+ "nagato.yamaguchi.jp",
+ "oshima.yamaguchi.jp",
+ "shimonoseki.yamaguchi.jp",
+ "shunan.yamaguchi.jp",
+ "tabuse.yamaguchi.jp",
+ "tokuyama.yamaguchi.jp",
+ "toyota.yamaguchi.jp",
+ "ube.yamaguchi.jp",
+ "yuu.yamaguchi.jp",
+ "chuo.yamanashi.jp",
+ "doshi.yamanashi.jp",
+ "fuefuki.yamanashi.jp",
+ "fujikawa.yamanashi.jp",
+ "fujikawaguchiko.yamanashi.jp",
+ "fujiyoshida.yamanashi.jp",
+ "hayakawa.yamanashi.jp",
+ "hokuto.yamanashi.jp",
+ "ichikawamisato.yamanashi.jp",
+ "kai.yamanashi.jp",
+ "kofu.yamanashi.jp",
+ "koshu.yamanashi.jp",
+ "kosuge.yamanashi.jp",
+ "minami-alps.yamanashi.jp",
+ "minobu.yamanashi.jp",
+ "nakamichi.yamanashi.jp",
+ "nanbu.yamanashi.jp",
+ "narusawa.yamanashi.jp",
+ "nirasaki.yamanashi.jp",
+ "nishikatsura.yamanashi.jp",
+ "oshino.yamanashi.jp",
+ "otsuki.yamanashi.jp",
+ "showa.yamanashi.jp",
+ "tabayama.yamanashi.jp",
+ "tsuru.yamanashi.jp",
+ "uenohara.yamanashi.jp",
+ "yamanakako.yamanashi.jp",
+ "yamanashi.yamanashi.jp",
+ "*.ke",
+ "kg",
+ "org.kg",
+ "net.kg",
+ "com.kg",
+ "edu.kg",
+ "gov.kg",
+ "mil.kg",
+ "*.kh",
+ "ki",
+ "edu.ki",
+ "biz.ki",
+ "net.ki",
+ "org.ki",
+ "gov.ki",
+ "info.ki",
+ "com.ki",
+ "km",
+ "org.km",
+ "nom.km",
+ "gov.km",
+ "prd.km",
+ "tm.km",
+ "edu.km",
+ "mil.km",
+ "ass.km",
+ "com.km",
+ "coop.km",
+ "asso.km",
+ "presse.km",
+ "medecin.km",
+ "notaires.km",
+ "pharmaciens.km",
+ "veterinaire.km",
+ "gouv.km",
+ "kn",
+ "net.kn",
+ "org.kn",
+ "edu.kn",
+ "gov.kn",
+ "kp",
+ "com.kp",
+ "edu.kp",
+ "gov.kp",
+ "org.kp",
+ "rep.kp",
+ "tra.kp",
+ "kr",
+ "ac.kr",
+ "co.kr",
+ "es.kr",
+ "go.kr",
+ "hs.kr",
+ "kg.kr",
+ "mil.kr",
+ "ms.kr",
+ "ne.kr",
+ "or.kr",
+ "pe.kr",
+ "re.kr",
+ "sc.kr",
+ "busan.kr",
+ "chungbuk.kr",
+ "chungnam.kr",
+ "daegu.kr",
+ "daejeon.kr",
+ "gangwon.kr",
+ "gwangju.kr",
+ "gyeongbuk.kr",
+ "gyeonggi.kr",
+ "gyeongnam.kr",
+ "incheon.kr",
+ "jeju.kr",
+ "jeonbuk.kr",
+ "jeonnam.kr",
+ "seoul.kr",
+ "ulsan.kr",
+ "*.kw",
+ "ky",
+ "edu.ky",
+ "gov.ky",
+ "com.ky",
+ "org.ky",
+ "net.ky",
+ "kz",
+ "org.kz",
+ "edu.kz",
+ "net.kz",
+ "gov.kz",
+ "mil.kz",
+ "com.kz",
+ "la",
+ "int.la",
+ "net.la",
+ "info.la",
+ "edu.la",
+ "gov.la",
+ "per.la",
+ "com.la",
+ "org.la",
+ "lb",
+ "com.lb",
+ "edu.lb",
+ "gov.lb",
+ "net.lb",
+ "org.lb",
+ "lc",
+ "com.lc",
+ "net.lc",
+ "co.lc",
+ "org.lc",
+ "edu.lc",
+ "gov.lc",
+ "li",
+ "lk",
+ "gov.lk",
+ "sch.lk",
+ "net.lk",
+ "int.lk",
+ "com.lk",
+ "org.lk",
+ "edu.lk",
+ "ngo.lk",
+ "soc.lk",
+ "web.lk",
+ "ltd.lk",
+ "assn.lk",
+ "grp.lk",
+ "hotel.lk",
+ "ac.lk",
+ "lr",
+ "com.lr",
+ "edu.lr",
+ "gov.lr",
+ "org.lr",
+ "net.lr",
+ "ls",
+ "co.ls",
+ "org.ls",
+ "lt",
+ "gov.lt",
+ "lu",
+ "lv",
+ "com.lv",
+ "edu.lv",
+ "gov.lv",
+ "org.lv",
+ "mil.lv",
+ "id.lv",
+ "net.lv",
+ "asn.lv",
+ "conf.lv",
+ "ly",
+ "com.ly",
+ "net.ly",
+ "gov.ly",
+ "plc.ly",
+ "edu.ly",
+ "sch.ly",
+ "med.ly",
+ "org.ly",
+ "id.ly",
+ "ma",
+ "co.ma",
+ "net.ma",
+ "gov.ma",
+ "org.ma",
+ "ac.ma",
+ "press.ma",
+ "mc",
+ "tm.mc",
+ "asso.mc",
+ "md",
+ "me",
+ "co.me",
+ "net.me",
+ "org.me",
+ "edu.me",
+ "ac.me",
+ "gov.me",
+ "its.me",
+ "priv.me",
+ "mg",
+ "org.mg",
+ "nom.mg",
+ "gov.mg",
+ "prd.mg",
+ "tm.mg",
+ "edu.mg",
+ "mil.mg",
+ "com.mg",
+ "co.mg",
+ "mh",
+ "mil",
+ "mk",
+ "com.mk",
+ "org.mk",
+ "net.mk",
+ "edu.mk",
+ "gov.mk",
+ "inf.mk",
+ "name.mk",
+ "ml",
+ "com.ml",
+ "edu.ml",
+ "gouv.ml",
+ "gov.ml",
+ "net.ml",
+ "org.ml",
+ "presse.ml",
+ "*.mm",
+ "mn",
+ "gov.mn",
+ "edu.mn",
+ "org.mn",
+ "mo",
+ "com.mo",
+ "net.mo",
+ "org.mo",
+ "edu.mo",
+ "gov.mo",
+ "mobi",
+ "mp",
+ "mq",
+ "mr",
+ "gov.mr",
+ "ms",
+ "com.ms",
+ "edu.ms",
+ "gov.ms",
+ "net.ms",
+ "org.ms",
+ "mt",
+ "com.mt",
+ "edu.mt",
+ "net.mt",
+ "org.mt",
+ "mu",
+ "com.mu",
+ "net.mu",
+ "org.mu",
+ "gov.mu",
+ "ac.mu",
+ "co.mu",
+ "or.mu",
+ "museum",
+ "academy.museum",
+ "agriculture.museum",
+ "air.museum",
+ "airguard.museum",
+ "alabama.museum",
+ "alaska.museum",
+ "amber.museum",
+ "ambulance.museum",
+ "american.museum",
+ "americana.museum",
+ "americanantiques.museum",
+ "americanart.museum",
+ "amsterdam.museum",
+ "and.museum",
+ "annefrank.museum",
+ "anthro.museum",
+ "anthropology.museum",
+ "antiques.museum",
+ "aquarium.museum",
+ "arboretum.museum",
+ "archaeological.museum",
+ "archaeology.museum",
+ "architecture.museum",
+ "art.museum",
+ "artanddesign.museum",
+ "artcenter.museum",
+ "artdeco.museum",
+ "arteducation.museum",
+ "artgallery.museum",
+ "arts.museum",
+ "artsandcrafts.museum",
+ "asmatart.museum",
+ "assassination.museum",
+ "assisi.museum",
+ "association.museum",
+ "astronomy.museum",
+ "atlanta.museum",
+ "austin.museum",
+ "australia.museum",
+ "automotive.museum",
+ "aviation.museum",
+ "axis.museum",
+ "badajoz.museum",
+ "baghdad.museum",
+ "bahn.museum",
+ "bale.museum",
+ "baltimore.museum",
+ "barcelona.museum",
+ "baseball.museum",
+ "basel.museum",
+ "baths.museum",
+ "bauern.museum",
+ "beauxarts.museum",
+ "beeldengeluid.museum",
+ "bellevue.museum",
+ "bergbau.museum",
+ "berkeley.museum",
+ "berlin.museum",
+ "bern.museum",
+ "bible.museum",
+ "bilbao.museum",
+ "bill.museum",
+ "birdart.museum",
+ "birthplace.museum",
+ "bonn.museum",
+ "boston.museum",
+ "botanical.museum",
+ "botanicalgarden.museum",
+ "botanicgarden.museum",
+ "botany.museum",
+ "brandywinevalley.museum",
+ "brasil.museum",
+ "bristol.museum",
+ "british.museum",
+ "britishcolumbia.museum",
+ "broadcast.museum",
+ "brunel.museum",
+ "brussel.museum",
+ "brussels.museum",
+ "bruxelles.museum",
+ "building.museum",
+ "burghof.museum",
+ "bus.museum",
+ "bushey.museum",
+ "cadaques.museum",
+ "california.museum",
+ "cambridge.museum",
+ "can.museum",
+ "canada.museum",
+ "capebreton.museum",
+ "carrier.museum",
+ "cartoonart.museum",
+ "casadelamoneda.museum",
+ "castle.museum",
+ "castres.museum",
+ "celtic.museum",
+ "center.museum",
+ "chattanooga.museum",
+ "cheltenham.museum",
+ "chesapeakebay.museum",
+ "chicago.museum",
+ "children.museum",
+ "childrens.museum",
+ "childrensgarden.museum",
+ "chiropractic.museum",
+ "chocolate.museum",
+ "christiansburg.museum",
+ "cincinnati.museum",
+ "cinema.museum",
+ "circus.museum",
+ "civilisation.museum",
+ "civilization.museum",
+ "civilwar.museum",
+ "clinton.museum",
+ "clock.museum",
+ "coal.museum",
+ "coastaldefence.museum",
+ "cody.museum",
+ "coldwar.museum",
+ "collection.museum",
+ "colonialwilliamsburg.museum",
+ "coloradoplateau.museum",
+ "columbia.museum",
+ "columbus.museum",
+ "communication.museum",
+ "communications.museum",
+ "community.museum",
+ "computer.museum",
+ "computerhistory.museum",
+ "xn--comunicaes-v6a2o.museum",
+ "contemporary.museum",
+ "contemporaryart.museum",
+ "convent.museum",
+ "copenhagen.museum",
+ "corporation.museum",
+ "xn--correios-e-telecomunicaes-ghc29a.museum",
+ "corvette.museum",
+ "costume.museum",
+ "countryestate.museum",
+ "county.museum",
+ "crafts.museum",
+ "cranbrook.museum",
+ "creation.museum",
+ "cultural.museum",
+ "culturalcenter.museum",
+ "culture.museum",
+ "cyber.museum",
+ "cymru.museum",
+ "dali.museum",
+ "dallas.museum",
+ "database.museum",
+ "ddr.museum",
+ "decorativearts.museum",
+ "delaware.museum",
+ "delmenhorst.museum",
+ "denmark.museum",
+ "depot.museum",
+ "design.museum",
+ "detroit.museum",
+ "dinosaur.museum",
+ "discovery.museum",
+ "dolls.museum",
+ "donostia.museum",
+ "durham.museum",
+ "eastafrica.museum",
+ "eastcoast.museum",
+ "education.museum",
+ "educational.museum",
+ "egyptian.museum",
+ "eisenbahn.museum",
+ "elburg.museum",
+ "elvendrell.museum",
+ "embroidery.museum",
+ "encyclopedic.museum",
+ "england.museum",
+ "entomology.museum",
+ "environment.museum",
+ "environmentalconservation.museum",
+ "epilepsy.museum",
+ "essex.museum",
+ "estate.museum",
+ "ethnology.museum",
+ "exeter.museum",
+ "exhibition.museum",
+ "family.museum",
+ "farm.museum",
+ "farmequipment.museum",
+ "farmers.museum",
+ "farmstead.museum",
+ "field.museum",
+ "figueres.museum",
+ "filatelia.museum",
+ "film.museum",
+ "fineart.museum",
+ "finearts.museum",
+ "finland.museum",
+ "flanders.museum",
+ "florida.museum",
+ "force.museum",
+ "fortmissoula.museum",
+ "fortworth.museum",
+ "foundation.museum",
+ "francaise.museum",
+ "frankfurt.museum",
+ "franziskaner.museum",
+ "freemasonry.museum",
+ "freiburg.museum",
+ "fribourg.museum",
+ "frog.museum",
+ "fundacio.museum",
+ "furniture.museum",
+ "gallery.museum",
+ "garden.museum",
+ "gateway.museum",
+ "geelvinck.museum",
+ "gemological.museum",
+ "geology.museum",
+ "georgia.museum",
+ "giessen.museum",
+ "glas.museum",
+ "glass.museum",
+ "gorge.museum",
+ "grandrapids.museum",
+ "graz.museum",
+ "guernsey.museum",
+ "halloffame.museum",
+ "hamburg.museum",
+ "handson.museum",
+ "harvestcelebration.museum",
+ "hawaii.museum",
+ "health.museum",
+ "heimatunduhren.museum",
+ "hellas.museum",
+ "helsinki.museum",
+ "hembygdsforbund.museum",
+ "heritage.museum",
+ "histoire.museum",
+ "historical.museum",
+ "historicalsociety.museum",
+ "historichouses.museum",
+ "historisch.museum",
+ "historisches.museum",
+ "history.museum",
+ "historyofscience.museum",
+ "horology.museum",
+ "house.museum",
+ "humanities.museum",
+ "illustration.museum",
+ "imageandsound.museum",
+ "indian.museum",
+ "indiana.museum",
+ "indianapolis.museum",
+ "indianmarket.museum",
+ "intelligence.museum",
+ "interactive.museum",
+ "iraq.museum",
+ "iron.museum",
+ "isleofman.museum",
+ "jamison.museum",
+ "jefferson.museum",
+ "jerusalem.museum",
+ "jewelry.museum",
+ "jewish.museum",
+ "jewishart.museum",
+ "jfk.museum",
+ "journalism.museum",
+ "judaica.museum",
+ "judygarland.museum",
+ "juedisches.museum",
+ "juif.museum",
+ "karate.museum",
+ "karikatur.museum",
+ "kids.museum",
+ "koebenhavn.museum",
+ "koeln.museum",
+ "kunst.museum",
+ "kunstsammlung.museum",
+ "kunstunddesign.museum",
+ "labor.museum",
+ "labour.museum",
+ "lajolla.museum",
+ "lancashire.museum",
+ "landes.museum",
+ "lans.museum",
+ "xn--lns-qla.museum",
+ "larsson.museum",
+ "lewismiller.museum",
+ "lincoln.museum",
+ "linz.museum",
+ "living.museum",
+ "livinghistory.museum",
+ "localhistory.museum",
+ "london.museum",
+ "losangeles.museum",
+ "louvre.museum",
+ "loyalist.museum",
+ "lucerne.museum",
+ "luxembourg.museum",
+ "luzern.museum",
+ "mad.museum",
+ "madrid.museum",
+ "mallorca.museum",
+ "manchester.museum",
+ "mansion.museum",
+ "mansions.museum",
+ "manx.museum",
+ "marburg.museum",
+ "maritime.museum",
+ "maritimo.museum",
+ "maryland.museum",
+ "marylhurst.museum",
+ "media.museum",
+ "medical.museum",
+ "medizinhistorisches.museum",
+ "meeres.museum",
+ "memorial.museum",
+ "mesaverde.museum",
+ "michigan.museum",
+ "midatlantic.museum",
+ "military.museum",
+ "mill.museum",
+ "miners.museum",
+ "mining.museum",
+ "minnesota.museum",
+ "missile.museum",
+ "missoula.museum",
+ "modern.museum",
+ "moma.museum",
+ "money.museum",
+ "monmouth.museum",
+ "monticello.museum",
+ "montreal.museum",
+ "moscow.museum",
+ "motorcycle.museum",
+ "muenchen.museum",
+ "muenster.museum",
+ "mulhouse.museum",
+ "muncie.museum",
+ "museet.museum",
+ "museumcenter.museum",
+ "museumvereniging.museum",
+ "music.museum",
+ "national.museum",
+ "nationalfirearms.museum",
+ "nationalheritage.museum",
+ "nativeamerican.museum",
+ "naturalhistory.museum",
+ "naturalhistorymuseum.museum",
+ "naturalsciences.museum",
+ "nature.museum",
+ "naturhistorisches.museum",
+ "natuurwetenschappen.museum",
+ "naumburg.museum",
+ "naval.museum",
+ "nebraska.museum",
+ "neues.museum",
+ "newhampshire.museum",
+ "newjersey.museum",
+ "newmexico.museum",
+ "newport.museum",
+ "newspaper.museum",
+ "newyork.museum",
+ "niepce.museum",
+ "norfolk.museum",
+ "north.museum",
+ "nrw.museum",
+ "nuernberg.museum",
+ "nuremberg.museum",
+ "nyc.museum",
+ "nyny.museum",
+ "oceanographic.museum",
+ "oceanographique.museum",
+ "omaha.museum",
+ "online.museum",
+ "ontario.museum",
+ "openair.museum",
+ "oregon.museum",
+ "oregontrail.museum",
+ "otago.museum",
+ "oxford.museum",
+ "pacific.museum",
+ "paderborn.museum",
+ "palace.museum",
+ "paleo.museum",
+ "palmsprings.museum",
+ "panama.museum",
+ "paris.museum",
+ "pasadena.museum",
+ "pharmacy.museum",
+ "philadelphia.museum",
+ "philadelphiaarea.museum",
+ "philately.museum",
+ "phoenix.museum",
+ "photography.museum",
+ "pilots.museum",
+ "pittsburgh.museum",
+ "planetarium.museum",
+ "plantation.museum",
+ "plants.museum",
+ "plaza.museum",
+ "portal.museum",
+ "portland.museum",
+ "portlligat.museum",
+ "posts-and-telecommunications.museum",
+ "preservation.museum",
+ "presidio.museum",
+ "press.museum",
+ "project.museum",
+ "public.museum",
+ "pubol.museum",
+ "quebec.museum",
+ "railroad.museum",
+ "railway.museum",
+ "research.museum",
+ "resistance.museum",
+ "riodejaneiro.museum",
+ "rochester.museum",
+ "rockart.museum",
+ "roma.museum",
+ "russia.museum",
+ "saintlouis.museum",
+ "salem.museum",
+ "salvadordali.museum",
+ "salzburg.museum",
+ "sandiego.museum",
+ "sanfrancisco.museum",
+ "santabarbara.museum",
+ "santacruz.museum",
+ "santafe.museum",
+ "saskatchewan.museum",
+ "satx.museum",
+ "savannahga.museum",
+ "schlesisches.museum",
+ "schoenbrunn.museum",
+ "schokoladen.museum",
+ "school.museum",
+ "schweiz.museum",
+ "science.museum",
+ "scienceandhistory.museum",
+ "scienceandindustry.museum",
+ "sciencecenter.museum",
+ "sciencecenters.museum",
+ "science-fiction.museum",
+ "sciencehistory.museum",
+ "sciences.museum",
+ "sciencesnaturelles.museum",
+ "scotland.museum",
+ "seaport.museum",
+ "settlement.museum",
+ "settlers.museum",
+ "shell.museum",
+ "sherbrooke.museum",
+ "sibenik.museum",
+ "silk.museum",
+ "ski.museum",
+ "skole.museum",
+ "society.museum",
+ "sologne.museum",
+ "soundandvision.museum",
+ "southcarolina.museum",
+ "southwest.museum",
+ "space.museum",
+ "spy.museum",
+ "square.museum",
+ "stadt.museum",
+ "stalbans.museum",
+ "starnberg.museum",
+ "state.museum",
+ "stateofdelaware.museum",
+ "station.museum",
+ "steam.museum",
+ "steiermark.museum",
+ "stjohn.museum",
+ "stockholm.museum",
+ "stpetersburg.museum",
+ "stuttgart.museum",
+ "suisse.museum",
+ "surgeonshall.museum",
+ "surrey.museum",
+ "svizzera.museum",
+ "sweden.museum",
+ "sydney.museum",
+ "tank.museum",
+ "tcm.museum",
+ "technology.museum",
+ "telekommunikation.museum",
+ "television.museum",
+ "texas.museum",
+ "textile.museum",
+ "theater.museum",
+ "time.museum",
+ "timekeeping.museum",
+ "topology.museum",
+ "torino.museum",
+ "touch.museum",
+ "town.museum",
+ "transport.museum",
+ "tree.museum",
+ "trolley.museum",
+ "trust.museum",
+ "trustee.museum",
+ "uhren.museum",
+ "ulm.museum",
+ "undersea.museum",
+ "university.museum",
+ "usa.museum",
+ "usantiques.museum",
+ "usarts.museum",
+ "uscountryestate.museum",
+ "usculture.museum",
+ "usdecorativearts.museum",
+ "usgarden.museum",
+ "ushistory.museum",
+ "ushuaia.museum",
+ "uslivinghistory.museum",
+ "utah.museum",
+ "uvic.museum",
+ "valley.museum",
+ "vantaa.museum",
+ "versailles.museum",
+ "viking.museum",
+ "village.museum",
+ "virginia.museum",
+ "virtual.museum",
+ "virtuel.museum",
+ "vlaanderen.museum",
+ "volkenkunde.museum",
+ "wales.museum",
+ "wallonie.museum",
+ "war.museum",
+ "washingtondc.museum",
+ "watchandclock.museum",
+ "watch-and-clock.museum",
+ "western.museum",
+ "westfalen.museum",
+ "whaling.museum",
+ "wildlife.museum",
+ "williamsburg.museum",
+ "windmill.museum",
+ "workshop.museum",
+ "york.museum",
+ "yorkshire.museum",
+ "yosemite.museum",
+ "youth.museum",
+ "zoological.museum",
+ "zoology.museum",
+ "xn--9dbhblg6di.museum",
+ "xn--h1aegh.museum",
+ "mv",
+ "aero.mv",
+ "biz.mv",
+ "com.mv",
+ "coop.mv",
+ "edu.mv",
+ "gov.mv",
+ "info.mv",
+ "int.mv",
+ "mil.mv",
+ "museum.mv",
+ "name.mv",
+ "net.mv",
+ "org.mv",
+ "pro.mv",
+ "mw",
+ "ac.mw",
+ "biz.mw",
+ "co.mw",
+ "com.mw",
+ "coop.mw",
+ "edu.mw",
+ "gov.mw",
+ "int.mw",
+ "museum.mw",
+ "net.mw",
+ "org.mw",
+ "mx",
+ "com.mx",
+ "org.mx",
+ "gob.mx",
+ "edu.mx",
+ "net.mx",
+ "my",
+ "com.my",
+ "net.my",
+ "org.my",
+ "gov.my",
+ "edu.my",
+ "mil.my",
+ "name.my",
+ "*.mz",
+ "!teledata.mz",
+ "na",
+ "info.na",
+ "pro.na",
+ "name.na",
+ "school.na",
+ "or.na",
+ "dr.na",
+ "us.na",
+ "mx.na",
+ "ca.na",
+ "in.na",
+ "cc.na",
+ "tv.na",
+ "ws.na",
+ "mobi.na",
+ "co.na",
+ "com.na",
+ "org.na",
+ "name",
+ "nc",
+ "asso.nc",
+ "ne",
+ "net",
+ "nf",
+ "com.nf",
+ "net.nf",
+ "per.nf",
+ "rec.nf",
+ "web.nf",
+ "arts.nf",
+ "firm.nf",
+ "info.nf",
+ "other.nf",
+ "store.nf",
+ "ng",
+ "com.ng",
+ "edu.ng",
+ "gov.ng",
+ "i.ng",
+ "mil.ng",
+ "mobi.ng",
+ "name.ng",
+ "net.ng",
+ "org.ng",
+ "sch.ng",
+ "com.ni",
+ "gob.ni",
+ "edu.ni",
+ "org.ni",
+ "nom.ni",
+ "net.ni",
+ "mil.ni",
+ "co.ni",
+ "biz.ni",
+ "web.ni",
+ "int.ni",
+ "ac.ni",
+ "in.ni",
+ "info.ni",
+ "nl",
+ "bv.nl",
+ "no",
+ "fhs.no",
+ "vgs.no",
+ "fylkesbibl.no",
+ "folkebibl.no",
+ "museum.no",
+ "idrett.no",
+ "priv.no",
+ "mil.no",
+ "stat.no",
+ "dep.no",
+ "kommune.no",
+ "herad.no",
+ "aa.no",
+ "ah.no",
+ "bu.no",
+ "fm.no",
+ "hl.no",
+ "hm.no",
+ "jan-mayen.no",
+ "mr.no",
+ "nl.no",
+ "nt.no",
+ "of.no",
+ "ol.no",
+ "oslo.no",
+ "rl.no",
+ "sf.no",
+ "st.no",
+ "svalbard.no",
+ "tm.no",
+ "tr.no",
+ "va.no",
+ "vf.no",
+ "gs.aa.no",
+ "gs.ah.no",
+ "gs.bu.no",
+ "gs.fm.no",
+ "gs.hl.no",
+ "gs.hm.no",
+ "gs.jan-mayen.no",
+ "gs.mr.no",
+ "gs.nl.no",
+ "gs.nt.no",
+ "gs.of.no",
+ "gs.ol.no",
+ "gs.oslo.no",
+ "gs.rl.no",
+ "gs.sf.no",
+ "gs.st.no",
+ "gs.svalbard.no",
+ "gs.tm.no",
+ "gs.tr.no",
+ "gs.va.no",
+ "gs.vf.no",
+ "akrehamn.no",
+ "xn--krehamn-dxa.no",
+ "algard.no",
+ "xn--lgrd-poac.no",
+ "arna.no",
+ "brumunddal.no",
+ "bryne.no",
+ "bronnoysund.no",
+ "xn--brnnysund-m8ac.no",
+ "drobak.no",
+ "xn--drbak-wua.no",
+ "egersund.no",
+ "fetsund.no",
+ "floro.no",
+ "xn--flor-jra.no",
+ "fredrikstad.no",
+ "hokksund.no",
+ "honefoss.no",
+ "xn--hnefoss-q1a.no",
+ "jessheim.no",
+ "jorpeland.no",
+ "xn--jrpeland-54a.no",
+ "kirkenes.no",
+ "kopervik.no",
+ "krokstadelva.no",
+ "langevag.no",
+ "xn--langevg-jxa.no",
+ "leirvik.no",
+ "mjondalen.no",
+ "xn--mjndalen-64a.no",
+ "mo-i-rana.no",
+ "mosjoen.no",
+ "xn--mosjen-eya.no",
+ "nesoddtangen.no",
+ "orkanger.no",
+ "osoyro.no",
+ "xn--osyro-wua.no",
+ "raholt.no",
+ "xn--rholt-mra.no",
+ "sandnessjoen.no",
+ "xn--sandnessjen-ogb.no",
+ "skedsmokorset.no",
+ "slattum.no",
+ "spjelkavik.no",
+ "stathelle.no",
+ "stavern.no",
+ "stjordalshalsen.no",
+ "xn--stjrdalshalsen-sqb.no",
+ "tananger.no",
+ "tranby.no",
+ "vossevangen.no",
+ "afjord.no",
+ "xn--fjord-lra.no",
+ "agdenes.no",
+ "al.no",
+ "xn--l-1fa.no",
+ "alesund.no",
+ "xn--lesund-hua.no",
+ "alstahaug.no",
+ "alta.no",
+ "xn--lt-liac.no",
+ "alaheadju.no",
+ "xn--laheadju-7ya.no",
+ "alvdal.no",
+ "amli.no",
+ "xn--mli-tla.no",
+ "amot.no",
+ "xn--mot-tla.no",
+ "andebu.no",
+ "andoy.no",
+ "xn--andy-ira.no",
+ "andasuolo.no",
+ "ardal.no",
+ "xn--rdal-poa.no",
+ "aremark.no",
+ "arendal.no",
+ "xn--s-1fa.no",
+ "aseral.no",
+ "xn--seral-lra.no",
+ "asker.no",
+ "askim.no",
+ "askvoll.no",
+ "askoy.no",
+ "xn--asky-ira.no",
+ "asnes.no",
+ "xn--snes-poa.no",
+ "audnedaln.no",
+ "aukra.no",
+ "aure.no",
+ "aurland.no",
+ "aurskog-holand.no",
+ "xn--aurskog-hland-jnb.no",
+ "austevoll.no",
+ "austrheim.no",
+ "averoy.no",
+ "xn--avery-yua.no",
+ "balestrand.no",
+ "ballangen.no",
+ "balat.no",
+ "xn--blt-elab.no",
+ "balsfjord.no",
+ "bahccavuotna.no",
+ "xn--bhccavuotna-k7a.no",
+ "bamble.no",
+ "bardu.no",
+ "beardu.no",
+ "beiarn.no",
+ "bajddar.no",
+ "xn--bjddar-pta.no",
+ "baidar.no",
+ "xn--bidr-5nac.no",
+ "berg.no",
+ "bergen.no",
+ "berlevag.no",
+ "xn--berlevg-jxa.no",
+ "bearalvahki.no",
+ "xn--bearalvhki-y4a.no",
+ "bindal.no",
+ "birkenes.no",
+ "bjarkoy.no",
+ "xn--bjarky-fya.no",
+ "bjerkreim.no",
+ "bjugn.no",
+ "bodo.no",
+ "xn--bod-2na.no",
+ "badaddja.no",
+ "xn--bdddj-mrabd.no",
+ "budejju.no",
+ "bokn.no",
+ "bremanger.no",
+ "bronnoy.no",
+ "xn--brnny-wuac.no",
+ "bygland.no",
+ "bykle.no",
+ "barum.no",
+ "xn--brum-voa.no",
+ "bo.telemark.no",
+ "xn--b-5ga.telemark.no",
+ "bo.nordland.no",
+ "xn--b-5ga.nordland.no",
+ "bievat.no",
+ "xn--bievt-0qa.no",
+ "bomlo.no",
+ "xn--bmlo-gra.no",
+ "batsfjord.no",
+ "xn--btsfjord-9za.no",
+ "bahcavuotna.no",
+ "xn--bhcavuotna-s4a.no",
+ "dovre.no",
+ "drammen.no",
+ "drangedal.no",
+ "dyroy.no",
+ "xn--dyry-ira.no",
+ "donna.no",
+ "xn--dnna-gra.no",
+ "eid.no",
+ "eidfjord.no",
+ "eidsberg.no",
+ "eidskog.no",
+ "eidsvoll.no",
+ "eigersund.no",
+ "elverum.no",
+ "enebakk.no",
+ "engerdal.no",
+ "etne.no",
+ "etnedal.no",
+ "evenes.no",
+ "evenassi.no",
+ "xn--eveni-0qa01ga.no",
+ "evje-og-hornnes.no",
+ "farsund.no",
+ "fauske.no",
+ "fuossko.no",
+ "fuoisku.no",
+ "fedje.no",
+ "fet.no",
+ "finnoy.no",
+ "xn--finny-yua.no",
+ "fitjar.no",
+ "fjaler.no",
+ "fjell.no",
+ "flakstad.no",
+ "flatanger.no",
+ "flekkefjord.no",
+ "flesberg.no",
+ "flora.no",
+ "fla.no",
+ "xn--fl-zia.no",
+ "folldal.no",
+ "forsand.no",
+ "fosnes.no",
+ "frei.no",
+ "frogn.no",
+ "froland.no",
+ "frosta.no",
+ "frana.no",
+ "xn--frna-woa.no",
+ "froya.no",
+ "xn--frya-hra.no",
+ "fusa.no",
+ "fyresdal.no",
+ "forde.no",
+ "xn--frde-gra.no",
+ "gamvik.no",
+ "gangaviika.no",
+ "xn--ggaviika-8ya47h.no",
+ "gaular.no",
+ "gausdal.no",
+ "gildeskal.no",
+ "xn--gildeskl-g0a.no",
+ "giske.no",
+ "gjemnes.no",
+ "gjerdrum.no",
+ "gjerstad.no",
+ "gjesdal.no",
+ "gjovik.no",
+ "xn--gjvik-wua.no",
+ "gloppen.no",
+ "gol.no",
+ "gran.no",
+ "grane.no",
+ "granvin.no",
+ "gratangen.no",
+ "grimstad.no",
+ "grong.no",
+ "kraanghke.no",
+ "xn--kranghke-b0a.no",
+ "grue.no",
+ "gulen.no",
+ "hadsel.no",
+ "halden.no",
+ "halsa.no",
+ "hamar.no",
+ "hamaroy.no",
+ "habmer.no",
+ "xn--hbmer-xqa.no",
+ "hapmir.no",
+ "xn--hpmir-xqa.no",
+ "hammerfest.no",
+ "hammarfeasta.no",
+ "xn--hmmrfeasta-s4ac.no",
+ "haram.no",
+ "hareid.no",
+ "harstad.no",
+ "hasvik.no",
+ "aknoluokta.no",
+ "xn--koluokta-7ya57h.no",
+ "hattfjelldal.no",
+ "aarborte.no",
+ "haugesund.no",
+ "hemne.no",
+ "hemnes.no",
+ "hemsedal.no",
+ "heroy.more-og-romsdal.no",
+ "xn--hery-ira.xn--mre-og-romsdal-qqb.no",
+ "heroy.nordland.no",
+ "xn--hery-ira.nordland.no",
+ "hitra.no",
+ "hjartdal.no",
+ "hjelmeland.no",
+ "hobol.no",
+ "xn--hobl-ira.no",
+ "hof.no",
+ "hol.no",
+ "hole.no",
+ "holmestrand.no",
+ "holtalen.no",
+ "xn--holtlen-hxa.no",
+ "hornindal.no",
+ "horten.no",
+ "hurdal.no",
+ "hurum.no",
+ "hvaler.no",
+ "hyllestad.no",
+ "hagebostad.no",
+ "xn--hgebostad-g3a.no",
+ "hoyanger.no",
+ "xn--hyanger-q1a.no",
+ "hoylandet.no",
+ "xn--hylandet-54a.no",
+ "ha.no",
+ "xn--h-2fa.no",
+ "ibestad.no",
+ "inderoy.no",
+ "xn--indery-fya.no",
+ "iveland.no",
+ "jevnaker.no",
+ "jondal.no",
+ "jolster.no",
+ "xn--jlster-bya.no",
+ "karasjok.no",
+ "karasjohka.no",
+ "xn--krjohka-hwab49j.no",
+ "karlsoy.no",
+ "galsa.no",
+ "xn--gls-elac.no",
+ "karmoy.no",
+ "xn--karmy-yua.no",
+ "kautokeino.no",
+ "guovdageaidnu.no",
+ "klepp.no",
+ "klabu.no",
+ "xn--klbu-woa.no",
+ "kongsberg.no",
+ "kongsvinger.no",
+ "kragero.no",
+ "xn--krager-gya.no",
+ "kristiansand.no",
+ "kristiansund.no",
+ "krodsherad.no",
+ "xn--krdsherad-m8a.no",
+ "kvalsund.no",
+ "rahkkeravju.no",
+ "xn--rhkkervju-01af.no",
+ "kvam.no",
+ "kvinesdal.no",
+ "kvinnherad.no",
+ "kviteseid.no",
+ "kvitsoy.no",
+ "xn--kvitsy-fya.no",
+ "kvafjord.no",
+ "xn--kvfjord-nxa.no",
+ "giehtavuoatna.no",
+ "kvanangen.no",
+ "xn--kvnangen-k0a.no",
+ "navuotna.no",
+ "xn--nvuotna-hwa.no",
+ "kafjord.no",
+ "xn--kfjord-iua.no",
+ "gaivuotna.no",
+ "xn--givuotna-8ya.no",
+ "larvik.no",
+ "lavangen.no",
+ "lavagis.no",
+ "loabat.no",
+ "xn--loabt-0qa.no",
+ "lebesby.no",
+ "davvesiida.no",
+ "leikanger.no",
+ "leirfjord.no",
+ "leka.no",
+ "leksvik.no",
+ "lenvik.no",
+ "leangaviika.no",
+ "xn--leagaviika-52b.no",
+ "lesja.no",
+ "levanger.no",
+ "lier.no",
+ "lierne.no",
+ "lillehammer.no",
+ "lillesand.no",
+ "lindesnes.no",
+ "lindas.no",
+ "xn--linds-pra.no",
+ "lom.no",
+ "loppa.no",
+ "lahppi.no",
+ "xn--lhppi-xqa.no",
+ "lund.no",
+ "lunner.no",
+ "luroy.no",
+ "xn--lury-ira.no",
+ "luster.no",
+ "lyngdal.no",
+ "lyngen.no",
+ "ivgu.no",
+ "lardal.no",
+ "lerdal.no",
+ "xn--lrdal-sra.no",
+ "lodingen.no",
+ "xn--ldingen-q1a.no",
+ "lorenskog.no",
+ "xn--lrenskog-54a.no",
+ "loten.no",
+ "xn--lten-gra.no",
+ "malvik.no",
+ "masoy.no",
+ "xn--msy-ula0h.no",
+ "muosat.no",
+ "xn--muost-0qa.no",
+ "mandal.no",
+ "marker.no",
+ "marnardal.no",
+ "masfjorden.no",
+ "meland.no",
+ "meldal.no",
+ "melhus.no",
+ "meloy.no",
+ "xn--mely-ira.no",
+ "meraker.no",
+ "xn--merker-kua.no",
+ "moareke.no",
+ "xn--moreke-jua.no",
+ "midsund.no",
+ "midtre-gauldal.no",
+ "modalen.no",
+ "modum.no",
+ "molde.no",
+ "moskenes.no",
+ "moss.no",
+ "mosvik.no",
+ "malselv.no",
+ "xn--mlselv-iua.no",
+ "malatvuopmi.no",
+ "xn--mlatvuopmi-s4a.no",
+ "namdalseid.no",
+ "aejrie.no",
+ "namsos.no",
+ "namsskogan.no",
+ "naamesjevuemie.no",
+ "xn--nmesjevuemie-tcba.no",
+ "laakesvuemie.no",
+ "nannestad.no",
+ "narvik.no",
+ "narviika.no",
+ "naustdal.no",
+ "nedre-eiker.no",
+ "nes.akershus.no",
+ "nes.buskerud.no",
+ "nesna.no",
+ "nesodden.no",
+ "nesseby.no",
+ "unjarga.no",
+ "xn--unjrga-rta.no",
+ "nesset.no",
+ "nissedal.no",
+ "nittedal.no",
+ "nord-aurdal.no",
+ "nord-fron.no",
+ "nord-odal.no",
+ "norddal.no",
+ "nordkapp.no",
+ "davvenjarga.no",
+ "xn--davvenjrga-y4a.no",
+ "nordre-land.no",
+ "nordreisa.no",
+ "raisa.no",
+ "xn--risa-5na.no",
+ "nore-og-uvdal.no",
+ "notodden.no",
+ "naroy.no",
+ "xn--nry-yla5g.no",
+ "notteroy.no",
+ "xn--nttery-byae.no",
+ "odda.no",
+ "oksnes.no",
+ "xn--ksnes-uua.no",
+ "oppdal.no",
+ "oppegard.no",
+ "xn--oppegrd-ixa.no",
+ "orkdal.no",
+ "orland.no",
+ "xn--rland-uua.no",
+ "orskog.no",
+ "xn--rskog-uua.no",
+ "orsta.no",
+ "xn--rsta-fra.no",
+ "os.hedmark.no",
+ "os.hordaland.no",
+ "osen.no",
+ "osteroy.no",
+ "xn--ostery-fya.no",
+ "ostre-toten.no",
+ "xn--stre-toten-zcb.no",
+ "overhalla.no",
+ "ovre-eiker.no",
+ "xn--vre-eiker-k8a.no",
+ "oyer.no",
+ "xn--yer-zna.no",
+ "oygarden.no",
+ "xn--ygarden-p1a.no",
+ "oystre-slidre.no",
+ "xn--ystre-slidre-ujb.no",
+ "porsanger.no",
+ "porsangu.no",
+ "xn--porsgu-sta26f.no",
+ "porsgrunn.no",
+ "radoy.no",
+ "xn--rady-ira.no",
+ "rakkestad.no",
+ "rana.no",
+ "ruovat.no",
+ "randaberg.no",
+ "rauma.no",
+ "rendalen.no",
+ "rennebu.no",
+ "rennesoy.no",
+ "xn--rennesy-v1a.no",
+ "rindal.no",
+ "ringebu.no",
+ "ringerike.no",
+ "ringsaker.no",
+ "rissa.no",
+ "risor.no",
+ "xn--risr-ira.no",
+ "roan.no",
+ "rollag.no",
+ "rygge.no",
+ "ralingen.no",
+ "xn--rlingen-mxa.no",
+ "rodoy.no",
+ "xn--rdy-0nab.no",
+ "romskog.no",
+ "xn--rmskog-bya.no",
+ "roros.no",
+ "xn--rros-gra.no",
+ "rost.no",
+ "xn--rst-0na.no",
+ "royken.no",
+ "xn--ryken-vua.no",
+ "royrvik.no",
+ "xn--ryrvik-bya.no",
+ "rade.no",
+ "xn--rde-ula.no",
+ "salangen.no",
+ "siellak.no",
+ "saltdal.no",
+ "salat.no",
+ "xn--slt-elab.no",
+ "xn--slat-5na.no",
+ "samnanger.no",
+ "sande.more-og-romsdal.no",
+ "sande.xn--mre-og-romsdal-qqb.no",
+ "sande.vestfold.no",
+ "sandefjord.no",
+ "sandnes.no",
+ "sandoy.no",
+ "xn--sandy-yua.no",
+ "sarpsborg.no",
+ "sauda.no",
+ "sauherad.no",
+ "sel.no",
+ "selbu.no",
+ "selje.no",
+ "seljord.no",
+ "sigdal.no",
+ "siljan.no",
+ "sirdal.no",
+ "skaun.no",
+ "skedsmo.no",
+ "ski.no",
+ "skien.no",
+ "skiptvet.no",
+ "skjervoy.no",
+ "xn--skjervy-v1a.no",
+ "skierva.no",
+ "xn--skierv-uta.no",
+ "skjak.no",
+ "xn--skjk-soa.no",
+ "skodje.no",
+ "skanland.no",
+ "xn--sknland-fxa.no",
+ "skanit.no",
+ "xn--sknit-yqa.no",
+ "smola.no",
+ "xn--smla-hra.no",
+ "snillfjord.no",
+ "snasa.no",
+ "xn--snsa-roa.no",
+ "snoasa.no",
+ "snaase.no",
+ "xn--snase-nra.no",
+ "sogndal.no",
+ "sokndal.no",
+ "sola.no",
+ "solund.no",
+ "songdalen.no",
+ "sortland.no",
+ "spydeberg.no",
+ "stange.no",
+ "stavanger.no",
+ "steigen.no",
+ "steinkjer.no",
+ "stjordal.no",
+ "xn--stjrdal-s1a.no",
+ "stokke.no",
+ "stor-elvdal.no",
+ "stord.no",
+ "stordal.no",
+ "storfjord.no",
+ "omasvuotna.no",
+ "strand.no",
+ "stranda.no",
+ "stryn.no",
+ "sula.no",
+ "suldal.no",
+ "sund.no",
+ "sunndal.no",
+ "surnadal.no",
+ "sveio.no",
+ "svelvik.no",
+ "sykkylven.no",
+ "sogne.no",
+ "xn--sgne-gra.no",
+ "somna.no",
+ "xn--smna-gra.no",
+ "sondre-land.no",
+ "xn--sndre-land-0cb.no",
+ "sor-aurdal.no",
+ "xn--sr-aurdal-l8a.no",
+ "sor-fron.no",
+ "xn--sr-fron-q1a.no",
+ "sor-odal.no",
+ "xn--sr-odal-q1a.no",
+ "sor-varanger.no",
+ "xn--sr-varanger-ggb.no",
+ "matta-varjjat.no",
+ "xn--mtta-vrjjat-k7af.no",
+ "sorfold.no",
+ "xn--srfold-bya.no",
+ "sorreisa.no",
+ "xn--srreisa-q1a.no",
+ "sorum.no",
+ "xn--srum-gra.no",
+ "tana.no",
+ "deatnu.no",
+ "time.no",
+ "tingvoll.no",
+ "tinn.no",
+ "tjeldsund.no",
+ "dielddanuorri.no",
+ "tjome.no",
+ "xn--tjme-hra.no",
+ "tokke.no",
+ "tolga.no",
+ "torsken.no",
+ "tranoy.no",
+ "xn--trany-yua.no",
+ "tromso.no",
+ "xn--troms-zua.no",
+ "tromsa.no",
+ "romsa.no",
+ "trondheim.no",
+ "troandin.no",
+ "trysil.no",
+ "trana.no",
+ "xn--trna-woa.no",
+ "trogstad.no",
+ "xn--trgstad-r1a.no",
+ "tvedestrand.no",
+ "tydal.no",
+ "tynset.no",
+ "tysfjord.no",
+ "divtasvuodna.no",
+ "divttasvuotna.no",
+ "tysnes.no",
+ "tysvar.no",
+ "xn--tysvr-vra.no",
+ "tonsberg.no",
+ "xn--tnsberg-q1a.no",
+ "ullensaker.no",
+ "ullensvang.no",
+ "ulvik.no",
+ "utsira.no",
+ "vadso.no",
+ "xn--vads-jra.no",
+ "cahcesuolo.no",
+ "xn--hcesuolo-7ya35b.no",
+ "vaksdal.no",
+ "valle.no",
+ "vang.no",
+ "vanylven.no",
+ "vardo.no",
+ "xn--vard-jra.no",
+ "varggat.no",
+ "xn--vrggt-xqad.no",
+ "vefsn.no",
+ "vaapste.no",
+ "vega.no",
+ "vegarshei.no",
+ "xn--vegrshei-c0a.no",
+ "vennesla.no",
+ "verdal.no",
+ "verran.no",
+ "vestby.no",
+ "vestnes.no",
+ "vestre-slidre.no",
+ "vestre-toten.no",
+ "vestvagoy.no",
+ "xn--vestvgy-ixa6o.no",
+ "vevelstad.no",
+ "vik.no",
+ "vikna.no",
+ "vindafjord.no",
+ "volda.no",
+ "voss.no",
+ "varoy.no",
+ "xn--vry-yla5g.no",
+ "vagan.no",
+ "xn--vgan-qoa.no",
+ "voagat.no",
+ "vagsoy.no",
+ "xn--vgsy-qoa0j.no",
+ "vaga.no",
+ "xn--vg-yiab.no",
+ "valer.ostfold.no",
+ "xn--vler-qoa.xn--stfold-9xa.no",
+ "valer.hedmark.no",
+ "xn--vler-qoa.hedmark.no",
+ "*.np",
+ "nr",
+ "biz.nr",
+ "info.nr",
+ "gov.nr",
+ "edu.nr",
+ "org.nr",
+ "net.nr",
+ "com.nr",
+ "nu",
+ "nz",
+ "ac.nz",
+ "co.nz",
+ "cri.nz",
+ "geek.nz",
+ "gen.nz",
+ "govt.nz",
+ "health.nz",
+ "iwi.nz",
+ "kiwi.nz",
+ "maori.nz",
+ "mil.nz",
+ "xn--mori-qsa.nz",
+ "net.nz",
+ "org.nz",
+ "parliament.nz",
+ "school.nz",
+ "om",
+ "co.om",
+ "com.om",
+ "edu.om",
+ "gov.om",
+ "med.om",
+ "museum.om",
+ "net.om",
+ "org.om",
+ "pro.om",
+ "org",
+ "pa",
+ "ac.pa",
+ "gob.pa",
+ "com.pa",
+ "org.pa",
+ "sld.pa",
+ "edu.pa",
+ "net.pa",
+ "ing.pa",
+ "abo.pa",
+ "med.pa",
+ "nom.pa",
+ "pe",
+ "edu.pe",
+ "gob.pe",
+ "nom.pe",
+ "mil.pe",
+ "org.pe",
+ "com.pe",
+ "net.pe",
+ "pf",
+ "com.pf",
+ "org.pf",
+ "edu.pf",
+ "*.pg",
+ "ph",
+ "com.ph",
+ "net.ph",
+ "org.ph",
+ "gov.ph",
+ "edu.ph",
+ "ngo.ph",
+ "mil.ph",
+ "i.ph",
+ "pk",
+ "com.pk",
+ "net.pk",
+ "edu.pk",
+ "org.pk",
+ "fam.pk",
+ "biz.pk",
+ "web.pk",
+ "gov.pk",
+ "gob.pk",
+ "gok.pk",
+ "gon.pk",
+ "gop.pk",
+ "gos.pk",
+ "info.pk",
+ "pl",
+ "com.pl",
+ "net.pl",
+ "org.pl",
+ "aid.pl",
+ "agro.pl",
+ "atm.pl",
+ "auto.pl",
+ "biz.pl",
+ "edu.pl",
+ "gmina.pl",
+ "gsm.pl",
+ "info.pl",
+ "mail.pl",
+ "miasta.pl",
+ "media.pl",
+ "mil.pl",
+ "nieruchomosci.pl",
+ "nom.pl",
+ "pc.pl",
+ "powiat.pl",
+ "priv.pl",
+ "realestate.pl",
+ "rel.pl",
+ "sex.pl",
+ "shop.pl",
+ "sklep.pl",
+ "sos.pl",
+ "szkola.pl",
+ "targi.pl",
+ "tm.pl",
+ "tourism.pl",
+ "travel.pl",
+ "turystyka.pl",
+ "gov.pl",
+ "ap.gov.pl",
+ "ic.gov.pl",
+ "is.gov.pl",
+ "us.gov.pl",
+ "kmpsp.gov.pl",
+ "kppsp.gov.pl",
+ "kwpsp.gov.pl",
+ "psp.gov.pl",
+ "wskr.gov.pl",
+ "kwp.gov.pl",
+ "mw.gov.pl",
+ "ug.gov.pl",
+ "um.gov.pl",
+ "umig.gov.pl",
+ "ugim.gov.pl",
+ "upow.gov.pl",
+ "uw.gov.pl",
+ "starostwo.gov.pl",
+ "pa.gov.pl",
+ "po.gov.pl",
+ "psse.gov.pl",
+ "pup.gov.pl",
+ "rzgw.gov.pl",
+ "sa.gov.pl",
+ "so.gov.pl",
+ "sr.gov.pl",
+ "wsa.gov.pl",
+ "sko.gov.pl",
+ "uzs.gov.pl",
+ "wiih.gov.pl",
+ "winb.gov.pl",
+ "pinb.gov.pl",
+ "wios.gov.pl",
+ "witd.gov.pl",
+ "wzmiuw.gov.pl",
+ "piw.gov.pl",
+ "wiw.gov.pl",
+ "griw.gov.pl",
+ "wif.gov.pl",
+ "oum.gov.pl",
+ "sdn.gov.pl",
+ "zp.gov.pl",
+ "uppo.gov.pl",
+ "mup.gov.pl",
+ "wuoz.gov.pl",
+ "konsulat.gov.pl",
+ "oirm.gov.pl",
+ "augustow.pl",
+ "babia-gora.pl",
+ "bedzin.pl",
+ "beskidy.pl",
+ "bialowieza.pl",
+ "bialystok.pl",
+ "bielawa.pl",
+ "bieszczady.pl",
+ "boleslawiec.pl",
+ "bydgoszcz.pl",
+ "bytom.pl",
+ "cieszyn.pl",
+ "czeladz.pl",
+ "czest.pl",
+ "dlugoleka.pl",
+ "elblag.pl",
+ "elk.pl",
+ "glogow.pl",
+ "gniezno.pl",
+ "gorlice.pl",
+ "grajewo.pl",
+ "ilawa.pl",
+ "jaworzno.pl",
+ "jelenia-gora.pl",
+ "jgora.pl",
+ "kalisz.pl",
+ "kazimierz-dolny.pl",
+ "karpacz.pl",
+ "kartuzy.pl",
+ "kaszuby.pl",
+ "katowice.pl",
+ "kepno.pl",
+ "ketrzyn.pl",
+ "klodzko.pl",
+ "kobierzyce.pl",
+ "kolobrzeg.pl",
+ "konin.pl",
+ "konskowola.pl",
+ "kutno.pl",
+ "lapy.pl",
+ "lebork.pl",
+ "legnica.pl",
+ "lezajsk.pl",
+ "limanowa.pl",
+ "lomza.pl",
+ "lowicz.pl",
+ "lubin.pl",
+ "lukow.pl",
+ "malbork.pl",
+ "malopolska.pl",
+ "mazowsze.pl",
+ "mazury.pl",
+ "mielec.pl",
+ "mielno.pl",
+ "mragowo.pl",
+ "naklo.pl",
+ "nowaruda.pl",
+ "nysa.pl",
+ "olawa.pl",
+ "olecko.pl",
+ "olkusz.pl",
+ "olsztyn.pl",
+ "opoczno.pl",
+ "opole.pl",
+ "ostroda.pl",
+ "ostroleka.pl",
+ "ostrowiec.pl",
+ "ostrowwlkp.pl",
+ "pila.pl",
+ "pisz.pl",
+ "podhale.pl",
+ "podlasie.pl",
+ "polkowice.pl",
+ "pomorze.pl",
+ "pomorskie.pl",
+ "prochowice.pl",
+ "pruszkow.pl",
+ "przeworsk.pl",
+ "pulawy.pl",
+ "radom.pl",
+ "rawa-maz.pl",
+ "rybnik.pl",
+ "rzeszow.pl",
+ "sanok.pl",
+ "sejny.pl",
+ "slask.pl",
+ "slupsk.pl",
+ "sosnowiec.pl",
+ "stalowa-wola.pl",
+ "skoczow.pl",
+ "starachowice.pl",
+ "stargard.pl",
+ "suwalki.pl",
+ "swidnica.pl",
+ "swiebodzin.pl",
+ "swinoujscie.pl",
+ "szczecin.pl",
+ "szczytno.pl",
+ "tarnobrzeg.pl",
+ "tgory.pl",
+ "turek.pl",
+ "tychy.pl",
+ "ustka.pl",
+ "walbrzych.pl",
+ "warmia.pl",
+ "warszawa.pl",
+ "waw.pl",
+ "wegrow.pl",
+ "wielun.pl",
+ "wlocl.pl",
+ "wloclawek.pl",
+ "wodzislaw.pl",
+ "wolomin.pl",
+ "wroclaw.pl",
+ "zachpomor.pl",
+ "zagan.pl",
+ "zarow.pl",
+ "zgora.pl",
+ "zgorzelec.pl",
+ "pm",
+ "pn",
+ "gov.pn",
+ "co.pn",
+ "org.pn",
+ "edu.pn",
+ "net.pn",
+ "post",
+ "pr",
+ "com.pr",
+ "net.pr",
+ "org.pr",
+ "gov.pr",
+ "edu.pr",
+ "isla.pr",
+ "pro.pr",
+ "biz.pr",
+ "info.pr",
+ "name.pr",
+ "est.pr",
+ "prof.pr",
+ "ac.pr",
+ "pro",
+ "aaa.pro",
+ "aca.pro",
+ "acct.pro",
+ "avocat.pro",
+ "bar.pro",
+ "cpa.pro",
+ "eng.pro",
+ "jur.pro",
+ "law.pro",
+ "med.pro",
+ "recht.pro",
+ "ps",
+ "edu.ps",
+ "gov.ps",
+ "sec.ps",
+ "plo.ps",
+ "com.ps",
+ "org.ps",
+ "net.ps",
+ "pt",
+ "net.pt",
+ "gov.pt",
+ "org.pt",
+ "edu.pt",
+ "int.pt",
+ "publ.pt",
+ "com.pt",
+ "nome.pt",
+ "pw",
+ "co.pw",
+ "ne.pw",
+ "or.pw",
+ "ed.pw",
+ "go.pw",
+ "belau.pw",
+ "py",
+ "com.py",
+ "coop.py",
+ "edu.py",
+ "gov.py",
+ "mil.py",
+ "net.py",
+ "org.py",
+ "qa",
+ "com.qa",
+ "edu.qa",
+ "gov.qa",
+ "mil.qa",
+ "name.qa",
+ "net.qa",
+ "org.qa",
+ "sch.qa",
+ "re",
+ "asso.re",
+ "com.re",
+ "nom.re",
+ "ro",
+ "arts.ro",
+ "com.ro",
+ "firm.ro",
+ "info.ro",
+ "nom.ro",
+ "nt.ro",
+ "org.ro",
+ "rec.ro",
+ "store.ro",
+ "tm.ro",
+ "www.ro",
+ "rs",
+ "ac.rs",
+ "co.rs",
+ "edu.rs",
+ "gov.rs",
+ "in.rs",
+ "org.rs",
+ "ru",
+ "ac.ru",
+ "com.ru",
+ "edu.ru",
+ "int.ru",
+ "net.ru",
+ "org.ru",
+ "pp.ru",
+ "adygeya.ru",
+ "altai.ru",
+ "amur.ru",
+ "arkhangelsk.ru",
+ "astrakhan.ru",
+ "bashkiria.ru",
+ "belgorod.ru",
+ "bir.ru",
+ "bryansk.ru",
+ "buryatia.ru",
+ "cbg.ru",
+ "chel.ru",
+ "chelyabinsk.ru",
+ "chita.ru",
+ "chukotka.ru",
+ "chuvashia.ru",
+ "dagestan.ru",
+ "dudinka.ru",
+ "e-burg.ru",
+ "grozny.ru",
+ "irkutsk.ru",
+ "ivanovo.ru",
+ "izhevsk.ru",
+ "jar.ru",
+ "joshkar-ola.ru",
+ "kalmykia.ru",
+ "kaluga.ru",
+ "kamchatka.ru",
+ "karelia.ru",
+ "kazan.ru",
+ "kchr.ru",
+ "kemerovo.ru",
+ "khabarovsk.ru",
+ "khakassia.ru",
+ "khv.ru",
+ "kirov.ru",
+ "koenig.ru",
+ "komi.ru",
+ "kostroma.ru",
+ "krasnoyarsk.ru",
+ "kuban.ru",
+ "kurgan.ru",
+ "kursk.ru",
+ "lipetsk.ru",
+ "magadan.ru",
+ "mari.ru",
+ "mari-el.ru",
+ "marine.ru",
+ "mordovia.ru",
+ "msk.ru",
+ "murmansk.ru",
+ "nalchik.ru",
+ "nnov.ru",
+ "nov.ru",
+ "novosibirsk.ru",
+ "nsk.ru",
+ "omsk.ru",
+ "orenburg.ru",
+ "oryol.ru",
+ "palana.ru",
+ "penza.ru",
+ "perm.ru",
+ "ptz.ru",
+ "rnd.ru",
+ "ryazan.ru",
+ "sakhalin.ru",
+ "samara.ru",
+ "saratov.ru",
+ "simbirsk.ru",
+ "smolensk.ru",
+ "spb.ru",
+ "stavropol.ru",
+ "stv.ru",
+ "surgut.ru",
+ "tambov.ru",
+ "tatarstan.ru",
+ "tom.ru",
+ "tomsk.ru",
+ "tsaritsyn.ru",
+ "tsk.ru",
+ "tula.ru",
+ "tuva.ru",
+ "tver.ru",
+ "tyumen.ru",
+ "udm.ru",
+ "udmurtia.ru",
+ "ulan-ude.ru",
+ "vladikavkaz.ru",
+ "vladimir.ru",
+ "vladivostok.ru",
+ "volgograd.ru",
+ "vologda.ru",
+ "voronezh.ru",
+ "vrn.ru",
+ "vyatka.ru",
+ "yakutia.ru",
+ "yamal.ru",
+ "yaroslavl.ru",
+ "yekaterinburg.ru",
+ "yuzhno-sakhalinsk.ru",
+ "amursk.ru",
+ "baikal.ru",
+ "cmw.ru",
+ "fareast.ru",
+ "jamal.ru",
+ "kms.ru",
+ "k-uralsk.ru",
+ "kustanai.ru",
+ "kuzbass.ru",
+ "mytis.ru",
+ "nakhodka.ru",
+ "nkz.ru",
+ "norilsk.ru",
+ "oskol.ru",
+ "pyatigorsk.ru",
+ "rubtsovsk.ru",
+ "snz.ru",
+ "syzran.ru",
+ "vdonsk.ru",
+ "zgrad.ru",
+ "gov.ru",
+ "mil.ru",
+ "test.ru",
+ "rw",
+ "gov.rw",
+ "net.rw",
+ "edu.rw",
+ "ac.rw",
+ "com.rw",
+ "co.rw",
+ "int.rw",
+ "mil.rw",
+ "gouv.rw",
+ "sa",
+ "com.sa",
+ "net.sa",
+ "org.sa",
+ "gov.sa",
+ "med.sa",
+ "pub.sa",
+ "edu.sa",
+ "sch.sa",
+ "sb",
+ "com.sb",
+ "edu.sb",
+ "gov.sb",
+ "net.sb",
+ "org.sb",
+ "sc",
+ "com.sc",
+ "gov.sc",
+ "net.sc",
+ "org.sc",
+ "edu.sc",
+ "sd",
+ "com.sd",
+ "net.sd",
+ "org.sd",
+ "edu.sd",
+ "med.sd",
+ "tv.sd",
+ "gov.sd",
+ "info.sd",
+ "se",
+ "a.se",
+ "ac.se",
+ "b.se",
+ "bd.se",
+ "brand.se",
+ "c.se",
+ "d.se",
+ "e.se",
+ "f.se",
+ "fh.se",
+ "fhsk.se",
+ "fhv.se",
+ "g.se",
+ "h.se",
+ "i.se",
+ "k.se",
+ "komforb.se",
+ "kommunalforbund.se",
+ "komvux.se",
+ "l.se",
+ "lanbib.se",
+ "m.se",
+ "n.se",
+ "naturbruksgymn.se",
+ "o.se",
+ "org.se",
+ "p.se",
+ "parti.se",
+ "pp.se",
+ "press.se",
+ "r.se",
+ "s.se",
+ "t.se",
+ "tm.se",
+ "u.se",
+ "w.se",
+ "x.se",
+ "y.se",
+ "z.se",
+ "sg",
+ "com.sg",
+ "net.sg",
+ "org.sg",
+ "gov.sg",
+ "edu.sg",
+ "per.sg",
+ "sh",
+ "com.sh",
+ "net.sh",
+ "gov.sh",
+ "org.sh",
+ "mil.sh",
+ "si",
+ "sj",
+ "sk",
+ "sl",
+ "com.sl",
+ "net.sl",
+ "edu.sl",
+ "gov.sl",
+ "org.sl",
+ "sm",
+ "sn",
+ "art.sn",
+ "com.sn",
+ "edu.sn",
+ "gouv.sn",
+ "org.sn",
+ "perso.sn",
+ "univ.sn",
+ "so",
+ "com.so",
+ "net.so",
+ "org.so",
+ "sr",
+ "st",
+ "co.st",
+ "com.st",
+ "consulado.st",
+ "edu.st",
+ "embaixada.st",
+ "gov.st",
+ "mil.st",
+ "net.st",
+ "org.st",
+ "principe.st",
+ "saotome.st",
+ "store.st",
+ "su",
+ "adygeya.su",
+ "arkhangelsk.su",
+ "balashov.su",
+ "bashkiria.su",
+ "bryansk.su",
+ "dagestan.su",
+ "grozny.su",
+ "ivanovo.su",
+ "kalmykia.su",
+ "kaluga.su",
+ "karelia.su",
+ "khakassia.su",
+ "krasnodar.su",
+ "kurgan.su",
+ "lenug.su",
+ "mordovia.su",
+ "msk.su",
+ "murmansk.su",
+ "nalchik.su",
+ "nov.su",
+ "obninsk.su",
+ "penza.su",
+ "pokrovsk.su",
+ "sochi.su",
+ "spb.su",
+ "togliatti.su",
+ "troitsk.su",
+ "tula.su",
+ "tuva.su",
+ "vladikavkaz.su",
+ "vladimir.su",
+ "vologda.su",
+ "sv",
+ "com.sv",
+ "edu.sv",
+ "gob.sv",
+ "org.sv",
+ "red.sv",
+ "sx",
+ "gov.sx",
+ "sy",
+ "edu.sy",
+ "gov.sy",
+ "net.sy",
+ "mil.sy",
+ "com.sy",
+ "org.sy",
+ "sz",
+ "co.sz",
+ "ac.sz",
+ "org.sz",
+ "tc",
+ "td",
+ "tel",
+ "tf",
+ "tg",
+ "th",
+ "ac.th",
+ "co.th",
+ "go.th",
+ "in.th",
+ "mi.th",
+ "net.th",
+ "or.th",
+ "tj",
+ "ac.tj",
+ "biz.tj",
+ "co.tj",
+ "com.tj",
+ "edu.tj",
+ "go.tj",
+ "gov.tj",
+ "int.tj",
+ "mil.tj",
+ "name.tj",
+ "net.tj",
+ "nic.tj",
+ "org.tj",
+ "test.tj",
+ "web.tj",
+ "tk",
+ "tl",
+ "gov.tl",
+ "tm",
+ "com.tm",
+ "co.tm",
+ "org.tm",
+ "net.tm",
+ "nom.tm",
+ "gov.tm",
+ "mil.tm",
+ "edu.tm",
+ "tn",
+ "com.tn",
+ "ens.tn",
+ "fin.tn",
+ "gov.tn",
+ "ind.tn",
+ "intl.tn",
+ "nat.tn",
+ "net.tn",
+ "org.tn",
+ "info.tn",
+ "perso.tn",
+ "tourism.tn",
+ "edunet.tn",
+ "rnrt.tn",
+ "rns.tn",
+ "rnu.tn",
+ "mincom.tn",
+ "agrinet.tn",
+ "defense.tn",
+ "turen.tn",
+ "to",
+ "com.to",
+ "gov.to",
+ "net.to",
+ "org.to",
+ "edu.to",
+ "mil.to",
+ "tr",
+ "com.tr",
+ "info.tr",
+ "biz.tr",
+ "net.tr",
+ "org.tr",
+ "web.tr",
+ "gen.tr",
+ "tv.tr",
+ "av.tr",
+ "dr.tr",
+ "bbs.tr",
+ "name.tr",
+ "tel.tr",
+ "gov.tr",
+ "bel.tr",
+ "pol.tr",
+ "mil.tr",
+ "k12.tr",
+ "edu.tr",
+ "kep.tr",
+ "nc.tr",
+ "gov.nc.tr",
+ "travel",
+ "tt",
+ "co.tt",
+ "com.tt",
+ "org.tt",
+ "net.tt",
+ "biz.tt",
+ "info.tt",
+ "pro.tt",
+ "int.tt",
+ "coop.tt",
+ "jobs.tt",
+ "mobi.tt",
+ "travel.tt",
+ "museum.tt",
+ "aero.tt",
+ "name.tt",
+ "gov.tt",
+ "edu.tt",
+ "tv",
+ "tw",
+ "edu.tw",
+ "gov.tw",
+ "mil.tw",
+ "com.tw",
+ "net.tw",
+ "org.tw",
+ "idv.tw",
+ "game.tw",
+ "ebiz.tw",
+ "club.tw",
+ "xn--zf0ao64a.tw",
+ "xn--uc0atv.tw",
+ "xn--czrw28b.tw",
+ "tz",
+ "ac.tz",
+ "co.tz",
+ "go.tz",
+ "hotel.tz",
+ "info.tz",
+ "me.tz",
+ "mil.tz",
+ "mobi.tz",
+ "ne.tz",
+ "or.tz",
+ "sc.tz",
+ "tv.tz",
+ "ua",
+ "com.ua",
+ "edu.ua",
+ "gov.ua",
+ "in.ua",
+ "net.ua",
+ "org.ua",
+ "cherkassy.ua",
+ "cherkasy.ua",
+ "chernigov.ua",
+ "chernihiv.ua",
+ "chernivtsi.ua",
+ "chernovtsy.ua",
+ "ck.ua",
+ "cn.ua",
+ "cr.ua",
+ "crimea.ua",
+ "cv.ua",
+ "dn.ua",
+ "dnepropetrovsk.ua",
+ "dnipropetrovsk.ua",
+ "dominic.ua",
+ "donetsk.ua",
+ "dp.ua",
+ "if.ua",
+ "ivano-frankivsk.ua",
+ "kh.ua",
+ "kharkiv.ua",
+ "kharkov.ua",
+ "kherson.ua",
+ "khmelnitskiy.ua",
+ "khmelnytskyi.ua",
+ "kiev.ua",
+ "kirovograd.ua",
+ "km.ua",
+ "kr.ua",
+ "krym.ua",
+ "ks.ua",
+ "kv.ua",
+ "kyiv.ua",
+ "lg.ua",
+ "lt.ua",
+ "lugansk.ua",
+ "lutsk.ua",
+ "lv.ua",
+ "lviv.ua",
+ "mk.ua",
+ "mykolaiv.ua",
+ "nikolaev.ua",
+ "od.ua",
+ "odesa.ua",
+ "odessa.ua",
+ "pl.ua",
+ "poltava.ua",
+ "rivne.ua",
+ "rovno.ua",
+ "rv.ua",
+ "sb.ua",
+ "sebastopol.ua",
+ "sevastopol.ua",
+ "sm.ua",
+ "sumy.ua",
+ "te.ua",
+ "ternopil.ua",
+ "uz.ua",
+ "uzhgorod.ua",
+ "vinnica.ua",
+ "vinnytsia.ua",
+ "vn.ua",
+ "volyn.ua",
+ "yalta.ua",
+ "zaporizhzhe.ua",
+ "zaporizhzhia.ua",
+ "zhitomir.ua",
+ "zhytomyr.ua",
+ "zp.ua",
+ "zt.ua",
+ "ug",
+ "co.ug",
+ "or.ug",
+ "ac.ug",
+ "sc.ug",
+ "go.ug",
+ "ne.ug",
+ "com.ug",
+ "org.ug",
+ "uk",
+ "ac.uk",
+ "co.uk",
+ "gov.uk",
+ "ltd.uk",
+ "me.uk",
+ "net.uk",
+ "nhs.uk",
+ "org.uk",
+ "plc.uk",
+ "police.uk",
+ "*.sch.uk",
+ "us",
+ "dni.us",
+ "fed.us",
+ "isa.us",
+ "kids.us",
+ "nsn.us",
+ "ak.us",
+ "al.us",
+ "ar.us",
+ "as.us",
+ "az.us",
+ "ca.us",
+ "co.us",
+ "ct.us",
+ "dc.us",
+ "de.us",
+ "fl.us",
+ "ga.us",
+ "gu.us",
+ "hi.us",
+ "ia.us",
+ "id.us",
+ "il.us",
+ "in.us",
+ "ks.us",
+ "ky.us",
+ "la.us",
+ "ma.us",
+ "md.us",
+ "me.us",
+ "mi.us",
+ "mn.us",
+ "mo.us",
+ "ms.us",
+ "mt.us",
+ "nc.us",
+ "nd.us",
+ "ne.us",
+ "nh.us",
+ "nj.us",
+ "nm.us",
+ "nv.us",
+ "ny.us",
+ "oh.us",
+ "ok.us",
+ "or.us",
+ "pa.us",
+ "pr.us",
+ "ri.us",
+ "sc.us",
+ "sd.us",
+ "tn.us",
+ "tx.us",
+ "ut.us",
+ "vi.us",
+ "vt.us",
+ "va.us",
+ "wa.us",
+ "wi.us",
+ "wv.us",
+ "wy.us",
+ "k12.ak.us",
+ "k12.al.us",
+ "k12.ar.us",
+ "k12.as.us",
+ "k12.az.us",
+ "k12.ca.us",
+ "k12.co.us",
+ "k12.ct.us",
+ "k12.dc.us",
+ "k12.de.us",
+ "k12.fl.us",
+ "k12.ga.us",
+ "k12.gu.us",
+ "k12.ia.us",
+ "k12.id.us",
+ "k12.il.us",
+ "k12.in.us",
+ "k12.ks.us",
+ "k12.ky.us",
+ "k12.la.us",
+ "k12.ma.us",
+ "k12.md.us",
+ "k12.me.us",
+ "k12.mi.us",
+ "k12.mn.us",
+ "k12.mo.us",
+ "k12.ms.us",
+ "k12.mt.us",
+ "k12.nc.us",
+ "k12.ne.us",
+ "k12.nh.us",
+ "k12.nj.us",
+ "k12.nm.us",
+ "k12.nv.us",
+ "k12.ny.us",
+ "k12.oh.us",
+ "k12.ok.us",
+ "k12.or.us",
+ "k12.pa.us",
+ "k12.pr.us",
+ "k12.ri.us",
+ "k12.sc.us",
+ "k12.tn.us",
+ "k12.tx.us",
+ "k12.ut.us",
+ "k12.vi.us",
+ "k12.vt.us",
+ "k12.va.us",
+ "k12.wa.us",
+ "k12.wi.us",
+ "k12.wy.us",
+ "cc.ak.us",
+ "cc.al.us",
+ "cc.ar.us",
+ "cc.as.us",
+ "cc.az.us",
+ "cc.ca.us",
+ "cc.co.us",
+ "cc.ct.us",
+ "cc.dc.us",
+ "cc.de.us",
+ "cc.fl.us",
+ "cc.ga.us",
+ "cc.gu.us",
+ "cc.hi.us",
+ "cc.ia.us",
+ "cc.id.us",
+ "cc.il.us",
+ "cc.in.us",
+ "cc.ks.us",
+ "cc.ky.us",
+ "cc.la.us",
+ "cc.ma.us",
+ "cc.md.us",
+ "cc.me.us",
+ "cc.mi.us",
+ "cc.mn.us",
+ "cc.mo.us",
+ "cc.ms.us",
+ "cc.mt.us",
+ "cc.nc.us",
+ "cc.nd.us",
+ "cc.ne.us",
+ "cc.nh.us",
+ "cc.nj.us",
+ "cc.nm.us",
+ "cc.nv.us",
+ "cc.ny.us",
+ "cc.oh.us",
+ "cc.ok.us",
+ "cc.or.us",
+ "cc.pa.us",
+ "cc.pr.us",
+ "cc.ri.us",
+ "cc.sc.us",
+ "cc.sd.us",
+ "cc.tn.us",
+ "cc.tx.us",
+ "cc.ut.us",
+ "cc.vi.us",
+ "cc.vt.us",
+ "cc.va.us",
+ "cc.wa.us",
+ "cc.wi.us",
+ "cc.wv.us",
+ "cc.wy.us",
+ "lib.ak.us",
+ "lib.al.us",
+ "lib.ar.us",
+ "lib.as.us",
+ "lib.az.us",
+ "lib.ca.us",
+ "lib.co.us",
+ "lib.ct.us",
+ "lib.dc.us",
+ "lib.de.us",
+ "lib.fl.us",
+ "lib.ga.us",
+ "lib.gu.us",
+ "lib.hi.us",
+ "lib.ia.us",
+ "lib.id.us",
+ "lib.il.us",
+ "lib.in.us",
+ "lib.ks.us",
+ "lib.ky.us",
+ "lib.la.us",
+ "lib.ma.us",
+ "lib.md.us",
+ "lib.me.us",
+ "lib.mi.us",
+ "lib.mn.us",
+ "lib.mo.us",
+ "lib.ms.us",
+ "lib.mt.us",
+ "lib.nc.us",
+ "lib.nd.us",
+ "lib.ne.us",
+ "lib.nh.us",
+ "lib.nj.us",
+ "lib.nm.us",
+ "lib.nv.us",
+ "lib.ny.us",
+ "lib.oh.us",
+ "lib.ok.us",
+ "lib.or.us",
+ "lib.pa.us",
+ "lib.pr.us",
+ "lib.ri.us",
+ "lib.sc.us",
+ "lib.sd.us",
+ "lib.tn.us",
+ "lib.tx.us",
+ "lib.ut.us",
+ "lib.vi.us",
+ "lib.vt.us",
+ "lib.va.us",
+ "lib.wa.us",
+ "lib.wi.us",
+ "lib.wy.us",
+ "pvt.k12.ma.us",
+ "chtr.k12.ma.us",
+ "paroch.k12.ma.us",
+ "uy",
+ "com.uy",
+ "edu.uy",
+ "gub.uy",
+ "mil.uy",
+ "net.uy",
+ "org.uy",
+ "uz",
+ "co.uz",
+ "com.uz",
+ "net.uz",
+ "org.uz",
+ "va",
+ "vc",
+ "com.vc",
+ "net.vc",
+ "org.vc",
+ "gov.vc",
+ "mil.vc",
+ "edu.vc",
+ "ve",
+ "arts.ve",
+ "co.ve",
+ "com.ve",
+ "e12.ve",
+ "edu.ve",
+ "firm.ve",
+ "gob.ve",
+ "gov.ve",
+ "info.ve",
+ "int.ve",
+ "mil.ve",
+ "net.ve",
+ "org.ve",
+ "rec.ve",
+ "store.ve",
+ "tec.ve",
+ "web.ve",
+ "vg",
+ "vi",
+ "co.vi",
+ "com.vi",
+ "k12.vi",
+ "net.vi",
+ "org.vi",
+ "vn",
+ "com.vn",
+ "net.vn",
+ "org.vn",
+ "edu.vn",
+ "gov.vn",
+ "int.vn",
+ "ac.vn",
+ "biz.vn",
+ "info.vn",
+ "name.vn",
+ "pro.vn",
+ "health.vn",
+ "vu",
+ "com.vu",
+ "edu.vu",
+ "net.vu",
+ "org.vu",
+ "wf",
+ "ws",
+ "com.ws",
+ "net.ws",
+ "org.ws",
+ "gov.ws",
+ "edu.ws",
+ "yt",
+ "xn--mgbaam7a8h",
+ "xn--y9a3aq",
+ "xn--54b7fta0cc",
+ "xn--90ais",
+ "xn--fiqs8s",
+ "xn--fiqz9s",
+ "xn--lgbbat1ad8j",
+ "xn--wgbh1c",
+ "xn--e1a4c",
+ "xn--node",
+ "xn--qxam",
+ "xn--j6w193g",
+ "xn--h2brj9c",
+ "xn--mgbbh1a71e",
+ "xn--fpcrj9c3d",
+ "xn--gecrj9c",
+ "xn--s9brj9c",
+ "xn--45brj9c",
+ "xn--xkc2dl3a5ee0h",
+ "xn--mgba3a4f16a",
+ "xn--mgba3a4fra",
+ "xn--mgbtx2b",
+ "xn--mgbayh7gpa",
+ "xn--3e0b707e",
+ "xn--80ao21a",
+ "xn--fzc2c9e2c",
+ "xn--xkc2al3hye2a",
+ "xn--mgbc0a9azcg",
+ "xn--d1alf",
+ "xn--l1acc",
+ "xn--mix891f",
+ "xn--mix082f",
+ "xn--mgbx4cd0ab",
+ "xn--mgb9awbf",
+ "xn--mgbai9azgqp6j",
+ "xn--mgbai9a5eva00b",
+ "xn--ygbi2ammx",
+ "xn--90a3ac",
+ "xn--o1ac.xn--90a3ac",
+ "xn--c1avg.xn--90a3ac",
+ "xn--90azh.xn--90a3ac",
+ "xn--d1at.xn--90a3ac",
+ "xn--o1ach.xn--90a3ac",
+ "xn--80au.xn--90a3ac",
+ "xn--p1ai",
+ "xn--wgbl6a",
+ "xn--mgberp4a5d4ar",
+ "xn--mgberp4a5d4a87g",
+ "xn--mgbqly7c0a67fbc",
+ "xn--mgbqly7cvafr",
+ "xn--mgbpl2fh",
+ "xn--yfro4i67o",
+ "xn--clchc0ea0b2g2a9gcd",
+ "xn--ogbpf8fl",
+ "xn--mgbtf8fl",
+ "xn--o3cw4h",
+ "xn--pgbs0dh",
+ "xn--kpry57d",
+ "xn--kprw13d",
+ "xn--nnx388a",
+ "xn--j1amh",
+ "xn--mgb2ddes",
+ "xxx",
+ "*.ye",
+ "ac.za",
+ "agric.za",
+ "alt.za",
+ "co.za",
+ "edu.za",
+ "gov.za",
+ "grondar.za",
+ "law.za",
+ "mil.za",
+ "net.za",
+ "ngo.za",
+ "nis.za",
+ "nom.za",
+ "org.za",
+ "school.za",
+ "tm.za",
+ "web.za",
+ "zm",
+ "ac.zm",
+ "biz.zm",
+ "co.zm",
+ "com.zm",
+ "edu.zm",
+ "gov.zm",
+ "info.zm",
+ "mil.zm",
+ "net.zm",
+ "org.zm",
+ "sch.zm",
+ "*.zw",
+ "aaa",
+ "aarp",
+ "abarth",
+ "abb",
+ "abbott",
+ "abbvie",
+ "abc",
+ "able",
+ "abogado",
+ "abudhabi",
+ "academy",
+ "accenture",
+ "accountant",
+ "accountants",
+ "aco",
+ "active",
+ "actor",
+ "adac",
+ "ads",
+ "adult",
+ "aeg",
+ "aetna",
+ "afamilycompany",
+ "afl",
+ "africa",
+ "africamagic",
+ "agakhan",
+ "agency",
+ "aig",
+ "aigo",
+ "airbus",
+ "airforce",
+ "airtel",
+ "akdn",
+ "alfaromeo",
+ "alibaba",
+ "alipay",
+ "allfinanz",
+ "allstate",
+ "ally",
+ "alsace",
+ "alstom",
+ "americanexpress",
+ "americanfamily",
+ "amex",
+ "amfam",
+ "amica",
+ "amsterdam",
+ "analytics",
+ "android",
+ "anquan",
+ "anz",
+ "aol",
+ "apartments",
+ "app",
+ "apple",
+ "aquarelle",
+ "arab",
+ "aramco",
+ "archi",
+ "army",
+ "art",
+ "arte",
+ "asda",
+ "associates",
+ "athleta",
+ "attorney",
+ "auction",
+ "audi",
+ "audible",
+ "audio",
+ "auspost",
+ "author",
+ "auto",
+ "autos",
+ "avianca",
+ "aws",
+ "axa",
+ "azure",
+ "baby",
+ "baidu",
+ "banamex",
+ "bananarepublic",
+ "band",
+ "bank",
+ "bar",
+ "barcelona",
+ "barclaycard",
+ "barclays",
+ "barefoot",
+ "bargains",
+ "baseball",
+ "basketball",
+ "bauhaus",
+ "bayern",
+ "bbc",
+ "bbt",
+ "bbva",
+ "bcg",
+ "bcn",
+ "beats",
+ "beauty",
+ "beer",
+ "bentley",
+ "berlin",
+ "best",
+ "bestbuy",
+ "bet",
+ "bharti",
+ "bible",
+ "bid",
+ "bike",
+ "bing",
+ "bingo",
+ "bio",
+ "black",
+ "blackfriday",
+ "blanco",
+ "blockbuster",
+ "blog",
+ "bloomberg",
+ "blue",
+ "bms",
+ "bmw",
+ "bnl",
+ "bnpparibas",
+ "boats",
+ "boehringer",
+ "bofa",
+ "bom",
+ "bond",
+ "boo",
+ "book",
+ "booking",
+ "boots",
+ "bosch",
+ "bostik",
+ "boston",
+ "bot",
+ "boutique",
+ "box",
+ "bradesco",
+ "bridgestone",
+ "broadway",
+ "broker",
+ "brother",
+ "brussels",
+ "budapest",
+ "bugatti",
+ "build",
+ "builders",
+ "business",
+ "buy",
+ "buzz",
+ "bzh",
+ "cab",
+ "cafe",
+ "cal",
+ "call",
+ "calvinklein",
+ "cam",
+ "camera",
+ "camp",
+ "cancerresearch",
+ "canon",
+ "capetown",
+ "capital",
+ "capitalone",
+ "car",
+ "caravan",
+ "cards",
+ "care",
+ "career",
+ "careers",
+ "cars",
+ "cartier",
+ "casa",
+ "case",
+ "caseih",
+ "cash",
+ "casino",
+ "catering",
+ "catholic",
+ "cba",
+ "cbn",
+ "cbre",
+ "cbs",
+ "ceb",
+ "center",
+ "ceo",
+ "cern",
+ "cfa",
+ "cfd",
+ "chanel",
+ "channel",
+ "chase",
+ "chat",
+ "cheap",
+ "chintai",
+ "chloe",
+ "christmas",
+ "chrome",
+ "chrysler",
+ "church",
+ "cipriani",
+ "circle",
+ "cisco",
+ "citadel",
+ "citi",
+ "citic",
+ "city",
+ "cityeats",
+ "claims",
+ "cleaning",
+ "click",
+ "clinic",
+ "clinique",
+ "clothing",
+ "cloud",
+ "club",
+ "clubmed",
+ "coach",
+ "codes",
+ "coffee",
+ "college",
+ "cologne",
+ "comcast",
+ "commbank",
+ "community",
+ "company",
+ "compare",
+ "computer",
+ "comsec",
+ "condos",
+ "construction",
+ "consulting",
+ "contact",
+ "contractors",
+ "cooking",
+ "cookingchannel",
+ "cool",
+ "corsica",
+ "country",
+ "coupon",
+ "coupons",
+ "courses",
+ "credit",
+ "creditcard",
+ "creditunion",
+ "cricket",
+ "crown",
+ "crs",
+ "cruise",
+ "cruises",
+ "csc",
+ "cuisinella",
+ "cymru",
+ "cyou",
+ "dabur",
+ "dad",
+ "dance",
+ "date",
+ "dating",
+ "datsun",
+ "day",
+ "dclk",
+ "dds",
+ "deal",
+ "dealer",
+ "deals",
+ "degree",
+ "delivery",
+ "dell",
+ "deloitte",
+ "delta",
+ "democrat",
+ "dental",
+ "dentist",
+ "desi",
+ "design",
+ "dev",
+ "dhl",
+ "diamonds",
+ "diet",
+ "digital",
+ "direct",
+ "directory",
+ "discount",
+ "discover",
+ "dish",
+ "diy",
+ "dnp",
+ "docs",
+ "dodge",
+ "dog",
+ "doha",
+ "domains",
+ "dot",
+ "download",
+ "drive",
+ "dstv",
+ "dtv",
+ "dubai",
+ "duck",
+ "dunlop",
+ "duns",
+ "dupont",
+ "durban",
+ "dvag",
+ "dwg",
+ "earth",
+ "eat",
+ "edeka",
+ "education",
+ "email",
+ "emerck",
+ "emerson",
+ "energy",
+ "engineer",
+ "engineering",
+ "enterprises",
+ "epost",
+ "epson",
+ "equipment",
+ "ericsson",
+ "erni",
+ "esq",
+ "estate",
+ "esurance",
+ "etisalat",
+ "eurovision",
+ "eus",
+ "events",
+ "everbank",
+ "exchange",
+ "expert",
+ "exposed",
+ "express",
+ "extraspace",
+ "fage",
+ "fail",
+ "fairwinds",
+ "faith",
+ "family",
+ "fan",
+ "fans",
+ "farm",
+ "farmers",
+ "fashion",
+ "fast",
+ "fedex",
+ "feedback",
+ "ferrari",
+ "ferrero",
+ "fiat",
+ "fidelity",
+ "fido",
+ "film",
+ "final",
+ "finance",
+ "financial",
+ "fire",
+ "firestone",
+ "firmdale",
+ "fish",
+ "fishing",
+ "fit",
+ "fitness",
+ "flickr",
+ "flights",
+ "flir",
+ "florist",
+ "flowers",
+ "flsmidth",
+ "fly",
+ "foo",
+ "food",
+ "foodnetwork",
+ "football",
+ "ford",
+ "forex",
+ "forsale",
+ "forum",
+ "foundation",
+ "fox",
+ "free",
+ "fresenius",
+ "frl",
+ "frogans",
+ "frontdoor",
+ "frontier",
+ "ftr",
+ "fujitsu",
+ "fujixerox",
+ "fun",
+ "fund",
+ "furniture",
+ "futbol",
+ "fyi",
+ "gal",
+ "gallery",
+ "gallo",
+ "gallup",
+ "game",
+ "games",
+ "gap",
+ "garden",
+ "gbiz",
+ "gdn",
+ "gea",
+ "gent",
+ "genting",
+ "george",
+ "ggee",
+ "gift",
+ "gifts",
+ "gives",
+ "giving",
+ "glade",
+ "glass",
+ "gle",
+ "global",
+ "globo",
+ "gmail",
+ "gmbh",
+ "gmo",
+ "gmx",
+ "godaddy",
+ "gold",
+ "goldpoint",
+ "golf",
+ "goo",
+ "goodhands",
+ "goodyear",
+ "goog",
+ "google",
+ "gop",
+ "got",
+ "gotv",
+ "grainger",
+ "graphics",
+ "gratis",
+ "green",
+ "gripe",
+ "group",
+ "guardian",
+ "gucci",
+ "guge",
+ "guide",
+ "guitars",
+ "guru",
+ "hair",
+ "hamburg",
+ "hangout",
+ "haus",
+ "hbo",
+ "hdfc",
+ "hdfcbank",
+ "health",
+ "healthcare",
+ "help",
+ "helsinki",
+ "here",
+ "hermes",
+ "hgtv",
+ "hiphop",
+ "hisamitsu",
+ "hitachi",
+ "hiv",
+ "hkt",
+ "hockey",
+ "holdings",
+ "holiday",
+ "homedepot",
+ "homegoods",
+ "homes",
+ "homesense",
+ "honda",
+ "honeywell",
+ "horse",
+ "host",
+ "hosting",
+ "hot",
+ "hoteles",
+ "hotels",
+ "hotmail",
+ "house",
+ "how",
+ "hsbc",
+ "htc",
+ "hughes",
+ "hyatt",
+ "hyundai",
+ "ibm",
+ "icbc",
+ "ice",
+ "icu",
+ "ieee",
+ "ifm",
+ "iinet",
+ "ikano",
+ "imamat",
+ "imdb",
+ "immo",
+ "immobilien",
+ "industries",
+ "infiniti",
+ "ing",
+ "ink",
+ "institute",
+ "insurance",
+ "insure",
+ "intel",
+ "international",
+ "intuit",
+ "investments",
+ "ipiranga",
+ "irish",
+ "iselect",
+ "ismaili",
+ "ist",
+ "istanbul",
+ "itau",
+ "itv",
+ "iveco",
+ "iwc",
+ "jaguar",
+ "java",
+ "jcb",
+ "jcp",
+ "jeep",
+ "jetzt",
+ "jewelry",
+ "jio",
+ "jlc",
+ "jll",
+ "jmp",
+ "jnj",
+ "joburg",
+ "jot",
+ "joy",
+ "jpmorgan",
+ "jprs",
+ "juegos",
+ "juniper",
+ "kaufen",
+ "kddi",
+ "kerryhotels",
+ "kerrylogistics",
+ "kerryproperties",
+ "kfh",
+ "kia",
+ "kim",
+ "kinder",
+ "kindle",
+ "kitchen",
+ "kiwi",
+ "koeln",
+ "komatsu",
+ "kosher",
+ "kpmg",
+ "kpn",
+ "krd",
+ "kred",
+ "kuokgroup",
+ "kyknet",
+ "kyoto",
+ "lacaixa",
+ "ladbrokes",
+ "lamborghini",
+ "lamer",
+ "lancaster",
+ "lancia",
+ "lancome",
+ "land",
+ "landrover",
+ "lanxess",
+ "lasalle",
+ "lat",
+ "latino",
+ "latrobe",
+ "law",
+ "lawyer",
+ "lds",
+ "lease",
+ "leclerc",
+ "lefrak",
+ "legal",
+ "lego",
+ "lexus",
+ "lgbt",
+ "liaison",
+ "lidl",
+ "life",
+ "lifeinsurance",
+ "lifestyle",
+ "lighting",
+ "like",
+ "lilly",
+ "limited",
+ "limo",
+ "lincoln",
+ "linde",
+ "link",
+ "lipsy",
+ "live",
+ "living",
+ "lixil",
+ "loan",
+ "loans",
+ "locker",
+ "locus",
+ "loft",
+ "lol",
+ "london",
+ "lotte",
+ "lotto",
+ "love",
+ "lpl",
+ "lplfinancial",
+ "ltd",
+ "ltda",
+ "lundbeck",
+ "lupin",
+ "luxe",
+ "luxury",
+ "macys",
+ "madrid",
+ "maif",
+ "maison",
+ "makeup",
+ "man",
+ "management",
+ "mango",
+ "market",
+ "marketing",
+ "markets",
+ "marriott",
+ "marshalls",
+ "maserati",
+ "mattel",
+ "mba",
+ "mcd",
+ "mcdonalds",
+ "mckinsey",
+ "med",
+ "media",
+ "meet",
+ "melbourne",
+ "meme",
+ "memorial",
+ "men",
+ "menu",
+ "meo",
+ "metlife",
+ "miami",
+ "microsoft",
+ "mini",
+ "mint",
+ "mit",
+ "mitsubishi",
+ "mlb",
+ "mls",
+ "mma",
+ "mnet",
+ "mobily",
+ "moda",
+ "moe",
+ "moi",
+ "mom",
+ "monash",
+ "money",
+ "monster",
+ "montblanc",
+ "mopar",
+ "mormon",
+ "mortgage",
+ "moscow",
+ "moto",
+ "motorcycles",
+ "mov",
+ "movie",
+ "movistar",
+ "msd",
+ "mtn",
+ "mtpc",
+ "mtr",
+ "multichoice",
+ "mutual",
+ "mutuelle",
+ "mzansimagic",
+ "nab",
+ "nadex",
+ "nagoya",
+ "naspers",
+ "nationwide",
+ "natura",
+ "navy",
+ "nba",
+ "nec",
+ "netbank",
+ "netflix",
+ "network",
+ "neustar",
+ "new",
+ "newholland",
+ "news",
+ "next",
+ "nextdirect",
+ "nexus",
+ "nfl",
+ "ngo",
+ "nhk",
+ "nico",
+ "nike",
+ "nikon",
+ "ninja",
+ "nissan",
+ "nissay",
+ "nokia",
+ "northwesternmutual",
+ "norton",
+ "now",
+ "nowruz",
+ "nowtv",
+ "nra",
+ "nrw",
+ "ntt",
+ "nyc",
+ "obi",
+ "observer",
+ "off",
+ "office",
+ "okinawa",
+ "olayan",
+ "olayangroup",
+ "oldnavy",
+ "ollo",
+ "omega",
+ "one",
+ "ong",
+ "onl",
+ "online",
+ "onyourside",
+ "ooo",
+ "open",
+ "oracle",
+ "orange",
+ "organic",
+ "orientexpress",
+ "origins",
+ "osaka",
+ "otsuka",
+ "ott",
+ "ovh",
+ "page",
+ "pamperedchef",
+ "panasonic",
+ "panerai",
+ "paris",
+ "pars",
+ "partners",
+ "parts",
+ "party",
+ "passagens",
+ "pay",
+ "payu",
+ "pccw",
+ "pet",
+ "pfizer",
+ "pharmacy",
+ "philips",
+ "photo",
+ "photography",
+ "photos",
+ "physio",
+ "piaget",
+ "pics",
+ "pictet",
+ "pictures",
+ "pid",
+ "pin",
+ "ping",
+ "pink",
+ "pioneer",
+ "pizza",
+ "place",
+ "play",
+ "playstation",
+ "plumbing",
+ "plus",
+ "pnc",
+ "pohl",
+ "poker",
+ "politie",
+ "porn",
+ "pramerica",
+ "praxi",
+ "press",
+ "prime",
+ "prod",
+ "productions",
+ "prof",
+ "progressive",
+ "promo",
+ "properties",
+ "property",
+ "protection",
+ "pru",
+ "prudential",
+ "pub",
+ "pwc",
+ "qpon",
+ "quebec",
+ "quest",
+ "qvc",
+ "racing",
+ "raid",
+ "read",
+ "realestate",
+ "realtor",
+ "realty",
+ "recipes",
+ "red",
+ "redstone",
+ "redumbrella",
+ "rehab",
+ "reise",
+ "reisen",
+ "reit",
+ "reliance",
+ "ren",
+ "rent",
+ "rentals",
+ "repair",
+ "report",
+ "republican",
+ "rest",
+ "restaurant",
+ "review",
+ "reviews",
+ "rexroth",
+ "rich",
+ "richardli",
+ "ricoh",
+ "rightathome",
+ "ril",
+ "rio",
+ "rip",
+ "rmit",
+ "rocher",
+ "rocks",
+ "rodeo",
+ "rogers",
+ "room",
+ "rsvp",
+ "ruhr",
+ "run",
+ "rwe",
+ "ryukyu",
+ "saarland",
+ "safe",
+ "safety",
+ "sakura",
+ "sale",
+ "salon",
+ "samsclub",
+ "samsung",
+ "sandvik",
+ "sandvikcoromant",
+ "sanofi",
+ "sap",
+ "sapo",
+ "sarl",
+ "sas",
+ "save",
+ "saxo",
+ "sbi",
+ "sbs",
+ "sca",
+ "scb",
+ "schaeffler",
+ "schmidt",
+ "scholarships",
+ "school",
+ "schule",
+ "schwarz",
+ "science",
+ "scjohnson",
+ "scor",
+ "scot",
+ "seat",
+ "secure",
+ "security",
+ "seek",
+ "select",
+ "sener",
+ "services",
+ "ses",
+ "seven",
+ "sew",
+ "sex",
+ "sexy",
+ "sfr",
+ "shangrila",
+ "sharp",
+ "shaw",
+ "shell",
+ "shia",
+ "shiksha",
+ "shoes",
+ "shop",
+ "shopping",
+ "shouji",
+ "show",
+ "showtime",
+ "shriram",
+ "silk",
+ "sina",
+ "singles",
+ "site",
+ "ski",
+ "skin",
+ "sky",
+ "skype",
+ "sling",
+ "smart",
+ "smile",
+ "sncf",
+ "soccer",
+ "social",
+ "softbank",
+ "software",
+ "sohu",
+ "solar",
+ "solutions",
+ "song",
+ "sony",
+ "soy",
+ "space",
+ "spiegel",
+ "spot",
+ "spreadbetting",
+ "srl",
+ "srt",
+ "stada",
+ "staples",
+ "star",
+ "starhub",
+ "statebank",
+ "statefarm",
+ "statoil",
+ "stc",
+ "stcgroup",
+ "stockholm",
+ "storage",
+ "store",
+ "stream",
+ "studio",
+ "study",
+ "style",
+ "sucks",
+ "supersport",
+ "supplies",
+ "supply",
+ "support",
+ "surf",
+ "surgery",
+ "suzuki",
+ "swatch",
+ "swiftcover",
+ "swiss",
+ "sydney",
+ "symantec",
+ "systems",
+ "tab",
+ "taipei",
+ "talk",
+ "taobao",
+ "target",
+ "tatamotors",
+ "tatar",
+ "tattoo",
+ "tax",
+ "taxi",
+ "tci",
+ "tdk",
+ "team",
+ "tech",
+ "technology",
+ "telecity",
+ "telefonica",
+ "temasek",
+ "tennis",
+ "teva",
+ "thd",
+ "theater",
+ "theatre",
+ "theguardian",
+ "tiaa",
+ "tickets",
+ "tienda",
+ "tiffany",
+ "tips",
+ "tires",
+ "tirol",
+ "tjmaxx",
+ "tjx",
+ "tkmaxx",
+ "tmall",
+ "today",
+ "tokyo",
+ "tools",
+ "top",
+ "toray",
+ "toshiba",
+ "total",
+ "tours",
+ "town",
+ "toyota",
+ "toys",
+ "trade",
+ "trading",
+ "training",
+ "travelchannel",
+ "travelers",
+ "travelersinsurance",
+ "trust",
+ "trv",
+ "tube",
+ "tui",
+ "tunes",
+ "tushu",
+ "tvs",
+ "ubank",
+ "ubs",
+ "uconnect",
+ "unicom",
+ "university",
+ "uno",
+ "uol",
+ "ups",
+ "vacations",
+ "vana",
+ "vanguard",
+ "vegas",
+ "ventures",
+ "verisign",
+ "versicherung",
+ "vet",
+ "viajes",
+ "video",
+ "vig",
+ "viking",
+ "villas",
+ "vin",
+ "vip",
+ "virgin",
+ "visa",
+ "vision",
+ "vista",
+ "vistaprint",
+ "viva",
+ "vivo",
+ "vlaanderen",
+ "vodka",
+ "volkswagen",
+ "volvo",
+ "vote",
+ "voting",
+ "voto",
+ "voyage",
+ "vuelos",
+ "wales",
+ "walmart",
+ "walter",
+ "wang",
+ "wanggou",
+ "warman",
+ "watch",
+ "watches",
+ "weather",
+ "weatherchannel",
+ "webcam",
+ "weber",
+ "website",
+ "wed",
+ "wedding",
+ "weibo",
+ "weir",
+ "whoswho",
+ "wien",
+ "wiki",
+ "williamhill",
+ "win",
+ "windows",
+ "wine",
+ "winners",
+ "wme",
+ "wolterskluwer",
+ "woodside",
+ "work",
+ "works",
+ "world",
+ "wow",
+ "wtc",
+ "wtf",
+ "xbox",
+ "xerox",
+ "xfinity",
+ "xihuan",
+ "xin",
+ "xn--11b4c3d",
+ "xn--1ck2e1b",
+ "xn--1qqw23a",
+ "xn--30rr7y",
+ "xn--3bst00m",
+ "xn--3ds443g",
+ "xn--3oq18vl8pn36a",
+ "xn--3pxu8k",
+ "xn--42c2d9a",
+ "xn--45q11c",
+ "xn--4gbrim",
+ "xn--4gq48lf9j",
+ "xn--55qw42g",
+ "xn--55qx5d",
+ "xn--5su34j936bgsg",
+ "xn--5tzm5g",
+ "xn--6frz82g",
+ "xn--6qq986b3xl",
+ "xn--80adxhks",
+ "xn--80aqecdr1a",
+ "xn--80asehdb",
+ "xn--80aswg",
+ "xn--8y0a063a",
+ "xn--9dbq2a",
+ "xn--9et52u",
+ "xn--9krt00a",
+ "xn--b4w605ferd",
+ "xn--bck1b9a5dre4c",
+ "xn--c1avg",
+ "xn--c2br7g",
+ "xn--cck2b3b",
+ "xn--cg4bki",
+ "xn--czr694b",
+ "xn--czrs0t",
+ "xn--czru2d",
+ "xn--d1acj3b",
+ "xn--eckvdtc9d",
+ "xn--efvy88h",
+ "xn--estv75g",
+ "xn--fct429k",
+ "xn--fhbei",
+ "xn--fiq228c5hs",
+ "xn--fiq64b",
+ "xn--fjq720a",
+ "xn--flw351e",
+ "xn--fzys8d69uvgm",
+ "xn--g2xx48c",
+ "xn--gckr3f0f",
+ "xn--gk3at1e",
+ "xn--hxt814e",
+ "xn--i1b6b1a6a2e",
+ "xn--imr513n",
+ "xn--io0a7i",
+ "xn--j1aef",
+ "xn--jlq61u9w7b",
+ "xn--jvr189m",
+ "xn--kcrx77d1x4a",
+ "xn--kpu716f",
+ "xn--kput3i",
+ "xn--mgba3a3ejt",
+ "xn--mgba7c0bbn0a",
+ "xn--mgbaakc7dvf",
+ "xn--mgbab2bd",
+ "xn--mgbb9fbpob",
+ "xn--mgbca7dzdo",
+ "xn--mgbi4ecexp",
+ "xn--mgbt3dhd",
+ "xn--mk1bu44c",
+ "xn--mxtq1m",
+ "xn--ngbc5azd",
+ "xn--ngbe9e0a",
+ "xn--ngbrx",
+ "xn--nqv7f",
+ "xn--nqv7fs00ema",
+ "xn--nyqy26a",
+ "xn--p1acf",
+ "xn--pbt977c",
+ "xn--pssy2u",
+ "xn--q9jyb4c",
+ "xn--qcka1pmc",
+ "xn--rhqv96g",
+ "xn--rovu88b",
+ "xn--ses554g",
+ "xn--t60b56a",
+ "xn--tckwe",
+ "xn--tiq49xqyj",
+ "xn--unup4y",
+ "xn--vermgensberater-ctb",
+ "xn--vermgensberatung-pwb",
+ "xn--vhquv",
+ "xn--vuq861b",
+ "xn--w4r85el8fhu5dnra",
+ "xn--w4rs40l",
+ "xn--xhq521b",
+ "xn--zfr164b",
+ "xperia",
+ "xyz",
+ "yachts",
+ "yahoo",
+ "yamaxun",
+ "yandex",
+ "yodobashi",
+ "yoga",
+ "yokohama",
+ "you",
+ "youtube",
+ "yun",
+ "zappos",
+ "zara",
+ "zero",
+ "zip",
+ "zippo",
+ "zone",
+ "zuerich",
+ "beep.pl",
+ "*.compute.estate",
+ "*.alces.network",
+ "cloudfront.net",
+ "compute.amazonaws.com",
+ "ap-northeast-1.compute.amazonaws.com",
+ "ap-northeast-2.compute.amazonaws.com",
+ "ap-southeast-1.compute.amazonaws.com",
+ "ap-southeast-2.compute.amazonaws.com",
+ "eu-central-1.compute.amazonaws.com",
+ "eu-west-1.compute.amazonaws.com",
+ "sa-east-1.compute.amazonaws.com",
+ "us-gov-west-1.compute.amazonaws.com",
+ "us-west-1.compute.amazonaws.com",
+ "us-west-2.compute.amazonaws.com",
+ "compute-1.amazonaws.com",
+ "z-1.compute-1.amazonaws.com",
+ "z-2.compute-1.amazonaws.com",
+ "us-east-1.amazonaws.com",
+ "compute.amazonaws.com.cn",
+ "cn-north-1.compute.amazonaws.com.cn",
+ "elasticbeanstalk.com",
+ "elb.amazonaws.com",
+ "s3.amazonaws.com",
+ "s3-ap-northeast-1.amazonaws.com",
+ "s3-ap-northeast-2.amazonaws.com",
+ "s3-ap-southeast-1.amazonaws.com",
+ "s3-ap-southeast-2.amazonaws.com",
+ "s3-eu-central-1.amazonaws.com",
+ "s3-eu-west-1.amazonaws.com",
+ "s3-external-1.amazonaws.com",
+ "s3-external-2.amazonaws.com",
+ "s3-fips-us-gov-west-1.amazonaws.com",
+ "s3-sa-east-1.amazonaws.com",
+ "s3-us-gov-west-1.amazonaws.com",
+ "s3-us-west-1.amazonaws.com",
+ "s3-us-west-2.amazonaws.com",
+ "s3.ap-northeast-2.amazonaws.com",
+ "s3.cn-north-1.amazonaws.com.cn",
+ "s3.eu-central-1.amazonaws.com",
+ "on-aptible.com",
+ "pimienta.org",
+ "poivron.org",
+ "potager.org",
+ "sweetpepper.org",
+ "myasustor.com",
+ "myfritz.net",
+ "backplaneapp.io",
+ "betainabox.com",
+ "boxfuse.io",
+ "browsersafetymark.io",
+ "mycd.eu",
+ "ae.org",
+ "ar.com",
+ "br.com",
+ "cn.com",
+ "com.de",
+ "com.se",
+ "de.com",
+ "eu.com",
+ "gb.com",
+ "gb.net",
+ "hu.com",
+ "hu.net",
+ "jp.net",
+ "jpn.com",
+ "kr.com",
+ "mex.com",
+ "no.com",
+ "qc.com",
+ "ru.com",
+ "sa.com",
+ "se.com",
+ "se.net",
+ "uk.com",
+ "uk.net",
+ "us.com",
+ "uy.com",
+ "za.bz",
+ "za.com",
+ "africa.com",
+ "gr.com",
+ "in.net",
+ "us.org",
+ "co.com",
+ "c.la",
+ "certmgr.org",
+ "xenapponazure.com",
+ "virtueeldomein.nl",
+ "cloudcontrolled.com",
+ "cloudcontrolapp.com",
+ "co.ca",
+ "co.cz",
+ "c.cdn77.org",
+ "cdn77-ssl.net",
+ "r.cdn77.net",
+ "rsc.cdn77.org",
+ "ssl.origin.cdn77-secure.org",
+ "co.nl",
+ "co.no",
+ "*.platform.sh",
+ "realm.cz",
+ "*.cryptonomic.net",
+ "cupcake.is",
+ "cyon.link",
+ "cyon.site",
+ "daplie.me",
+ "biz.dk",
+ "co.dk",
+ "firm.dk",
+ "reg.dk",
+ "store.dk",
+ "dedyn.io",
+ "dnshome.de",
+ "dreamhosters.com",
+ "mydrobo.com",
+ "drud.io",
+ "drud.us",
+ "duckdns.org",
+ "dy.fi",
+ "tunk.org",
+ "dyndns-at-home.com",
+ "dyndns-at-work.com",
+ "dyndns-blog.com",
+ "dyndns-free.com",
+ "dyndns-home.com",
+ "dyndns-ip.com",
+ "dyndns-mail.com",
+ "dyndns-office.com",
+ "dyndns-pics.com",
+ "dyndns-remote.com",
+ "dyndns-server.com",
+ "dyndns-web.com",
+ "dyndns-wiki.com",
+ "dyndns-work.com",
+ "dyndns.biz",
+ "dyndns.info",
+ "dyndns.org",
+ "dyndns.tv",
+ "at-band-camp.net",
+ "ath.cx",
+ "barrel-of-knowledge.info",
+ "barrell-of-knowledge.info",
+ "better-than.tv",
+ "blogdns.com",
+ "blogdns.net",
+ "blogdns.org",
+ "blogsite.org",
+ "boldlygoingnowhere.org",
+ "broke-it.net",
+ "buyshouses.net",
+ "cechire.com",
+ "dnsalias.com",
+ "dnsalias.net",
+ "dnsalias.org",
+ "dnsdojo.com",
+ "dnsdojo.net",
+ "dnsdojo.org",
+ "does-it.net",
+ "doesntexist.com",
+ "doesntexist.org",
+ "dontexist.com",
+ "dontexist.net",
+ "dontexist.org",
+ "doomdns.com",
+ "doomdns.org",
+ "dvrdns.org",
+ "dyn-o-saur.com",
+ "dynalias.com",
+ "dynalias.net",
+ "dynalias.org",
+ "dynathome.net",
+ "dyndns.ws",
+ "endofinternet.net",
+ "endofinternet.org",
+ "endoftheinternet.org",
+ "est-a-la-maison.com",
+ "est-a-la-masion.com",
+ "est-le-patron.com",
+ "est-mon-blogueur.com",
+ "for-better.biz",
+ "for-more.biz",
+ "for-our.info",
+ "for-some.biz",
+ "for-the.biz",
+ "forgot.her.name",
+ "forgot.his.name",
+ "from-ak.com",
+ "from-al.com",
+ "from-ar.com",
+ "from-az.net",
+ "from-ca.com",
+ "from-co.net",
+ "from-ct.com",
+ "from-dc.com",
+ "from-de.com",
+ "from-fl.com",
+ "from-ga.com",
+ "from-hi.com",
+ "from-ia.com",
+ "from-id.com",
+ "from-il.com",
+ "from-in.com",
+ "from-ks.com",
+ "from-ky.com",
+ "from-la.net",
+ "from-ma.com",
+ "from-md.com",
+ "from-me.org",
+ "from-mi.com",
+ "from-mn.com",
+ "from-mo.com",
+ "from-ms.com",
+ "from-mt.com",
+ "from-nc.com",
+ "from-nd.com",
+ "from-ne.com",
+ "from-nh.com",
+ "from-nj.com",
+ "from-nm.com",
+ "from-nv.com",
+ "from-ny.net",
+ "from-oh.com",
+ "from-ok.com",
+ "from-or.com",
+ "from-pa.com",
+ "from-pr.com",
+ "from-ri.com",
+ "from-sc.com",
+ "from-sd.com",
+ "from-tn.com",
+ "from-tx.com",
+ "from-ut.com",
+ "from-va.com",
+ "from-vt.com",
+ "from-wa.com",
+ "from-wi.com",
+ "from-wv.com",
+ "from-wy.com",
+ "ftpaccess.cc",
+ "fuettertdasnetz.de",
+ "game-host.org",
+ "game-server.cc",
+ "getmyip.com",
+ "gets-it.net",
+ "go.dyndns.org",
+ "gotdns.com",
+ "gotdns.org",
+ "groks-the.info",
+ "groks-this.info",
+ "ham-radio-op.net",
+ "here-for-more.info",
+ "hobby-site.com",
+ "hobby-site.org",
+ "home.dyndns.org",
+ "homedns.org",
+ "homeftp.net",
+ "homeftp.org",
+ "homeip.net",
+ "homelinux.com",
+ "homelinux.net",
+ "homelinux.org",
+ "homeunix.com",
+ "homeunix.net",
+ "homeunix.org",
+ "iamallama.com",
+ "in-the-band.net",
+ "is-a-anarchist.com",
+ "is-a-blogger.com",
+ "is-a-bookkeeper.com",
+ "is-a-bruinsfan.org",
+ "is-a-bulls-fan.com",
+ "is-a-candidate.org",
+ "is-a-caterer.com",
+ "is-a-celticsfan.org",
+ "is-a-chef.com",
+ "is-a-chef.net",
+ "is-a-chef.org",
+ "is-a-conservative.com",
+ "is-a-cpa.com",
+ "is-a-cubicle-slave.com",
+ "is-a-democrat.com",
+ "is-a-designer.com",
+ "is-a-doctor.com",
+ "is-a-financialadvisor.com",
+ "is-a-geek.com",
+ "is-a-geek.net",
+ "is-a-geek.org",
+ "is-a-green.com",
+ "is-a-guru.com",
+ "is-a-hard-worker.com",
+ "is-a-hunter.com",
+ "is-a-knight.org",
+ "is-a-landscaper.com",
+ "is-a-lawyer.com",
+ "is-a-liberal.com",
+ "is-a-libertarian.com",
+ "is-a-linux-user.org",
+ "is-a-llama.com",
+ "is-a-musician.com",
+ "is-a-nascarfan.com",
+ "is-a-nurse.com",
+ "is-a-painter.com",
+ "is-a-patsfan.org",
+ "is-a-personaltrainer.com",
+ "is-a-photographer.com",
+ "is-a-player.com",
+ "is-a-republican.com",
+ "is-a-rockstar.com",
+ "is-a-socialist.com",
+ "is-a-soxfan.org",
+ "is-a-student.com",
+ "is-a-teacher.com",
+ "is-a-techie.com",
+ "is-a-therapist.com",
+ "is-an-accountant.com",
+ "is-an-actor.com",
+ "is-an-actress.com",
+ "is-an-anarchist.com",
+ "is-an-artist.com",
+ "is-an-engineer.com",
+ "is-an-entertainer.com",
+ "is-by.us",
+ "is-certified.com",
+ "is-found.org",
+ "is-gone.com",
+ "is-into-anime.com",
+ "is-into-cars.com",
+ "is-into-cartoons.com",
+ "is-into-games.com",
+ "is-leet.com",
+ "is-lost.org",
+ "is-not-certified.com",
+ "is-saved.org",
+ "is-slick.com",
+ "is-uberleet.com",
+ "is-very-bad.org",
+ "is-very-evil.org",
+ "is-very-good.org",
+ "is-very-nice.org",
+ "is-very-sweet.org",
+ "is-with-theband.com",
+ "isa-geek.com",
+ "isa-geek.net",
+ "isa-geek.org",
+ "isa-hockeynut.com",
+ "issmarterthanyou.com",
+ "isteingeek.de",
+ "istmein.de",
+ "kicks-ass.net",
+ "kicks-ass.org",
+ "knowsitall.info",
+ "land-4-sale.us",
+ "lebtimnetz.de",
+ "leitungsen.de",
+ "likes-pie.com",
+ "likescandy.com",
+ "merseine.nu",
+ "mine.nu",
+ "misconfused.org",
+ "mypets.ws",
+ "myphotos.cc",
+ "neat-url.com",
+ "office-on-the.net",
+ "on-the-web.tv",
+ "podzone.net",
+ "podzone.org",
+ "readmyblog.org",
+ "saves-the-whales.com",
+ "scrapper-site.net",
+ "scrapping.cc",
+ "selfip.biz",
+ "selfip.com",
+ "selfip.info",
+ "selfip.net",
+ "selfip.org",
+ "sells-for-less.com",
+ "sells-for-u.com",
+ "sells-it.net",
+ "sellsyourhome.org",
+ "servebbs.com",
+ "servebbs.net",
+ "servebbs.org",
+ "serveftp.net",
+ "serveftp.org",
+ "servegame.org",
+ "shacknet.nu",
+ "simple-url.com",
+ "space-to-rent.com",
+ "stuff-4-sale.org",
+ "stuff-4-sale.us",
+ "teaches-yoga.com",
+ "thruhere.net",
+ "traeumtgerade.de",
+ "webhop.biz",
+ "webhop.info",
+ "webhop.net",
+ "webhop.org",
+ "worse-than.tv",
+ "writesthisblog.com",
+ "dynv6.net",
+ "e4.cz",
+ "eu.org",
+ "al.eu.org",
+ "asso.eu.org",
+ "at.eu.org",
+ "au.eu.org",
+ "be.eu.org",
+ "bg.eu.org",
+ "ca.eu.org",
+ "cd.eu.org",
+ "ch.eu.org",
+ "cn.eu.org",
+ "cy.eu.org",
+ "cz.eu.org",
+ "de.eu.org",
+ "dk.eu.org",
+ "edu.eu.org",
+ "ee.eu.org",
+ "es.eu.org",
+ "fi.eu.org",
+ "fr.eu.org",
+ "gr.eu.org",
+ "hr.eu.org",
+ "hu.eu.org",
+ "ie.eu.org",
+ "il.eu.org",
+ "in.eu.org",
+ "int.eu.org",
+ "is.eu.org",
+ "it.eu.org",
+ "jp.eu.org",
+ "kr.eu.org",
+ "lt.eu.org",
+ "lu.eu.org",
+ "lv.eu.org",
+ "mc.eu.org",
+ "me.eu.org",
+ "mk.eu.org",
+ "mt.eu.org",
+ "my.eu.org",
+ "net.eu.org",
+ "ng.eu.org",
+ "nl.eu.org",
+ "no.eu.org",
+ "nz.eu.org",
+ "paris.eu.org",
+ "pl.eu.org",
+ "pt.eu.org",
+ "q-a.eu.org",
+ "ro.eu.org",
+ "ru.eu.org",
+ "se.eu.org",
+ "si.eu.org",
+ "sk.eu.org",
+ "tr.eu.org",
+ "uk.eu.org",
+ "us.eu.org",
+ "eu-1.evennode.com",
+ "eu-2.evennode.com",
+ "us-1.evennode.com",
+ "us-2.evennode.com",
+ "apps.fbsbx.com",
+ "a.ssl.fastly.net",
+ "b.ssl.fastly.net",
+ "global.ssl.fastly.net",
+ "a.prod.fastly.net",
+ "global.prod.fastly.net",
+ "fhapp.xyz",
+ "firebaseapp.com",
+ "flynnhub.com",
+ "freebox-os.com",
+ "freeboxos.com",
+ "fbx-os.fr",
+ "fbxos.fr",
+ "freebox-os.fr",
+ "freeboxos.fr",
+ "service.gov.uk",
+ "github.io",
+ "githubusercontent.com",
+ "githubcloud.com",
+ "*.api.githubcloud.com",
+ "*.ext.githubcloud.com",
+ "gist.githubcloud.com",
+ "*.githubcloudusercontent.com",
+ "gitlab.io",
+ "ro.com",
+ "goip.de",
+ "*.0emm.com",
+ "appspot.com",
+ "blogspot.ae",
+ "blogspot.al",
+ "blogspot.am",
+ "blogspot.ba",
+ "blogspot.be",
+ "blogspot.bg",
+ "blogspot.bj",
+ "blogspot.ca",
+ "blogspot.cf",
+ "blogspot.ch",
+ "blogspot.cl",
+ "blogspot.co.at",
+ "blogspot.co.id",
+ "blogspot.co.il",
+ "blogspot.co.ke",
+ "blogspot.co.nz",
+ "blogspot.co.uk",
+ "blogspot.co.za",
+ "blogspot.com",
+ "blogspot.com.ar",
+ "blogspot.com.au",
+ "blogspot.com.br",
+ "blogspot.com.by",
+ "blogspot.com.co",
+ "blogspot.com.cy",
+ "blogspot.com.ee",
+ "blogspot.com.eg",
+ "blogspot.com.es",
+ "blogspot.com.mt",
+ "blogspot.com.ng",
+ "blogspot.com.tr",
+ "blogspot.com.uy",
+ "blogspot.cv",
+ "blogspot.cz",
+ "blogspot.de",
+ "blogspot.dk",
+ "blogspot.fi",
+ "blogspot.fr",
+ "blogspot.gr",
+ "blogspot.hk",
+ "blogspot.hr",
+ "blogspot.hu",
+ "blogspot.ie",
+ "blogspot.in",
+ "blogspot.is",
+ "blogspot.it",
+ "blogspot.jp",
+ "blogspot.kr",
+ "blogspot.li",
+ "blogspot.lt",
+ "blogspot.lu",
+ "blogspot.md",
+ "blogspot.mk",
+ "blogspot.mr",
+ "blogspot.mx",
+ "blogspot.my",
+ "blogspot.nl",
+ "blogspot.no",
+ "blogspot.pe",
+ "blogspot.pt",
+ "blogspot.qa",
+ "blogspot.re",
+ "blogspot.ro",
+ "blogspot.rs",
+ "blogspot.ru",
+ "blogspot.se",
+ "blogspot.sg",
+ "blogspot.si",
+ "blogspot.sk",
+ "blogspot.sn",
+ "blogspot.td",
+ "blogspot.tw",
+ "blogspot.ug",
+ "blogspot.vn",
+ "cloudfunctions.net",
+ "codespot.com",
+ "googleapis.com",
+ "googlecode.com",
+ "pagespeedmobilizer.com",
+ "withgoogle.com",
+ "withyoutube.com",
+ "hashbang.sh",
+ "hasura-app.io",
+ "hepforge.org",
+ "herokuapp.com",
+ "herokussl.com",
+ "iki.fi",
+ "biz.at",
+ "info.at",
+ "*.magentosite.cloud",
+ "meteorapp.com",
+ "eu.meteorapp.com",
+ "co.pl",
+ "azurewebsites.net",
+ "azure-mobile.net",
+ "cloudapp.net",
+ "bmoattachments.org",
+ "4u.com",
+ "ngrok.io",
+ "nfshost.com",
+ "nsupdate.info",
+ "nerdpol.ovh",
+ "blogsyte.com",
+ "brasilia.me",
+ "cable-modem.org",
+ "ciscofreak.com",
+ "collegefan.org",
+ "couchpotatofries.org",
+ "damnserver.com",
+ "ddns.me",
+ "ditchyourip.com",
+ "dnsfor.me",
+ "dnsiskinky.com",
+ "dvrcam.info",
+ "dynns.com",
+ "eating-organic.net",
+ "fantasyleague.cc",
+ "geekgalaxy.com",
+ "golffan.us",
+ "health-carereform.com",
+ "homesecuritymac.com",
+ "homesecuritypc.com",
+ "hopto.me",
+ "ilovecollege.info",
+ "loginto.me",
+ "mlbfan.org",
+ "mmafan.biz",
+ "myactivedirectory.com",
+ "mydissent.net",
+ "myeffect.net",
+ "mymediapc.net",
+ "mypsx.net",
+ "mysecuritycamera.com",
+ "mysecuritycamera.net",
+ "mysecuritycamera.org",
+ "net-freaks.com",
+ "nflfan.org",
+ "nhlfan.net",
+ "no-ip.ca",
+ "no-ip.co.uk",
+ "no-ip.net",
+ "noip.us",
+ "onthewifi.com",
+ "pgafan.net",
+ "point2this.com",
+ "pointto.us",
+ "privatizehealthinsurance.net",
+ "quicksytes.com",
+ "read-books.org",
+ "securitytactics.com",
+ "serveexchange.com",
+ "servehumour.com",
+ "servep2p.com",
+ "servesarcasm.com",
+ "stufftoread.com",
+ "ufcfan.org",
+ "unusualperson.com",
+ "workisboring.com",
+ "3utilities.com",
+ "bounceme.net",
+ "ddns.net",
+ "ddnsking.com",
+ "gotdns.ch",
+ "hopto.org",
+ "myftp.biz",
+ "myftp.org",
+ "myvnc.com",
+ "no-ip.biz",
+ "no-ip.info",
+ "no-ip.org",
+ "noip.me",
+ "redirectme.net",
+ "servebeer.com",
+ "serveblog.net",
+ "servecounterstrike.com",
+ "serveftp.com",
+ "servegame.com",
+ "servehalflife.com",
+ "servehttp.com",
+ "serveirc.com",
+ "serveminecraft.net",
+ "servemp3.com",
+ "servepics.com",
+ "servequake.com",
+ "sytes.net",
+ "webhop.me",
+ "zapto.org",
+ "nyc.mn",
+ "nid.io",
+ "operaunite.com",
+ "outsystemscloud.com",
+ "ownprovider.com",
+ "oy.lc",
+ "pgfog.com",
+ "pagefrontapp.com",
+ "art.pl",
+ "gliwice.pl",
+ "krakow.pl",
+ "poznan.pl",
+ "wroc.pl",
+ "zakopane.pl",
+ "pantheonsite.io",
+ "gotpantheon.com",
+ "mypep.link",
+ "xen.prgmr.com",
+ "priv.at",
+ "chirurgiens-dentistes-en-france.fr",
+ "qa2.com",
+ "dev-myqnapcloud.com",
+ "alpha-myqnapcloud.com",
+ "myqnapcloud.com",
+ "rackmaze.com",
+ "rackmaze.net",
+ "rhcloud.com",
+ "hzc.io",
+ "sandcats.io",
+ "logoip.de",
+ "logoip.com",
+ "biz.ua",
+ "co.ua",
+ "pp.ua",
+ "myshopblocks.com",
+ "sinaapp.com",
+ "vipsinaapp.com",
+ "1kapp.com",
+ "bounty-full.com",
+ "alpha.bounty-full.com",
+ "beta.bounty-full.com",
+ "static.land",
+ "dev.static.land",
+ "sites.static.land",
+ "spacekit.io",
+ "stackspace.space",
+ "diskstation.me",
+ "dscloud.biz",
+ "dscloud.me",
+ "dscloud.mobi",
+ "dsmynas.com",
+ "dsmynas.net",
+ "dsmynas.org",
+ "familyds.com",
+ "familyds.net",
+ "familyds.org",
+ "i234.me",
+ "myds.me",
+ "synology.me",
+ "gda.pl",
+ "gdansk.pl",
+ "gdynia.pl",
+ "med.pl",
+ "sopot.pl",
+ "bloxcms.com",
+ "townnews-staging.com",
+ "tuxfamily.org",
+ "hk.com",
+ "hk.org",
+ "ltd.hk",
+ "inc.hk",
+ "router.management",
+ "yolasite.com",
+ "za.net",
+ "za.org",
+}
+
+var nodeLabels = [...]string{
+ "aaa",
+ "aarp",
+ "abarth",
+ "abb",
+ "abbott",
+ "abbvie",
+ "abc",
+ "able",
+ "abogado",
+ "abudhabi",
+ "ac",
+ "academy",
+ "accenture",
+ "accountant",
+ "accountants",
+ "aco",
+ "active",
+ "actor",
+ "ad",
+ "adac",
+ "ads",
+ "adult",
+ "ae",
+ "aeg",
+ "aero",
+ "aetna",
+ "af",
+ "afamilycompany",
+ "afl",
+ "africa",
+ "africamagic",
+ "ag",
+ "agakhan",
+ "agency",
+ "ai",
+ "aig",
+ "aigo",
+ "airbus",
+ "airforce",
+ "airtel",
+ "akdn",
+ "al",
+ "alfaromeo",
+ "alibaba",
+ "alipay",
+ "allfinanz",
+ "allstate",
+ "ally",
+ "alsace",
+ "alstom",
+ "am",
+ "americanexpress",
+ "americanfamily",
+ "amex",
+ "amfam",
+ "amica",
+ "amsterdam",
+ "analytics",
+ "android",
+ "anquan",
+ "anz",
+ "ao",
+ "aol",
+ "apartments",
+ "app",
+ "apple",
+ "aq",
+ "aquarelle",
+ "ar",
+ "arab",
+ "aramco",
+ "archi",
+ "army",
+ "arpa",
+ "art",
+ "arte",
+ "as",
+ "asda",
+ "asia",
+ "associates",
+ "at",
+ "athleta",
+ "attorney",
+ "au",
+ "auction",
+ "audi",
+ "audible",
+ "audio",
+ "auspost",
+ "author",
+ "auto",
+ "autos",
+ "avianca",
+ "aw",
+ "aws",
+ "ax",
+ "axa",
+ "az",
+ "azure",
+ "ba",
+ "baby",
+ "baidu",
+ "banamex",
+ "bananarepublic",
+ "band",
+ "bank",
+ "bar",
+ "barcelona",
+ "barclaycard",
+ "barclays",
+ "barefoot",
+ "bargains",
+ "baseball",
+ "basketball",
+ "bauhaus",
+ "bayern",
+ "bb",
+ "bbc",
+ "bbt",
+ "bbva",
+ "bcg",
+ "bcn",
+ "bd",
+ "be",
+ "beats",
+ "beauty",
+ "beer",
+ "bentley",
+ "berlin",
+ "best",
+ "bestbuy",
+ "bet",
+ "bf",
+ "bg",
+ "bh",
+ "bharti",
+ "bi",
+ "bible",
+ "bid",
+ "bike",
+ "bing",
+ "bingo",
+ "bio",
+ "biz",
+ "bj",
+ "black",
+ "blackfriday",
+ "blanco",
+ "blockbuster",
+ "blog",
+ "bloomberg",
+ "blue",
+ "bm",
+ "bms",
+ "bmw",
+ "bn",
+ "bnl",
+ "bnpparibas",
+ "bo",
+ "boats",
+ "boehringer",
+ "bofa",
+ "bom",
+ "bond",
+ "boo",
+ "book",
+ "booking",
+ "boots",
+ "bosch",
+ "bostik",
+ "boston",
+ "bot",
+ "boutique",
+ "box",
+ "br",
+ "bradesco",
+ "bridgestone",
+ "broadway",
+ "broker",
+ "brother",
+ "brussels",
+ "bs",
+ "bt",
+ "budapest",
+ "bugatti",
+ "build",
+ "builders",
+ "business",
+ "buy",
+ "buzz",
+ "bv",
+ "bw",
+ "by",
+ "bz",
+ "bzh",
+ "ca",
+ "cab",
+ "cafe",
+ "cal",
+ "call",
+ "calvinklein",
+ "cam",
+ "camera",
+ "camp",
+ "cancerresearch",
+ "canon",
+ "capetown",
+ "capital",
+ "capitalone",
+ "car",
+ "caravan",
+ "cards",
+ "care",
+ "career",
+ "careers",
+ "cars",
+ "cartier",
+ "casa",
+ "case",
+ "caseih",
+ "cash",
+ "casino",
+ "cat",
+ "catering",
+ "catholic",
+ "cba",
+ "cbn",
+ "cbre",
+ "cbs",
+ "cc",
+ "cd",
+ "ceb",
+ "center",
+ "ceo",
+ "cern",
+ "cf",
+ "cfa",
+ "cfd",
+ "cg",
+ "ch",
+ "chanel",
+ "channel",
+ "chase",
+ "chat",
+ "cheap",
+ "chintai",
+ "chloe",
+ "christmas",
+ "chrome",
+ "chrysler",
+ "church",
+ "ci",
+ "cipriani",
+ "circle",
+ "cisco",
+ "citadel",
+ "citi",
+ "citic",
+ "city",
+ "cityeats",
+ "ck",
+ "cl",
+ "claims",
+ "cleaning",
+ "click",
+ "clinic",
+ "clinique",
+ "clothing",
+ "cloud",
+ "club",
+ "clubmed",
+ "cm",
+ "cn",
+ "co",
+ "coach",
+ "codes",
+ "coffee",
+ "college",
+ "cologne",
+ "com",
+ "comcast",
+ "commbank",
+ "community",
+ "company",
+ "compare",
+ "computer",
+ "comsec",
+ "condos",
+ "construction",
+ "consulting",
+ "contact",
+ "contractors",
+ "cooking",
+ "cookingchannel",
+ "cool",
+ "coop",
+ "corsica",
+ "country",
+ "coupon",
+ "coupons",
+ "courses",
+ "cr",
+ "credit",
+ "creditcard",
+ "creditunion",
+ "cricket",
+ "crown",
+ "crs",
+ "cruise",
+ "cruises",
+ "csc",
+ "cu",
+ "cuisinella",
+ "cv",
+ "cw",
+ "cx",
+ "cy",
+ "cymru",
+ "cyou",
+ "cz",
+ "dabur",
+ "dad",
+ "dance",
+ "date",
+ "dating",
+ "datsun",
+ "day",
+ "dclk",
+ "dds",
+ "de",
+ "deal",
+ "dealer",
+ "deals",
+ "degree",
+ "delivery",
+ "dell",
+ "deloitte",
+ "delta",
+ "democrat",
+ "dental",
+ "dentist",
+ "desi",
+ "design",
+ "dev",
+ "dhl",
+ "diamonds",
+ "diet",
+ "digital",
+ "direct",
+ "directory",
+ "discount",
+ "discover",
+ "dish",
+ "diy",
+ "dj",
+ "dk",
+ "dm",
+ "dnp",
+ "do",
+ "docs",
+ "dodge",
+ "dog",
+ "doha",
+ "domains",
+ "dot",
+ "download",
+ "drive",
+ "dstv",
+ "dtv",
+ "dubai",
+ "duck",
+ "dunlop",
+ "duns",
+ "dupont",
+ "durban",
+ "dvag",
+ "dwg",
+ "dz",
+ "earth",
+ "eat",
+ "ec",
+ "edeka",
+ "edu",
+ "education",
+ "ee",
+ "eg",
+ "email",
+ "emerck",
+ "emerson",
+ "energy",
+ "engineer",
+ "engineering",
+ "enterprises",
+ "epost",
+ "epson",
+ "equipment",
+ "er",
+ "ericsson",
+ "erni",
+ "es",
+ "esq",
+ "estate",
+ "esurance",
+ "et",
+ "etisalat",
+ "eu",
+ "eurovision",
+ "eus",
+ "events",
+ "everbank",
+ "exchange",
+ "expert",
+ "exposed",
+ "express",
+ "extraspace",
+ "fage",
+ "fail",
+ "fairwinds",
+ "faith",
+ "family",
+ "fan",
+ "fans",
+ "farm",
+ "farmers",
+ "fashion",
+ "fast",
+ "fedex",
+ "feedback",
+ "ferrari",
+ "ferrero",
+ "fi",
+ "fiat",
+ "fidelity",
+ "fido",
+ "film",
+ "final",
+ "finance",
+ "financial",
+ "fire",
+ "firestone",
+ "firmdale",
+ "fish",
+ "fishing",
+ "fit",
+ "fitness",
+ "fj",
+ "fk",
+ "flickr",
+ "flights",
+ "flir",
+ "florist",
+ "flowers",
+ "flsmidth",
+ "fly",
+ "fm",
+ "fo",
+ "foo",
+ "food",
+ "foodnetwork",
+ "football",
+ "ford",
+ "forex",
+ "forsale",
+ "forum",
+ "foundation",
+ "fox",
+ "fr",
+ "free",
+ "fresenius",
+ "frl",
+ "frogans",
+ "frontdoor",
+ "frontier",
+ "ftr",
+ "fujitsu",
+ "fujixerox",
+ "fun",
+ "fund",
+ "furniture",
+ "futbol",
+ "fyi",
+ "ga",
+ "gal",
+ "gallery",
+ "gallo",
+ "gallup",
+ "game",
+ "games",
+ "gap",
+ "garden",
+ "gb",
+ "gbiz",
+ "gd",
+ "gdn",
+ "ge",
+ "gea",
+ "gent",
+ "genting",
+ "george",
+ "gf",
+ "gg",
+ "ggee",
+ "gh",
+ "gi",
+ "gift",
+ "gifts",
+ "gives",
+ "giving",
+ "gl",
+ "glade",
+ "glass",
+ "gle",
+ "global",
+ "globo",
+ "gm",
+ "gmail",
+ "gmbh",
+ "gmo",
+ "gmx",
+ "gn",
+ "godaddy",
+ "gold",
+ "goldpoint",
+ "golf",
+ "goo",
+ "goodhands",
+ "goodyear",
+ "goog",
+ "google",
+ "gop",
+ "got",
+ "gotv",
+ "gov",
+ "gp",
+ "gq",
+ "gr",
+ "grainger",
+ "graphics",
+ "gratis",
+ "green",
+ "gripe",
+ "group",
+ "gs",
+ "gt",
+ "gu",
+ "guardian",
+ "gucci",
+ "guge",
+ "guide",
+ "guitars",
+ "guru",
+ "gw",
+ "gy",
+ "hair",
+ "hamburg",
+ "hangout",
+ "haus",
+ "hbo",
+ "hdfc",
+ "hdfcbank",
+ "health",
+ "healthcare",
+ "help",
+ "helsinki",
+ "here",
+ "hermes",
+ "hgtv",
+ "hiphop",
+ "hisamitsu",
+ "hitachi",
+ "hiv",
+ "hk",
+ "hkt",
+ "hm",
+ "hn",
+ "hockey",
+ "holdings",
+ "holiday",
+ "homedepot",
+ "homegoods",
+ "homes",
+ "homesense",
+ "honda",
+ "honeywell",
+ "horse",
+ "host",
+ "hosting",
+ "hot",
+ "hoteles",
+ "hotels",
+ "hotmail",
+ "house",
+ "how",
+ "hr",
+ "hsbc",
+ "ht",
+ "htc",
+ "hu",
+ "hughes",
+ "hyatt",
+ "hyundai",
+ "ibm",
+ "icbc",
+ "ice",
+ "icu",
+ "id",
+ "ie",
+ "ieee",
+ "ifm",
+ "iinet",
+ "ikano",
+ "il",
+ "im",
+ "imamat",
+ "imdb",
+ "immo",
+ "immobilien",
+ "in",
+ "industries",
+ "infiniti",
+ "info",
+ "ing",
+ "ink",
+ "institute",
+ "insurance",
+ "insure",
+ "int",
+ "intel",
+ "international",
+ "intuit",
+ "investments",
+ "io",
+ "ipiranga",
+ "iq",
+ "ir",
+ "irish",
+ "is",
+ "iselect",
+ "ismaili",
+ "ist",
+ "istanbul",
+ "it",
+ "itau",
+ "itv",
+ "iveco",
+ "iwc",
+ "jaguar",
+ "java",
+ "jcb",
+ "jcp",
+ "je",
+ "jeep",
+ "jetzt",
+ "jewelry",
+ "jio",
+ "jlc",
+ "jll",
+ "jm",
+ "jmp",
+ "jnj",
+ "jo",
+ "jobs",
+ "joburg",
+ "jot",
+ "joy",
+ "jp",
+ "jpmorgan",
+ "jprs",
+ "juegos",
+ "juniper",
+ "kaufen",
+ "kddi",
+ "ke",
+ "kerryhotels",
+ "kerrylogistics",
+ "kerryproperties",
+ "kfh",
+ "kg",
+ "kh",
+ "ki",
+ "kia",
+ "kim",
+ "kinder",
+ "kindle",
+ "kitchen",
+ "kiwi",
+ "km",
+ "kn",
+ "koeln",
+ "komatsu",
+ "kosher",
+ "kp",
+ "kpmg",
+ "kpn",
+ "kr",
+ "krd",
+ "kred",
+ "kuokgroup",
+ "kw",
+ "ky",
+ "kyknet",
+ "kyoto",
+ "kz",
+ "la",
+ "lacaixa",
+ "ladbrokes",
+ "lamborghini",
+ "lamer",
+ "lancaster",
+ "lancia",
+ "lancome",
+ "land",
+ "landrover",
+ "lanxess",
+ "lasalle",
+ "lat",
+ "latino",
+ "latrobe",
+ "law",
+ "lawyer",
+ "lb",
+ "lc",
+ "lds",
+ "lease",
+ "leclerc",
+ "lefrak",
+ "legal",
+ "lego",
+ "lexus",
+ "lgbt",
+ "li",
+ "liaison",
+ "lidl",
+ "life",
+ "lifeinsurance",
+ "lifestyle",
+ "lighting",
+ "like",
+ "lilly",
+ "limited",
+ "limo",
+ "lincoln",
+ "linde",
+ "link",
+ "lipsy",
+ "live",
+ "living",
+ "lixil",
+ "lk",
+ "loan",
+ "loans",
+ "locker",
+ "locus",
+ "loft",
+ "lol",
+ "london",
+ "lotte",
+ "lotto",
+ "love",
+ "lpl",
+ "lplfinancial",
+ "lr",
+ "ls",
+ "lt",
+ "ltd",
+ "ltda",
+ "lu",
+ "lundbeck",
+ "lupin",
+ "luxe",
+ "luxury",
+ "lv",
+ "ly",
+ "ma",
+ "macys",
+ "madrid",
+ "maif",
+ "maison",
+ "makeup",
+ "man",
+ "management",
+ "mango",
+ "market",
+ "marketing",
+ "markets",
+ "marriott",
+ "marshalls",
+ "maserati",
+ "mattel",
+ "mba",
+ "mc",
+ "mcd",
+ "mcdonalds",
+ "mckinsey",
+ "md",
+ "me",
+ "med",
+ "media",
+ "meet",
+ "melbourne",
+ "meme",
+ "memorial",
+ "men",
+ "menu",
+ "meo",
+ "metlife",
+ "mg",
+ "mh",
+ "miami",
+ "microsoft",
+ "mil",
+ "mini",
+ "mint",
+ "mit",
+ "mitsubishi",
+ "mk",
+ "ml",
+ "mlb",
+ "mls",
+ "mm",
+ "mma",
+ "mn",
+ "mnet",
+ "mo",
+ "mobi",
+ "mobily",
+ "moda",
+ "moe",
+ "moi",
+ "mom",
+ "monash",
+ "money",
+ "monster",
+ "montblanc",
+ "mopar",
+ "mormon",
+ "mortgage",
+ "moscow",
+ "moto",
+ "motorcycles",
+ "mov",
+ "movie",
+ "movistar",
+ "mp",
+ "mq",
+ "mr",
+ "ms",
+ "msd",
+ "mt",
+ "mtn",
+ "mtpc",
+ "mtr",
+ "mu",
+ "multichoice",
+ "museum",
+ "mutual",
+ "mutuelle",
+ "mv",
+ "mw",
+ "mx",
+ "my",
+ "mz",
+ "mzansimagic",
+ "na",
+ "nab",
+ "nadex",
+ "nagoya",
+ "name",
+ "naspers",
+ "nationwide",
+ "natura",
+ "navy",
+ "nba",
+ "nc",
+ "ne",
+ "nec",
+ "net",
+ "netbank",
+ "netflix",
+ "network",
+ "neustar",
+ "new",
+ "newholland",
+ "news",
+ "next",
+ "nextdirect",
+ "nexus",
+ "nf",
+ "nfl",
+ "ng",
+ "ngo",
+ "nhk",
+ "ni",
+ "nico",
+ "nike",
+ "nikon",
+ "ninja",
+ "nissan",
+ "nissay",
+ "nl",
+ "no",
+ "nokia",
+ "northwesternmutual",
+ "norton",
+ "now",
+ "nowruz",
+ "nowtv",
+ "np",
+ "nr",
+ "nra",
+ "nrw",
+ "ntt",
+ "nu",
+ "nyc",
+ "nz",
+ "obi",
+ "observer",
+ "off",
+ "office",
+ "okinawa",
+ "olayan",
+ "olayangroup",
+ "oldnavy",
+ "ollo",
+ "om",
+ "omega",
+ "one",
+ "ong",
+ "onl",
+ "online",
+ "onyourside",
+ "ooo",
+ "open",
+ "oracle",
+ "orange",
+ "org",
+ "organic",
+ "orientexpress",
+ "origins",
+ "osaka",
+ "otsuka",
+ "ott",
+ "ovh",
+ "pa",
+ "page",
+ "pamperedchef",
+ "panasonic",
+ "panerai",
+ "paris",
+ "pars",
+ "partners",
+ "parts",
+ "party",
+ "passagens",
+ "pay",
+ "payu",
+ "pccw",
+ "pe",
+ "pet",
+ "pf",
+ "pfizer",
+ "pg",
+ "ph",
+ "pharmacy",
+ "philips",
+ "photo",
+ "photography",
+ "photos",
+ "physio",
+ "piaget",
+ "pics",
+ "pictet",
+ "pictures",
+ "pid",
+ "pin",
+ "ping",
+ "pink",
+ "pioneer",
+ "pizza",
+ "pk",
+ "pl",
+ "place",
+ "play",
+ "playstation",
+ "plumbing",
+ "plus",
+ "pm",
+ "pn",
+ "pnc",
+ "pohl",
+ "poker",
+ "politie",
+ "porn",
+ "post",
+ "pr",
+ "pramerica",
+ "praxi",
+ "press",
+ "prime",
+ "pro",
+ "prod",
+ "productions",
+ "prof",
+ "progressive",
+ "promo",
+ "properties",
+ "property",
+ "protection",
+ "pru",
+ "prudential",
+ "ps",
+ "pt",
+ "pub",
+ "pw",
+ "pwc",
+ "py",
+ "qa",
+ "qpon",
+ "quebec",
+ "quest",
+ "qvc",
+ "racing",
+ "raid",
+ "re",
+ "read",
+ "realestate",
+ "realtor",
+ "realty",
+ "recipes",
+ "red",
+ "redstone",
+ "redumbrella",
+ "rehab",
+ "reise",
+ "reisen",
+ "reit",
+ "reliance",
+ "ren",
+ "rent",
+ "rentals",
+ "repair",
+ "report",
+ "republican",
+ "rest",
+ "restaurant",
+ "review",
+ "reviews",
+ "rexroth",
+ "rich",
+ "richardli",
+ "ricoh",
+ "rightathome",
+ "ril",
+ "rio",
+ "rip",
+ "rmit",
+ "ro",
+ "rocher",
+ "rocks",
+ "rodeo",
+ "rogers",
+ "room",
+ "rs",
+ "rsvp",
+ "ru",
+ "ruhr",
+ "run",
+ "rw",
+ "rwe",
+ "ryukyu",
+ "sa",
+ "saarland",
+ "safe",
+ "safety",
+ "sakura",
+ "sale",
+ "salon",
+ "samsclub",
+ "samsung",
+ "sandvik",
+ "sandvikcoromant",
+ "sanofi",
+ "sap",
+ "sapo",
+ "sarl",
+ "sas",
+ "save",
+ "saxo",
+ "sb",
+ "sbi",
+ "sbs",
+ "sc",
+ "sca",
+ "scb",
+ "schaeffler",
+ "schmidt",
+ "scholarships",
+ "school",
+ "schule",
+ "schwarz",
+ "science",
+ "scjohnson",
+ "scor",
+ "scot",
+ "sd",
+ "se",
+ "seat",
+ "secure",
+ "security",
+ "seek",
+ "select",
+ "sener",
+ "services",
+ "ses",
+ "seven",
+ "sew",
+ "sex",
+ "sexy",
+ "sfr",
+ "sg",
+ "sh",
+ "shangrila",
+ "sharp",
+ "shaw",
+ "shell",
+ "shia",
+ "shiksha",
+ "shoes",
+ "shop",
+ "shopping",
+ "shouji",
+ "show",
+ "showtime",
+ "shriram",
+ "si",
+ "silk",
+ "sina",
+ "singles",
+ "site",
+ "sj",
+ "sk",
+ "ski",
+ "skin",
+ "sky",
+ "skype",
+ "sl",
+ "sling",
+ "sm",
+ "smart",
+ "smile",
+ "sn",
+ "sncf",
+ "so",
+ "soccer",
+ "social",
+ "softbank",
+ "software",
+ "sohu",
+ "solar",
+ "solutions",
+ "song",
+ "sony",
+ "soy",
+ "space",
+ "spiegel",
+ "spot",
+ "spreadbetting",
+ "sr",
+ "srl",
+ "srt",
+ "st",
+ "stada",
+ "staples",
+ "star",
+ "starhub",
+ "statebank",
+ "statefarm",
+ "statoil",
+ "stc",
+ "stcgroup",
+ "stockholm",
+ "storage",
+ "store",
+ "stream",
+ "studio",
+ "study",
+ "style",
+ "su",
+ "sucks",
+ "supersport",
+ "supplies",
+ "supply",
+ "support",
+ "surf",
+ "surgery",
+ "suzuki",
+ "sv",
+ "swatch",
+ "swiftcover",
+ "swiss",
+ "sx",
+ "sy",
+ "sydney",
+ "symantec",
+ "systems",
+ "sz",
+ "tab",
+ "taipei",
+ "talk",
+ "taobao",
+ "target",
+ "tatamotors",
+ "tatar",
+ "tattoo",
+ "tax",
+ "taxi",
+ "tc",
+ "tci",
+ "td",
+ "tdk",
+ "team",
+ "tech",
+ "technology",
+ "tel",
+ "telecity",
+ "telefonica",
+ "temasek",
+ "tennis",
+ "teva",
+ "tf",
+ "tg",
+ "th",
+ "thd",
+ "theater",
+ "theatre",
+ "theguardian",
+ "tiaa",
+ "tickets",
+ "tienda",
+ "tiffany",
+ "tips",
+ "tires",
+ "tirol",
+ "tj",
+ "tjmaxx",
+ "tjx",
+ "tk",
+ "tkmaxx",
+ "tl",
+ "tm",
+ "tmall",
+ "tn",
+ "to",
+ "today",
+ "tokyo",
+ "tools",
+ "top",
+ "toray",
+ "toshiba",
+ "total",
+ "tours",
+ "town",
+ "toyota",
+ "toys",
+ "tr",
+ "trade",
+ "trading",
+ "training",
+ "travel",
+ "travelchannel",
+ "travelers",
+ "travelersinsurance",
+ "trust",
+ "trv",
+ "tt",
+ "tube",
+ "tui",
+ "tunes",
+ "tushu",
+ "tv",
+ "tvs",
+ "tw",
+ "tz",
+ "ua",
+ "ubank",
+ "ubs",
+ "uconnect",
+ "ug",
+ "uk",
+ "unicom",
+ "university",
+ "uno",
+ "uol",
+ "ups",
+ "us",
+ "uy",
+ "uz",
+ "va",
+ "vacations",
+ "vana",
+ "vanguard",
+ "vc",
+ "ve",
+ "vegas",
+ "ventures",
+ "verisign",
+ "versicherung",
+ "vet",
+ "vg",
+ "vi",
+ "viajes",
+ "video",
+ "vig",
+ "viking",
+ "villas",
+ "vin",
+ "vip",
+ "virgin",
+ "visa",
+ "vision",
+ "vista",
+ "vistaprint",
+ "viva",
+ "vivo",
+ "vlaanderen",
+ "vn",
+ "vodka",
+ "volkswagen",
+ "volvo",
+ "vote",
+ "voting",
+ "voto",
+ "voyage",
+ "vu",
+ "vuelos",
+ "wales",
+ "walmart",
+ "walter",
+ "wang",
+ "wanggou",
+ "warman",
+ "watch",
+ "watches",
+ "weather",
+ "weatherchannel",
+ "webcam",
+ "weber",
+ "website",
+ "wed",
+ "wedding",
+ "weibo",
+ "weir",
+ "wf",
+ "whoswho",
+ "wien",
+ "wiki",
+ "williamhill",
+ "win",
+ "windows",
+ "wine",
+ "winners",
+ "wme",
+ "wolterskluwer",
+ "woodside",
+ "work",
+ "works",
+ "world",
+ "wow",
+ "ws",
+ "wtc",
+ "wtf",
+ "xbox",
+ "xerox",
+ "xfinity",
+ "xihuan",
+ "xin",
+ "xn--11b4c3d",
+ "xn--1ck2e1b",
+ "xn--1qqw23a",
+ "xn--30rr7y",
+ "xn--3bst00m",
+ "xn--3ds443g",
+ "xn--3e0b707e",
+ "xn--3oq18vl8pn36a",
+ "xn--3pxu8k",
+ "xn--42c2d9a",
+ "xn--45brj9c",
+ "xn--45q11c",
+ "xn--4gbrim",
+ "xn--4gq48lf9j",
+ "xn--54b7fta0cc",
+ "xn--55qw42g",
+ "xn--55qx5d",
+ "xn--5su34j936bgsg",
+ "xn--5tzm5g",
+ "xn--6frz82g",
+ "xn--6qq986b3xl",
+ "xn--80adxhks",
+ "xn--80ao21a",
+ "xn--80aqecdr1a",
+ "xn--80asehdb",
+ "xn--80aswg",
+ "xn--8y0a063a",
+ "xn--90a3ac",
+ "xn--90ais",
+ "xn--9dbq2a",
+ "xn--9et52u",
+ "xn--9krt00a",
+ "xn--b4w605ferd",
+ "xn--bck1b9a5dre4c",
+ "xn--c1avg",
+ "xn--c2br7g",
+ "xn--cck2b3b",
+ "xn--cg4bki",
+ "xn--clchc0ea0b2g2a9gcd",
+ "xn--czr694b",
+ "xn--czrs0t",
+ "xn--czru2d",
+ "xn--d1acj3b",
+ "xn--d1alf",
+ "xn--e1a4c",
+ "xn--eckvdtc9d",
+ "xn--efvy88h",
+ "xn--estv75g",
+ "xn--fct429k",
+ "xn--fhbei",
+ "xn--fiq228c5hs",
+ "xn--fiq64b",
+ "xn--fiqs8s",
+ "xn--fiqz9s",
+ "xn--fjq720a",
+ "xn--flw351e",
+ "xn--fpcrj9c3d",
+ "xn--fzc2c9e2c",
+ "xn--fzys8d69uvgm",
+ "xn--g2xx48c",
+ "xn--gckr3f0f",
+ "xn--gecrj9c",
+ "xn--gk3at1e",
+ "xn--h2brj9c",
+ "xn--hxt814e",
+ "xn--i1b6b1a6a2e",
+ "xn--imr513n",
+ "xn--io0a7i",
+ "xn--j1aef",
+ "xn--j1amh",
+ "xn--j6w193g",
+ "xn--jlq61u9w7b",
+ "xn--jvr189m",
+ "xn--kcrx77d1x4a",
+ "xn--kprw13d",
+ "xn--kpry57d",
+ "xn--kpu716f",
+ "xn--kput3i",
+ "xn--l1acc",
+ "xn--lgbbat1ad8j",
+ "xn--mgb2ddes",
+ "xn--mgb9awbf",
+ "xn--mgba3a3ejt",
+ "xn--mgba3a4f16a",
+ "xn--mgba3a4fra",
+ "xn--mgba7c0bbn0a",
+ "xn--mgbaakc7dvf",
+ "xn--mgbaam7a8h",
+ "xn--mgbab2bd",
+ "xn--mgbai9a5eva00b",
+ "xn--mgbai9azgqp6j",
+ "xn--mgbayh7gpa",
+ "xn--mgbb9fbpob",
+ "xn--mgbbh1a71e",
+ "xn--mgbc0a9azcg",
+ "xn--mgbca7dzdo",
+ "xn--mgberp4a5d4a87g",
+ "xn--mgberp4a5d4ar",
+ "xn--mgbi4ecexp",
+ "xn--mgbpl2fh",
+ "xn--mgbqly7c0a67fbc",
+ "xn--mgbqly7cvafr",
+ "xn--mgbt3dhd",
+ "xn--mgbtf8fl",
+ "xn--mgbtx2b",
+ "xn--mgbx4cd0ab",
+ "xn--mix082f",
+ "xn--mix891f",
+ "xn--mk1bu44c",
+ "xn--mxtq1m",
+ "xn--ngbc5azd",
+ "xn--ngbe9e0a",
+ "xn--ngbrx",
+ "xn--nnx388a",
+ "xn--node",
+ "xn--nqv7f",
+ "xn--nqv7fs00ema",
+ "xn--nyqy26a",
+ "xn--o3cw4h",
+ "xn--ogbpf8fl",
+ "xn--p1acf",
+ "xn--p1ai",
+ "xn--pbt977c",
+ "xn--pgbs0dh",
+ "xn--pssy2u",
+ "xn--q9jyb4c",
+ "xn--qcka1pmc",
+ "xn--qxam",
+ "xn--rhqv96g",
+ "xn--rovu88b",
+ "xn--s9brj9c",
+ "xn--ses554g",
+ "xn--t60b56a",
+ "xn--tckwe",
+ "xn--tiq49xqyj",
+ "xn--unup4y",
+ "xn--vermgensberater-ctb",
+ "xn--vermgensberatung-pwb",
+ "xn--vhquv",
+ "xn--vuq861b",
+ "xn--w4r85el8fhu5dnra",
+ "xn--w4rs40l",
+ "xn--wgbh1c",
+ "xn--wgbl6a",
+ "xn--xhq521b",
+ "xn--xkc2al3hye2a",
+ "xn--xkc2dl3a5ee0h",
+ "xn--y9a3aq",
+ "xn--yfro4i67o",
+ "xn--ygbi2ammx",
+ "xn--zfr164b",
+ "xperia",
+ "xxx",
+ "xyz",
+ "yachts",
+ "yahoo",
+ "yamaxun",
+ "yandex",
+ "ye",
+ "yodobashi",
+ "yoga",
+ "yokohama",
+ "you",
+ "youtube",
+ "yt",
+ "yun",
+ "za",
+ "zappos",
+ "zara",
+ "zero",
+ "zip",
+ "zippo",
+ "zm",
+ "zone",
+ "zuerich",
+ "zw",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "nom",
+ "ac",
+ "blogspot",
+ "co",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "sch",
+ "accident-investigation",
+ "accident-prevention",
+ "aerobatic",
+ "aeroclub",
+ "aerodrome",
+ "agents",
+ "air-surveillance",
+ "air-traffic-control",
+ "aircraft",
+ "airline",
+ "airport",
+ "airtraffic",
+ "ambulance",
+ "amusement",
+ "association",
+ "author",
+ "ballooning",
+ "broker",
+ "caa",
+ "cargo",
+ "catering",
+ "certification",
+ "championship",
+ "charter",
+ "civilaviation",
+ "club",
+ "conference",
+ "consultant",
+ "consulting",
+ "control",
+ "council",
+ "crew",
+ "design",
+ "dgca",
+ "educator",
+ "emergency",
+ "engine",
+ "engineer",
+ "entertainment",
+ "equipment",
+ "exchange",
+ "express",
+ "federation",
+ "flight",
+ "freight",
+ "fuel",
+ "gliding",
+ "government",
+ "groundhandling",
+ "group",
+ "hanggliding",
+ "homebuilt",
+ "insurance",
+ "journal",
+ "journalist",
+ "leasing",
+ "logistics",
+ "magazine",
+ "maintenance",
+ "media",
+ "microlight",
+ "modelling",
+ "navigation",
+ "parachuting",
+ "paragliding",
+ "passenger-association",
+ "pilot",
+ "press",
+ "production",
+ "recreation",
+ "repbody",
+ "res",
+ "research",
+ "rotorcraft",
+ "safety",
+ "scientist",
+ "services",
+ "show",
+ "skydiving",
+ "software",
+ "student",
+ "trader",
+ "trading",
+ "trainer",
+ "union",
+ "workinggroup",
+ "works",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "co",
+ "com",
+ "net",
+ "nom",
+ "org",
+ "com",
+ "net",
+ "off",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "blogspot",
+ "co",
+ "ed",
+ "gv",
+ "it",
+ "og",
+ "pb",
+ "com",
+ "edu",
+ "gob",
+ "gov",
+ "int",
+ "mil",
+ "net",
+ "org",
+ "tur",
+ "blogspot",
+ "e164",
+ "in-addr",
+ "ip6",
+ "iris",
+ "uri",
+ "urn",
+ "gov",
+ "ac",
+ "biz",
+ "co",
+ "gv",
+ "info",
+ "or",
+ "priv",
+ "blogspot",
+ "act",
+ "asn",
+ "com",
+ "conf",
+ "edu",
+ "gov",
+ "id",
+ "info",
+ "net",
+ "nsw",
+ "nt",
+ "org",
+ "oz",
+ "qld",
+ "sa",
+ "tas",
+ "vic",
+ "wa",
+ "blogspot",
+ "act",
+ "nsw",
+ "nt",
+ "qld",
+ "sa",
+ "tas",
+ "vic",
+ "wa",
+ "qld",
+ "sa",
+ "tas",
+ "vic",
+ "wa",
+ "com",
+ "biz",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "int",
+ "mil",
+ "name",
+ "net",
+ "org",
+ "pp",
+ "pro",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "biz",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "net",
+ "org",
+ "store",
+ "tv",
+ "ac",
+ "blogspot",
+ "gov",
+ "0",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ "a",
+ "b",
+ "blogspot",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "co",
+ "com",
+ "edu",
+ "or",
+ "org",
+ "dscloud",
+ "dyndns",
+ "for-better",
+ "for-more",
+ "for-some",
+ "for-the",
+ "mmafan",
+ "myftp",
+ "no-ip",
+ "selfip",
+ "webhop",
+ "asso",
+ "barreau",
+ "blogspot",
+ "gouv",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gob",
+ "gov",
+ "int",
+ "mil",
+ "net",
+ "org",
+ "tv",
+ "adm",
+ "adv",
+ "agr",
+ "am",
+ "arq",
+ "art",
+ "ato",
+ "b",
+ "bio",
+ "blog",
+ "bmd",
+ "cim",
+ "cng",
+ "cnt",
+ "com",
+ "coop",
+ "ecn",
+ "eco",
+ "edu",
+ "emp",
+ "eng",
+ "esp",
+ "etc",
+ "eti",
+ "far",
+ "flog",
+ "fm",
+ "fnd",
+ "fot",
+ "fst",
+ "g12",
+ "ggf",
+ "gov",
+ "imb",
+ "ind",
+ "inf",
+ "jor",
+ "jus",
+ "leg",
+ "lel",
+ "mat",
+ "med",
+ "mil",
+ "mp",
+ "mus",
+ "net",
+ "nom",
+ "not",
+ "ntr",
+ "odo",
+ "org",
+ "ppg",
+ "pro",
+ "psc",
+ "psi",
+ "qsl",
+ "radio",
+ "rec",
+ "slg",
+ "srv",
+ "taxi",
+ "teo",
+ "tmp",
+ "trd",
+ "tur",
+ "tv",
+ "vet",
+ "vlog",
+ "wiki",
+ "zlg",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "co",
+ "org",
+ "com",
+ "gov",
+ "mil",
+ "of",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "za",
+ "ab",
+ "bc",
+ "blogspot",
+ "co",
+ "gc",
+ "mb",
+ "nb",
+ "nf",
+ "nl",
+ "no-ip",
+ "ns",
+ "nt",
+ "nu",
+ "on",
+ "pe",
+ "qc",
+ "sk",
+ "yk",
+ "fantasyleague",
+ "ftpaccess",
+ "game-server",
+ "myphotos",
+ "scrapping",
+ "gov",
+ "blogspot",
+ "blogspot",
+ "gotdns",
+ "ac",
+ "asso",
+ "co",
+ "com",
+ "ed",
+ "edu",
+ "go",
+ "gouv",
+ "int",
+ "md",
+ "net",
+ "or",
+ "org",
+ "presse",
+ "xn--aroport-bya",
+ "www",
+ "blogspot",
+ "co",
+ "gob",
+ "gov",
+ "mil",
+ "magentosite",
+ "co",
+ "com",
+ "gov",
+ "net",
+ "ac",
+ "ah",
+ "bj",
+ "com",
+ "cq",
+ "edu",
+ "fj",
+ "gd",
+ "gov",
+ "gs",
+ "gx",
+ "gz",
+ "ha",
+ "hb",
+ "he",
+ "hi",
+ "hk",
+ "hl",
+ "hn",
+ "jl",
+ "js",
+ "jx",
+ "ln",
+ "mil",
+ "mo",
+ "net",
+ "nm",
+ "nx",
+ "org",
+ "qh",
+ "sc",
+ "sd",
+ "sh",
+ "sn",
+ "sx",
+ "tj",
+ "tw",
+ "xj",
+ "xn--55qx5d",
+ "xn--io0a7i",
+ "xn--od0alg",
+ "xz",
+ "yn",
+ "zj",
+ "amazonaws",
+ "cn-north-1",
+ "compute",
+ "s3",
+ "cn-north-1",
+ "arts",
+ "com",
+ "edu",
+ "firm",
+ "gov",
+ "info",
+ "int",
+ "mil",
+ "net",
+ "nom",
+ "org",
+ "rec",
+ "web",
+ "blogspot",
+ "0emm",
+ "1kapp",
+ "3utilities",
+ "4u",
+ "africa",
+ "alpha-myqnapcloud",
+ "amazonaws",
+ "appspot",
+ "ar",
+ "betainabox",
+ "blogdns",
+ "blogspot",
+ "blogsyte",
+ "bloxcms",
+ "bounty-full",
+ "br",
+ "cechire",
+ "ciscofreak",
+ "cloudcontrolapp",
+ "cloudcontrolled",
+ "cn",
+ "co",
+ "codespot",
+ "damnserver",
+ "ddnsking",
+ "de",
+ "dev-myqnapcloud",
+ "ditchyourip",
+ "dnsalias",
+ "dnsdojo",
+ "dnsiskinky",
+ "doesntexist",
+ "dontexist",
+ "doomdns",
+ "dreamhosters",
+ "dsmynas",
+ "dyn-o-saur",
+ "dynalias",
+ "dyndns-at-home",
+ "dyndns-at-work",
+ "dyndns-blog",
+ "dyndns-free",
+ "dyndns-home",
+ "dyndns-ip",
+ "dyndns-mail",
+ "dyndns-office",
+ "dyndns-pics",
+ "dyndns-remote",
+ "dyndns-server",
+ "dyndns-web",
+ "dyndns-wiki",
+ "dyndns-work",
+ "dynns",
+ "elasticbeanstalk",
+ "est-a-la-maison",
+ "est-a-la-masion",
+ "est-le-patron",
+ "est-mon-blogueur",
+ "eu",
+ "evennode",
+ "familyds",
+ "fbsbx",
+ "firebaseapp",
+ "flynnhub",
+ "freebox-os",
+ "freeboxos",
+ "from-ak",
+ "from-al",
+ "from-ar",
+ "from-ca",
+ "from-ct",
+ "from-dc",
+ "from-de",
+ "from-fl",
+ "from-ga",
+ "from-hi",
+ "from-ia",
+ "from-id",
+ "from-il",
+ "from-in",
+ "from-ks",
+ "from-ky",
+ "from-ma",
+ "from-md",
+ "from-mi",
+ "from-mn",
+ "from-mo",
+ "from-ms",
+ "from-mt",
+ "from-nc",
+ "from-nd",
+ "from-ne",
+ "from-nh",
+ "from-nj",
+ "from-nm",
+ "from-nv",
+ "from-oh",
+ "from-ok",
+ "from-or",
+ "from-pa",
+ "from-pr",
+ "from-ri",
+ "from-sc",
+ "from-sd",
+ "from-tn",
+ "from-tx",
+ "from-ut",
+ "from-va",
+ "from-vt",
+ "from-wa",
+ "from-wi",
+ "from-wv",
+ "from-wy",
+ "gb",
+ "geekgalaxy",
+ "getmyip",
+ "githubcloud",
+ "githubcloudusercontent",
+ "githubusercontent",
+ "googleapis",
+ "googlecode",
+ "gotdns",
+ "gotpantheon",
+ "gr",
+ "health-carereform",
+ "herokuapp",
+ "herokussl",
+ "hk",
+ "hobby-site",
+ "homelinux",
+ "homesecuritymac",
+ "homesecuritypc",
+ "homeunix",
+ "hu",
+ "iamallama",
+ "is-a-anarchist",
+ "is-a-blogger",
+ "is-a-bookkeeper",
+ "is-a-bulls-fan",
+ "is-a-caterer",
+ "is-a-chef",
+ "is-a-conservative",
+ "is-a-cpa",
+ "is-a-cubicle-slave",
+ "is-a-democrat",
+ "is-a-designer",
+ "is-a-doctor",
+ "is-a-financialadvisor",
+ "is-a-geek",
+ "is-a-green",
+ "is-a-guru",
+ "is-a-hard-worker",
+ "is-a-hunter",
+ "is-a-landscaper",
+ "is-a-lawyer",
+ "is-a-liberal",
+ "is-a-libertarian",
+ "is-a-llama",
+ "is-a-musician",
+ "is-a-nascarfan",
+ "is-a-nurse",
+ "is-a-painter",
+ "is-a-personaltrainer",
+ "is-a-photographer",
+ "is-a-player",
+ "is-a-republican",
+ "is-a-rockstar",
+ "is-a-socialist",
+ "is-a-student",
+ "is-a-teacher",
+ "is-a-techie",
+ "is-a-therapist",
+ "is-an-accountant",
+ "is-an-actor",
+ "is-an-actress",
+ "is-an-anarchist",
+ "is-an-artist",
+ "is-an-engineer",
+ "is-an-entertainer",
+ "is-certified",
+ "is-gone",
+ "is-into-anime",
+ "is-into-cars",
+ "is-into-cartoons",
+ "is-into-games",
+ "is-leet",
+ "is-not-certified",
+ "is-slick",
+ "is-uberleet",
+ "is-with-theband",
+ "isa-geek",
+ "isa-hockeynut",
+ "issmarterthanyou",
+ "jpn",
+ "kr",
+ "likes-pie",
+ "likescandy",
+ "logoip",
+ "meteorapp",
+ "mex",
+ "myactivedirectory",
+ "myasustor",
+ "mydrobo",
+ "myqnapcloud",
+ "mysecuritycamera",
+ "myshopblocks",
+ "myvnc",
+ "neat-url",
+ "net-freaks",
+ "nfshost",
+ "no",
+ "on-aptible",
+ "onthewifi",
+ "operaunite",
+ "outsystemscloud",
+ "ownprovider",
+ "pagefrontapp",
+ "pagespeedmobilizer",
+ "pgfog",
+ "point2this",
+ "prgmr",
+ "qa2",
+ "qc",
+ "quicksytes",
+ "rackmaze",
+ "rhcloud",
+ "ro",
+ "ru",
+ "sa",
+ "saves-the-whales",
+ "se",
+ "securitytactics",
+ "selfip",
+ "sells-for-less",
+ "sells-for-u",
+ "servebbs",
+ "servebeer",
+ "servecounterstrike",
+ "serveexchange",
+ "serveftp",
+ "servegame",
+ "servehalflife",
+ "servehttp",
+ "servehumour",
+ "serveirc",
+ "servemp3",
+ "servep2p",
+ "servepics",
+ "servequake",
+ "servesarcasm",
+ "simple-url",
+ "sinaapp",
+ "space-to-rent",
+ "stufftoread",
+ "teaches-yoga",
+ "townnews-staging",
+ "uk",
+ "unusualperson",
+ "us",
+ "uy",
+ "vipsinaapp",
+ "withgoogle",
+ "withyoutube",
+ "workisboring",
+ "writesthisblog",
+ "xenapponazure",
+ "yolasite",
+ "za",
+ "ap-northeast-2",
+ "compute",
+ "compute-1",
+ "elb",
+ "eu-central-1",
+ "s3",
+ "s3-ap-northeast-1",
+ "s3-ap-northeast-2",
+ "s3-ap-southeast-1",
+ "s3-ap-southeast-2",
+ "s3-eu-central-1",
+ "s3-eu-west-1",
+ "s3-external-1",
+ "s3-external-2",
+ "s3-fips-us-gov-west-1",
+ "s3-sa-east-1",
+ "s3-us-gov-west-1",
+ "s3-us-west-1",
+ "s3-us-west-2",
+ "us-east-1",
+ "s3",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "eu-central-1",
+ "eu-west-1",
+ "sa-east-1",
+ "us-gov-west-1",
+ "us-west-1",
+ "us-west-2",
+ "z-1",
+ "z-2",
+ "s3",
+ "alpha",
+ "beta",
+ "eu-1",
+ "eu-2",
+ "us-1",
+ "us-2",
+ "apps",
+ "api",
+ "ext",
+ "gist",
+ "eu",
+ "xen",
+ "ac",
+ "co",
+ "ed",
+ "fi",
+ "go",
+ "or",
+ "sa",
+ "com",
+ "edu",
+ "gov",
+ "inf",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "net",
+ "org",
+ "ath",
+ "gov",
+ "ac",
+ "biz",
+ "com",
+ "ekloges",
+ "gov",
+ "ltd",
+ "name",
+ "net",
+ "org",
+ "parliament",
+ "press",
+ "pro",
+ "tm",
+ "blogspot",
+ "blogspot",
+ "co",
+ "e4",
+ "realm",
+ "blogspot",
+ "com",
+ "dnshome",
+ "fuettertdasnetz",
+ "goip",
+ "isteingeek",
+ "istmein",
+ "lebtimnetz",
+ "leitungsen",
+ "logoip",
+ "traeumtgerade",
+ "biz",
+ "blogspot",
+ "co",
+ "firm",
+ "reg",
+ "store",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "art",
+ "com",
+ "edu",
+ "gob",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "sld",
+ "web",
+ "art",
+ "asso",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "pol",
+ "com",
+ "edu",
+ "fin",
+ "gob",
+ "gov",
+ "info",
+ "k12",
+ "med",
+ "mil",
+ "net",
+ "org",
+ "pro",
+ "aip",
+ "com",
+ "edu",
+ "fie",
+ "gov",
+ "lib",
+ "med",
+ "org",
+ "pri",
+ "riik",
+ "blogspot",
+ "com",
+ "edu",
+ "eun",
+ "gov",
+ "mil",
+ "name",
+ "net",
+ "org",
+ "sci",
+ "blogspot",
+ "com",
+ "edu",
+ "gob",
+ "nom",
+ "org",
+ "blogspot",
+ "compute",
+ "biz",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "name",
+ "net",
+ "org",
+ "mycd",
+ "aland",
+ "blogspot",
+ "dy",
+ "iki",
+ "aeroport",
+ "assedic",
+ "asso",
+ "avocat",
+ "avoues",
+ "blogspot",
+ "cci",
+ "chambagri",
+ "chirurgiens-dentistes",
+ "chirurgiens-dentistes-en-france",
+ "com",
+ "experts-comptables",
+ "fbx-os",
+ "fbxos",
+ "freebox-os",
+ "freeboxos",
+ "geometre-expert",
+ "gouv",
+ "greta",
+ "huissier-justice",
+ "medecin",
+ "nom",
+ "notaires",
+ "pharmacien",
+ "port",
+ "prd",
+ "presse",
+ "tm",
+ "veterinaire",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "pvt",
+ "co",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "ltd",
+ "mod",
+ "org",
+ "co",
+ "com",
+ "edu",
+ "net",
+ "org",
+ "ac",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "asso",
+ "com",
+ "edu",
+ "mobi",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gob",
+ "ind",
+ "mil",
+ "net",
+ "org",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "idv",
+ "inc",
+ "ltd",
+ "net",
+ "org",
+ "xn--55qx5d",
+ "xn--ciqpn",
+ "xn--gmq050i",
+ "xn--gmqw5a",
+ "xn--io0a7i",
+ "xn--lcvr32d",
+ "xn--mk0axi",
+ "xn--mxtq1m",
+ "xn--od0alg",
+ "xn--od0aq3b",
+ "xn--tn0ag",
+ "xn--uc0atv",
+ "xn--uc0ay4a",
+ "xn--wcvs22d",
+ "xn--zf0avx",
+ "com",
+ "edu",
+ "gob",
+ "mil",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "from",
+ "iz",
+ "name",
+ "adult",
+ "art",
+ "asso",
+ "com",
+ "coop",
+ "edu",
+ "firm",
+ "gouv",
+ "info",
+ "med",
+ "net",
+ "org",
+ "perso",
+ "pol",
+ "pro",
+ "rel",
+ "shop",
+ "2000",
+ "agrar",
+ "blogspot",
+ "bolt",
+ "casino",
+ "city",
+ "co",
+ "erotica",
+ "erotika",
+ "film",
+ "forum",
+ "games",
+ "hotel",
+ "info",
+ "ingatlan",
+ "jogasz",
+ "konyvelo",
+ "lakas",
+ "media",
+ "news",
+ "org",
+ "priv",
+ "reklam",
+ "sex",
+ "shop",
+ "sport",
+ "suli",
+ "szex",
+ "tm",
+ "tozsde",
+ "utazas",
+ "video",
+ "ac",
+ "biz",
+ "co",
+ "desa",
+ "go",
+ "mil",
+ "my",
+ "net",
+ "or",
+ "sch",
+ "web",
+ "blogspot",
+ "blogspot",
+ "gov",
+ "ac",
+ "co",
+ "gov",
+ "idf",
+ "k12",
+ "muni",
+ "net",
+ "org",
+ "blogspot",
+ "ac",
+ "co",
+ "com",
+ "net",
+ "org",
+ "tt",
+ "tv",
+ "ltd",
+ "plc",
+ "ac",
+ "blogspot",
+ "co",
+ "edu",
+ "firm",
+ "gen",
+ "gov",
+ "ind",
+ "mil",
+ "net",
+ "nic",
+ "org",
+ "res",
+ "barrel-of-knowledge",
+ "barrell-of-knowledge",
+ "dvrcam",
+ "dyndns",
+ "for-our",
+ "groks-the",
+ "groks-this",
+ "here-for-more",
+ "ilovecollege",
+ "knowsitall",
+ "no-ip",
+ "nsupdate",
+ "selfip",
+ "webhop",
+ "eu",
+ "backplaneapp",
+ "boxfuse",
+ "browsersafetymark",
+ "com",
+ "dedyn",
+ "drud",
+ "github",
+ "gitlab",
+ "hasura-app",
+ "hzc",
+ "ngrok",
+ "nid",
+ "pantheonsite",
+ "sandcats",
+ "spacekit",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "ac",
+ "co",
+ "gov",
+ "id",
+ "net",
+ "org",
+ "sch",
+ "xn--mgba3a4f16a",
+ "xn--mgba3a4fra",
+ "blogspot",
+ "com",
+ "cupcake",
+ "edu",
+ "gov",
+ "int",
+ "net",
+ "org",
+ "abr",
+ "abruzzo",
+ "ag",
+ "agrigento",
+ "al",
+ "alessandria",
+ "alto-adige",
+ "altoadige",
+ "an",
+ "ancona",
+ "andria-barletta-trani",
+ "andria-trani-barletta",
+ "andriabarlettatrani",
+ "andriatranibarletta",
+ "ao",
+ "aosta",
+ "aosta-valley",
+ "aostavalley",
+ "aoste",
+ "ap",
+ "aq",
+ "aquila",
+ "ar",
+ "arezzo",
+ "ascoli-piceno",
+ "ascolipiceno",
+ "asti",
+ "at",
+ "av",
+ "avellino",
+ "ba",
+ "balsan",
+ "bari",
+ "barletta-trani-andria",
+ "barlettatraniandria",
+ "bas",
+ "basilicata",
+ "belluno",
+ "benevento",
+ "bergamo",
+ "bg",
+ "bi",
+ "biella",
+ "bl",
+ "blogspot",
+ "bn",
+ "bo",
+ "bologna",
+ "bolzano",
+ "bozen",
+ "br",
+ "brescia",
+ "brindisi",
+ "bs",
+ "bt",
+ "bz",
+ "ca",
+ "cagliari",
+ "cal",
+ "calabria",
+ "caltanissetta",
+ "cam",
+ "campania",
+ "campidano-medio",
+ "campidanomedio",
+ "campobasso",
+ "carbonia-iglesias",
+ "carboniaiglesias",
+ "carrara-massa",
+ "carraramassa",
+ "caserta",
+ "catania",
+ "catanzaro",
+ "cb",
+ "ce",
+ "cesena-forli",
+ "cesenaforli",
+ "ch",
+ "chieti",
+ "ci",
+ "cl",
+ "cn",
+ "co",
+ "como",
+ "cosenza",
+ "cr",
+ "cremona",
+ "crotone",
+ "cs",
+ "ct",
+ "cuneo",
+ "cz",
+ "dell-ogliastra",
+ "dellogliastra",
+ "edu",
+ "emilia-romagna",
+ "emiliaromagna",
+ "emr",
+ "en",
+ "enna",
+ "fc",
+ "fe",
+ "fermo",
+ "ferrara",
+ "fg",
+ "fi",
+ "firenze",
+ "florence",
+ "fm",
+ "foggia",
+ "forli-cesena",
+ "forlicesena",
+ "fr",
+ "friuli-v-giulia",
+ "friuli-ve-giulia",
+ "friuli-vegiulia",
+ "friuli-venezia-giulia",
+ "friuli-veneziagiulia",
+ "friuli-vgiulia",
+ "friuliv-giulia",
+ "friulive-giulia",
+ "friulivegiulia",
+ "friulivenezia-giulia",
+ "friuliveneziagiulia",
+ "friulivgiulia",
+ "frosinone",
+ "fvg",
+ "ge",
+ "genoa",
+ "genova",
+ "go",
+ "gorizia",
+ "gov",
+ "gr",
+ "grosseto",
+ "iglesias-carbonia",
+ "iglesiascarbonia",
+ "im",
+ "imperia",
+ "is",
+ "isernia",
+ "kr",
+ "la-spezia",
+ "laquila",
+ "laspezia",
+ "latina",
+ "laz",
+ "lazio",
+ "lc",
+ "le",
+ "lecce",
+ "lecco",
+ "li",
+ "lig",
+ "liguria",
+ "livorno",
+ "lo",
+ "lodi",
+ "lom",
+ "lombardia",
+ "lombardy",
+ "lt",
+ "lu",
+ "lucania",
+ "lucca",
+ "macerata",
+ "mantova",
+ "mar",
+ "marche",
+ "massa-carrara",
+ "massacarrara",
+ "matera",
+ "mb",
+ "mc",
+ "me",
+ "medio-campidano",
+ "mediocampidano",
+ "messina",
+ "mi",
+ "milan",
+ "milano",
+ "mn",
+ "mo",
+ "modena",
+ "mol",
+ "molise",
+ "monza",
+ "monza-brianza",
+ "monza-e-della-brianza",
+ "monzabrianza",
+ "monzaebrianza",
+ "monzaedellabrianza",
+ "ms",
+ "mt",
+ "na",
+ "naples",
+ "napoli",
+ "no",
+ "novara",
+ "nu",
+ "nuoro",
+ "og",
+ "ogliastra",
+ "olbia-tempio",
+ "olbiatempio",
+ "or",
+ "oristano",
+ "ot",
+ "pa",
+ "padova",
+ "padua",
+ "palermo",
+ "parma",
+ "pavia",
+ "pc",
+ "pd",
+ "pe",
+ "perugia",
+ "pesaro-urbino",
+ "pesarourbino",
+ "pescara",
+ "pg",
+ "pi",
+ "piacenza",
+ "piedmont",
+ "piemonte",
+ "pisa",
+ "pistoia",
+ "pmn",
+ "pn",
+ "po",
+ "pordenone",
+ "potenza",
+ "pr",
+ "prato",
+ "pt",
+ "pu",
+ "pug",
+ "puglia",
+ "pv",
+ "pz",
+ "ra",
+ "ragusa",
+ "ravenna",
+ "rc",
+ "re",
+ "reggio-calabria",
+ "reggio-emilia",
+ "reggiocalabria",
+ "reggioemilia",
+ "rg",
+ "ri",
+ "rieti",
+ "rimini",
+ "rm",
+ "rn",
+ "ro",
+ "roma",
+ "rome",
+ "rovigo",
+ "sa",
+ "salerno",
+ "sar",
+ "sardegna",
+ "sardinia",
+ "sassari",
+ "savona",
+ "si",
+ "sic",
+ "sicilia",
+ "sicily",
+ "siena",
+ "siracusa",
+ "so",
+ "sondrio",
+ "sp",
+ "sr",
+ "ss",
+ "suedtirol",
+ "sv",
+ "ta",
+ "taa",
+ "taranto",
+ "te",
+ "tempio-olbia",
+ "tempioolbia",
+ "teramo",
+ "terni",
+ "tn",
+ "to",
+ "torino",
+ "tos",
+ "toscana",
+ "tp",
+ "tr",
+ "trani-andria-barletta",
+ "trani-barletta-andria",
+ "traniandriabarletta",
+ "tranibarlettaandria",
+ "trapani",
+ "trentino",
+ "trentino-a-adige",
+ "trentino-aadige",
+ "trentino-alto-adige",
+ "trentino-altoadige",
+ "trentino-s-tirol",
+ "trentino-stirol",
+ "trentino-sud-tirol",
+ "trentino-sudtirol",
+ "trentino-sued-tirol",
+ "trentino-suedtirol",
+ "trentinoa-adige",
+ "trentinoaadige",
+ "trentinoalto-adige",
+ "trentinoaltoadige",
+ "trentinos-tirol",
+ "trentinostirol",
+ "trentinosud-tirol",
+ "trentinosudtirol",
+ "trentinosued-tirol",
+ "trentinosuedtirol",
+ "trento",
+ "treviso",
+ "trieste",
+ "ts",
+ "turin",
+ "tuscany",
+ "tv",
+ "ud",
+ "udine",
+ "umb",
+ "umbria",
+ "urbino-pesaro",
+ "urbinopesaro",
+ "va",
+ "val-d-aosta",
+ "val-daosta",
+ "vald-aosta",
+ "valdaosta",
+ "valle-aosta",
+ "valle-d-aosta",
+ "valle-daosta",
+ "valleaosta",
+ "valled-aosta",
+ "valledaosta",
+ "vallee-aoste",
+ "valleeaoste",
+ "vao",
+ "varese",
+ "vb",
+ "vc",
+ "vda",
+ "ve",
+ "ven",
+ "veneto",
+ "venezia",
+ "venice",
+ "verbania",
+ "vercelli",
+ "verona",
+ "vi",
+ "vibo-valentia",
+ "vibovalentia",
+ "vicenza",
+ "viterbo",
+ "vr",
+ "vs",
+ "vt",
+ "vv",
+ "co",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "name",
+ "net",
+ "org",
+ "sch",
+ "ac",
+ "ad",
+ "aichi",
+ "akita",
+ "aomori",
+ "blogspot",
+ "chiba",
+ "co",
+ "ed",
+ "ehime",
+ "fukui",
+ "fukuoka",
+ "fukushima",
+ "gifu",
+ "go",
+ "gr",
+ "gunma",
+ "hiroshima",
+ "hokkaido",
+ "hyogo",
+ "ibaraki",
+ "ishikawa",
+ "iwate",
+ "kagawa",
+ "kagoshima",
+ "kanagawa",
+ "kawasaki",
+ "kitakyushu",
+ "kobe",
+ "kochi",
+ "kumamoto",
+ "kyoto",
+ "lg",
+ "mie",
+ "miyagi",
+ "miyazaki",
+ "nagano",
+ "nagasaki",
+ "nagoya",
+ "nara",
+ "ne",
+ "niigata",
+ "oita",
+ "okayama",
+ "okinawa",
+ "or",
+ "osaka",
+ "saga",
+ "saitama",
+ "sapporo",
+ "sendai",
+ "shiga",
+ "shimane",
+ "shizuoka",
+ "tochigi",
+ "tokushima",
+ "tokyo",
+ "tottori",
+ "toyama",
+ "wakayama",
+ "xn--0trq7p7nn",
+ "xn--1ctwo",
+ "xn--1lqs03n",
+ "xn--1lqs71d",
+ "xn--2m4a15e",
+ "xn--32vp30h",
+ "xn--4it168d",
+ "xn--4it797k",
+ "xn--4pvxs",
+ "xn--5js045d",
+ "xn--5rtp49c",
+ "xn--5rtq34k",
+ "xn--6btw5a",
+ "xn--6orx2r",
+ "xn--7t0a264c",
+ "xn--8ltr62k",
+ "xn--8pvr4u",
+ "xn--c3s14m",
+ "xn--d5qv7z876c",
+ "xn--djrs72d6uy",
+ "xn--djty4k",
+ "xn--efvn9s",
+ "xn--ehqz56n",
+ "xn--elqq16h",
+ "xn--f6qx53a",
+ "xn--k7yn95e",
+ "xn--kbrq7o",
+ "xn--klt787d",
+ "xn--kltp7d",
+ "xn--kltx9a",
+ "xn--klty5x",
+ "xn--mkru45i",
+ "xn--nit225k",
+ "xn--ntso0iqx3a",
+ "xn--ntsq17g",
+ "xn--pssu33l",
+ "xn--qqqt11m",
+ "xn--rht27z",
+ "xn--rht3d",
+ "xn--rht61e",
+ "xn--rny31h",
+ "xn--tor131o",
+ "xn--uist22h",
+ "xn--uisz3g",
+ "xn--uuwu58a",
+ "xn--vgu402c",
+ "xn--zbx025d",
+ "yamagata",
+ "yamaguchi",
+ "yamanashi",
+ "yokohama",
+ "aisai",
+ "ama",
+ "anjo",
+ "asuke",
+ "chiryu",
+ "chita",
+ "fuso",
+ "gamagori",
+ "handa",
+ "hazu",
+ "hekinan",
+ "higashiura",
+ "ichinomiya",
+ "inazawa",
+ "inuyama",
+ "isshiki",
+ "iwakura",
+ "kanie",
+ "kariya",
+ "kasugai",
+ "kira",
+ "kiyosu",
+ "komaki",
+ "konan",
+ "kota",
+ "mihama",
+ "miyoshi",
+ "nishio",
+ "nisshin",
+ "obu",
+ "oguchi",
+ "oharu",
+ "okazaki",
+ "owariasahi",
+ "seto",
+ "shikatsu",
+ "shinshiro",
+ "shitara",
+ "tahara",
+ "takahama",
+ "tobishima",
+ "toei",
+ "togo",
+ "tokai",
+ "tokoname",
+ "toyoake",
+ "toyohashi",
+ "toyokawa",
+ "toyone",
+ "toyota",
+ "tsushima",
+ "yatomi",
+ "akita",
+ "daisen",
+ "fujisato",
+ "gojome",
+ "hachirogata",
+ "happou",
+ "higashinaruse",
+ "honjo",
+ "honjyo",
+ "ikawa",
+ "kamikoani",
+ "kamioka",
+ "katagami",
+ "kazuno",
+ "kitaakita",
+ "kosaka",
+ "kyowa",
+ "misato",
+ "mitane",
+ "moriyoshi",
+ "nikaho",
+ "noshiro",
+ "odate",
+ "oga",
+ "ogata",
+ "semboku",
+ "yokote",
+ "yurihonjo",
+ "aomori",
+ "gonohe",
+ "hachinohe",
+ "hashikami",
+ "hiranai",
+ "hirosaki",
+ "itayanagi",
+ "kuroishi",
+ "misawa",
+ "mutsu",
+ "nakadomari",
+ "noheji",
+ "oirase",
+ "owani",
+ "rokunohe",
+ "sannohe",
+ "shichinohe",
+ "shingo",
+ "takko",
+ "towada",
+ "tsugaru",
+ "tsuruta",
+ "abiko",
+ "asahi",
+ "chonan",
+ "chosei",
+ "choshi",
+ "chuo",
+ "funabashi",
+ "futtsu",
+ "hanamigawa",
+ "ichihara",
+ "ichikawa",
+ "ichinomiya",
+ "inzai",
+ "isumi",
+ "kamagaya",
+ "kamogawa",
+ "kashiwa",
+ "katori",
+ "katsuura",
+ "kimitsu",
+ "kisarazu",
+ "kozaki",
+ "kujukuri",
+ "kyonan",
+ "matsudo",
+ "midori",
+ "mihama",
+ "minamiboso",
+ "mobara",
+ "mutsuzawa",
+ "nagara",
+ "nagareyama",
+ "narashino",
+ "narita",
+ "noda",
+ "oamishirasato",
+ "omigawa",
+ "onjuku",
+ "otaki",
+ "sakae",
+ "sakura",
+ "shimofusa",
+ "shirako",
+ "shiroi",
+ "shisui",
+ "sodegaura",
+ "sosa",
+ "tako",
+ "tateyama",
+ "togane",
+ "tohnosho",
+ "tomisato",
+ "urayasu",
+ "yachimata",
+ "yachiyo",
+ "yokaichiba",
+ "yokoshibahikari",
+ "yotsukaido",
+ "ainan",
+ "honai",
+ "ikata",
+ "imabari",
+ "iyo",
+ "kamijima",
+ "kihoku",
+ "kumakogen",
+ "masaki",
+ "matsuno",
+ "matsuyama",
+ "namikata",
+ "niihama",
+ "ozu",
+ "saijo",
+ "seiyo",
+ "shikokuchuo",
+ "tobe",
+ "toon",
+ "uchiko",
+ "uwajima",
+ "yawatahama",
+ "echizen",
+ "eiheiji",
+ "fukui",
+ "ikeda",
+ "katsuyama",
+ "mihama",
+ "minamiechizen",
+ "obama",
+ "ohi",
+ "ono",
+ "sabae",
+ "sakai",
+ "takahama",
+ "tsuruga",
+ "wakasa",
+ "ashiya",
+ "buzen",
+ "chikugo",
+ "chikuho",
+ "chikujo",
+ "chikushino",
+ "chikuzen",
+ "chuo",
+ "dazaifu",
+ "fukuchi",
+ "hakata",
+ "higashi",
+ "hirokawa",
+ "hisayama",
+ "iizuka",
+ "inatsuki",
+ "kaho",
+ "kasuga",
+ "kasuya",
+ "kawara",
+ "keisen",
+ "koga",
+ "kurate",
+ "kurogi",
+ "kurume",
+ "minami",
+ "miyako",
+ "miyama",
+ "miyawaka",
+ "mizumaki",
+ "munakata",
+ "nakagawa",
+ "nakama",
+ "nishi",
+ "nogata",
+ "ogori",
+ "okagaki",
+ "okawa",
+ "oki",
+ "omuta",
+ "onga",
+ "onojo",
+ "oto",
+ "saigawa",
+ "sasaguri",
+ "shingu",
+ "shinyoshitomi",
+ "shonai",
+ "soeda",
+ "sue",
+ "tachiarai",
+ "tagawa",
+ "takata",
+ "toho",
+ "toyotsu",
+ "tsuiki",
+ "ukiha",
+ "umi",
+ "usui",
+ "yamada",
+ "yame",
+ "yanagawa",
+ "yukuhashi",
+ "aizubange",
+ "aizumisato",
+ "aizuwakamatsu",
+ "asakawa",
+ "bandai",
+ "date",
+ "fukushima",
+ "furudono",
+ "futaba",
+ "hanawa",
+ "higashi",
+ "hirata",
+ "hirono",
+ "iitate",
+ "inawashiro",
+ "ishikawa",
+ "iwaki",
+ "izumizaki",
+ "kagamiishi",
+ "kaneyama",
+ "kawamata",
+ "kitakata",
+ "kitashiobara",
+ "koori",
+ "koriyama",
+ "kunimi",
+ "miharu",
+ "mishima",
+ "namie",
+ "nango",
+ "nishiaizu",
+ "nishigo",
+ "okuma",
+ "omotego",
+ "ono",
+ "otama",
+ "samegawa",
+ "shimogo",
+ "shirakawa",
+ "showa",
+ "soma",
+ "sukagawa",
+ "taishin",
+ "tamakawa",
+ "tanagura",
+ "tenei",
+ "yabuki",
+ "yamato",
+ "yamatsuri",
+ "yanaizu",
+ "yugawa",
+ "anpachi",
+ "ena",
+ "gifu",
+ "ginan",
+ "godo",
+ "gujo",
+ "hashima",
+ "hichiso",
+ "hida",
+ "higashishirakawa",
+ "ibigawa",
+ "ikeda",
+ "kakamigahara",
+ "kani",
+ "kasahara",
+ "kasamatsu",
+ "kawaue",
+ "kitagata",
+ "mino",
+ "minokamo",
+ "mitake",
+ "mizunami",
+ "motosu",
+ "nakatsugawa",
+ "ogaki",
+ "sakahogi",
+ "seki",
+ "sekigahara",
+ "shirakawa",
+ "tajimi",
+ "takayama",
+ "tarui",
+ "toki",
+ "tomika",
+ "wanouchi",
+ "yamagata",
+ "yaotsu",
+ "yoro",
+ "annaka",
+ "chiyoda",
+ "fujioka",
+ "higashiagatsuma",
+ "isesaki",
+ "itakura",
+ "kanna",
+ "kanra",
+ "katashina",
+ "kawaba",
+ "kiryu",
+ "kusatsu",
+ "maebashi",
+ "meiwa",
+ "midori",
+ "minakami",
+ "naganohara",
+ "nakanojo",
+ "nanmoku",
+ "numata",
+ "oizumi",
+ "ora",
+ "ota",
+ "shibukawa",
+ "shimonita",
+ "shinto",
+ "showa",
+ "takasaki",
+ "takayama",
+ "tamamura",
+ "tatebayashi",
+ "tomioka",
+ "tsukiyono",
+ "tsumagoi",
+ "ueno",
+ "yoshioka",
+ "asaminami",
+ "daiwa",
+ "etajima",
+ "fuchu",
+ "fukuyama",
+ "hatsukaichi",
+ "higashihiroshima",
+ "hongo",
+ "jinsekikogen",
+ "kaita",
+ "kui",
+ "kumano",
+ "kure",
+ "mihara",
+ "miyoshi",
+ "naka",
+ "onomichi",
+ "osakikamijima",
+ "otake",
+ "saka",
+ "sera",
+ "seranishi",
+ "shinichi",
+ "shobara",
+ "takehara",
+ "abashiri",
+ "abira",
+ "aibetsu",
+ "akabira",
+ "akkeshi",
+ "asahikawa",
+ "ashibetsu",
+ "ashoro",
+ "assabu",
+ "atsuma",
+ "bibai",
+ "biei",
+ "bifuka",
+ "bihoro",
+ "biratori",
+ "chippubetsu",
+ "chitose",
+ "date",
+ "ebetsu",
+ "embetsu",
+ "eniwa",
+ "erimo",
+ "esan",
+ "esashi",
+ "fukagawa",
+ "fukushima",
+ "furano",
+ "furubira",
+ "haboro",
+ "hakodate",
+ "hamatonbetsu",
+ "hidaka",
+ "higashikagura",
+ "higashikawa",
+ "hiroo",
+ "hokuryu",
+ "hokuto",
+ "honbetsu",
+ "horokanai",
+ "horonobe",
+ "ikeda",
+ "imakane",
+ "ishikari",
+ "iwamizawa",
+ "iwanai",
+ "kamifurano",
+ "kamikawa",
+ "kamishihoro",
+ "kamisunagawa",
+ "kamoenai",
+ "kayabe",
+ "kembuchi",
+ "kikonai",
+ "kimobetsu",
+ "kitahiroshima",
+ "kitami",
+ "kiyosato",
+ "koshimizu",
+ "kunneppu",
+ "kuriyama",
+ "kuromatsunai",
+ "kushiro",
+ "kutchan",
+ "kyowa",
+ "mashike",
+ "matsumae",
+ "mikasa",
+ "minamifurano",
+ "mombetsu",
+ "moseushi",
+ "mukawa",
+ "muroran",
+ "naie",
+ "nakagawa",
+ "nakasatsunai",
+ "nakatombetsu",
+ "nanae",
+ "nanporo",
+ "nayoro",
+ "nemuro",
+ "niikappu",
+ "niki",
+ "nishiokoppe",
+ "noboribetsu",
+ "numata",
+ "obihiro",
+ "obira",
+ "oketo",
+ "okoppe",
+ "otaru",
+ "otobe",
+ "otofuke",
+ "otoineppu",
+ "oumu",
+ "ozora",
+ "pippu",
+ "rankoshi",
+ "rebun",
+ "rikubetsu",
+ "rishiri",
+ "rishirifuji",
+ "saroma",
+ "sarufutsu",
+ "shakotan",
+ "shari",
+ "shibecha",
+ "shibetsu",
+ "shikabe",
+ "shikaoi",
+ "shimamaki",
+ "shimizu",
+ "shimokawa",
+ "shinshinotsu",
+ "shintoku",
+ "shiranuka",
+ "shiraoi",
+ "shiriuchi",
+ "sobetsu",
+ "sunagawa",
+ "taiki",
+ "takasu",
+ "takikawa",
+ "takinoue",
+ "teshikaga",
+ "tobetsu",
+ "tohma",
+ "tomakomai",
+ "tomari",
+ "toya",
+ "toyako",
+ "toyotomi",
+ "toyoura",
+ "tsubetsu",
+ "tsukigata",
+ "urakawa",
+ "urausu",
+ "uryu",
+ "utashinai",
+ "wakkanai",
+ "wassamu",
+ "yakumo",
+ "yoichi",
+ "aioi",
+ "akashi",
+ "ako",
+ "amagasaki",
+ "aogaki",
+ "asago",
+ "ashiya",
+ "awaji",
+ "fukusaki",
+ "goshiki",
+ "harima",
+ "himeji",
+ "ichikawa",
+ "inagawa",
+ "itami",
+ "kakogawa",
+ "kamigori",
+ "kamikawa",
+ "kasai",
+ "kasuga",
+ "kawanishi",
+ "miki",
+ "minamiawaji",
+ "nishinomiya",
+ "nishiwaki",
+ "ono",
+ "sanda",
+ "sannan",
+ "sasayama",
+ "sayo",
+ "shingu",
+ "shinonsen",
+ "shiso",
+ "sumoto",
+ "taishi",
+ "taka",
+ "takarazuka",
+ "takasago",
+ "takino",
+ "tamba",
+ "tatsuno",
+ "toyooka",
+ "yabu",
+ "yashiro",
+ "yoka",
+ "yokawa",
+ "ami",
+ "asahi",
+ "bando",
+ "chikusei",
+ "daigo",
+ "fujishiro",
+ "hitachi",
+ "hitachinaka",
+ "hitachiomiya",
+ "hitachiota",
+ "ibaraki",
+ "ina",
+ "inashiki",
+ "itako",
+ "iwama",
+ "joso",
+ "kamisu",
+ "kasama",
+ "kashima",
+ "kasumigaura",
+ "koga",
+ "miho",
+ "mito",
+ "moriya",
+ "naka",
+ "namegata",
+ "oarai",
+ "ogawa",
+ "omitama",
+ "ryugasaki",
+ "sakai",
+ "sakuragawa",
+ "shimodate",
+ "shimotsuma",
+ "shirosato",
+ "sowa",
+ "suifu",
+ "takahagi",
+ "tamatsukuri",
+ "tokai",
+ "tomobe",
+ "tone",
+ "toride",
+ "tsuchiura",
+ "tsukuba",
+ "uchihara",
+ "ushiku",
+ "yachiyo",
+ "yamagata",
+ "yawara",
+ "yuki",
+ "anamizu",
+ "hakui",
+ "hakusan",
+ "kaga",
+ "kahoku",
+ "kanazawa",
+ "kawakita",
+ "komatsu",
+ "nakanoto",
+ "nanao",
+ "nomi",
+ "nonoichi",
+ "noto",
+ "shika",
+ "suzu",
+ "tsubata",
+ "tsurugi",
+ "uchinada",
+ "wajima",
+ "fudai",
+ "fujisawa",
+ "hanamaki",
+ "hiraizumi",
+ "hirono",
+ "ichinohe",
+ "ichinoseki",
+ "iwaizumi",
+ "iwate",
+ "joboji",
+ "kamaishi",
+ "kanegasaki",
+ "karumai",
+ "kawai",
+ "kitakami",
+ "kuji",
+ "kunohe",
+ "kuzumaki",
+ "miyako",
+ "mizusawa",
+ "morioka",
+ "ninohe",
+ "noda",
+ "ofunato",
+ "oshu",
+ "otsuchi",
+ "rikuzentakata",
+ "shiwa",
+ "shizukuishi",
+ "sumita",
+ "tanohata",
+ "tono",
+ "yahaba",
+ "yamada",
+ "ayagawa",
+ "higashikagawa",
+ "kanonji",
+ "kotohira",
+ "manno",
+ "marugame",
+ "mitoyo",
+ "naoshima",
+ "sanuki",
+ "tadotsu",
+ "takamatsu",
+ "tonosho",
+ "uchinomi",
+ "utazu",
+ "zentsuji",
+ "akune",
+ "amami",
+ "hioki",
+ "isa",
+ "isen",
+ "izumi",
+ "kagoshima",
+ "kanoya",
+ "kawanabe",
+ "kinko",
+ "kouyama",
+ "makurazaki",
+ "matsumoto",
+ "minamitane",
+ "nakatane",
+ "nishinoomote",
+ "satsumasendai",
+ "soo",
+ "tarumizu",
+ "yusui",
+ "aikawa",
+ "atsugi",
+ "ayase",
+ "chigasaki",
+ "ebina",
+ "fujisawa",
+ "hadano",
+ "hakone",
+ "hiratsuka",
+ "isehara",
+ "kaisei",
+ "kamakura",
+ "kiyokawa",
+ "matsuda",
+ "minamiashigara",
+ "miura",
+ "nakai",
+ "ninomiya",
+ "odawara",
+ "oi",
+ "oiso",
+ "sagamihara",
+ "samukawa",
+ "tsukui",
+ "yamakita",
+ "yamato",
+ "yokosuka",
+ "yugawara",
+ "zama",
+ "zushi",
+ "city",
+ "city",
+ "city",
+ "aki",
+ "geisei",
+ "hidaka",
+ "higashitsuno",
+ "ino",
+ "kagami",
+ "kami",
+ "kitagawa",
+ "kochi",
+ "mihara",
+ "motoyama",
+ "muroto",
+ "nahari",
+ "nakamura",
+ "nankoku",
+ "nishitosa",
+ "niyodogawa",
+ "ochi",
+ "okawa",
+ "otoyo",
+ "otsuki",
+ "sakawa",
+ "sukumo",
+ "susaki",
+ "tosa",
+ "tosashimizu",
+ "toyo",
+ "tsuno",
+ "umaji",
+ "yasuda",
+ "yusuhara",
+ "amakusa",
+ "arao",
+ "aso",
+ "choyo",
+ "gyokuto",
+ "hitoyoshi",
+ "kamiamakusa",
+ "kashima",
+ "kikuchi",
+ "kumamoto",
+ "mashiki",
+ "mifune",
+ "minamata",
+ "minamioguni",
+ "nagasu",
+ "nishihara",
+ "oguni",
+ "ozu",
+ "sumoto",
+ "takamori",
+ "uki",
+ "uto",
+ "yamaga",
+ "yamato",
+ "yatsushiro",
+ "ayabe",
+ "fukuchiyama",
+ "higashiyama",
+ "ide",
+ "ine",
+ "joyo",
+ "kameoka",
+ "kamo",
+ "kita",
+ "kizu",
+ "kumiyama",
+ "kyotamba",
+ "kyotanabe",
+ "kyotango",
+ "maizuru",
+ "minami",
+ "minamiyamashiro",
+ "miyazu",
+ "muko",
+ "nagaokakyo",
+ "nakagyo",
+ "nantan",
+ "oyamazaki",
+ "sakyo",
+ "seika",
+ "tanabe",
+ "uji",
+ "ujitawara",
+ "wazuka",
+ "yamashina",
+ "yawata",
+ "asahi",
+ "inabe",
+ "ise",
+ "kameyama",
+ "kawagoe",
+ "kiho",
+ "kisosaki",
+ "kiwa",
+ "komono",
+ "kumano",
+ "kuwana",
+ "matsusaka",
+ "meiwa",
+ "mihama",
+ "minamiise",
+ "misugi",
+ "miyama",
+ "nabari",
+ "shima",
+ "suzuka",
+ "tado",
+ "taiki",
+ "taki",
+ "tamaki",
+ "toba",
+ "tsu",
+ "udono",
+ "ureshino",
+ "watarai",
+ "yokkaichi",
+ "furukawa",
+ "higashimatsushima",
+ "ishinomaki",
+ "iwanuma",
+ "kakuda",
+ "kami",
+ "kawasaki",
+ "marumori",
+ "matsushima",
+ "minamisanriku",
+ "misato",
+ "murata",
+ "natori",
+ "ogawara",
+ "ohira",
+ "onagawa",
+ "osaki",
+ "rifu",
+ "semine",
+ "shibata",
+ "shichikashuku",
+ "shikama",
+ "shiogama",
+ "shiroishi",
+ "tagajo",
+ "taiwa",
+ "tome",
+ "tomiya",
+ "wakuya",
+ "watari",
+ "yamamoto",
+ "zao",
+ "aya",
+ "ebino",
+ "gokase",
+ "hyuga",
+ "kadogawa",
+ "kawaminami",
+ "kijo",
+ "kitagawa",
+ "kitakata",
+ "kitaura",
+ "kobayashi",
+ "kunitomi",
+ "kushima",
+ "mimata",
+ "miyakonojo",
+ "miyazaki",
+ "morotsuka",
+ "nichinan",
+ "nishimera",
+ "nobeoka",
+ "saito",
+ "shiiba",
+ "shintomi",
+ "takaharu",
+ "takanabe",
+ "takazaki",
+ "tsuno",
+ "achi",
+ "agematsu",
+ "anan",
+ "aoki",
+ "asahi",
+ "azumino",
+ "chikuhoku",
+ "chikuma",
+ "chino",
+ "fujimi",
+ "hakuba",
+ "hara",
+ "hiraya",
+ "iida",
+ "iijima",
+ "iiyama",
+ "iizuna",
+ "ikeda",
+ "ikusaka",
+ "ina",
+ "karuizawa",
+ "kawakami",
+ "kiso",
+ "kisofukushima",
+ "kitaaiki",
+ "komagane",
+ "komoro",
+ "matsukawa",
+ "matsumoto",
+ "miasa",
+ "minamiaiki",
+ "minamimaki",
+ "minamiminowa",
+ "minowa",
+ "miyada",
+ "miyota",
+ "mochizuki",
+ "nagano",
+ "nagawa",
+ "nagiso",
+ "nakagawa",
+ "nakano",
+ "nozawaonsen",
+ "obuse",
+ "ogawa",
+ "okaya",
+ "omachi",
+ "omi",
+ "ookuwa",
+ "ooshika",
+ "otaki",
+ "otari",
+ "sakae",
+ "sakaki",
+ "saku",
+ "sakuho",
+ "shimosuwa",
+ "shinanomachi",
+ "shiojiri",
+ "suwa",
+ "suzaka",
+ "takagi",
+ "takamori",
+ "takayama",
+ "tateshina",
+ "tatsuno",
+ "togakushi",
+ "togura",
+ "tomi",
+ "ueda",
+ "wada",
+ "yamagata",
+ "yamanouchi",
+ "yasaka",
+ "yasuoka",
+ "chijiwa",
+ "futsu",
+ "goto",
+ "hasami",
+ "hirado",
+ "iki",
+ "isahaya",
+ "kawatana",
+ "kuchinotsu",
+ "matsuura",
+ "nagasaki",
+ "obama",
+ "omura",
+ "oseto",
+ "saikai",
+ "sasebo",
+ "seihi",
+ "shimabara",
+ "shinkamigoto",
+ "togitsu",
+ "tsushima",
+ "unzen",
+ "city",
+ "ando",
+ "gose",
+ "heguri",
+ "higashiyoshino",
+ "ikaruga",
+ "ikoma",
+ "kamikitayama",
+ "kanmaki",
+ "kashiba",
+ "kashihara",
+ "katsuragi",
+ "kawai",
+ "kawakami",
+ "kawanishi",
+ "koryo",
+ "kurotaki",
+ "mitsue",
+ "miyake",
+ "nara",
+ "nosegawa",
+ "oji",
+ "ouda",
+ "oyodo",
+ "sakurai",
+ "sango",
+ "shimoichi",
+ "shimokitayama",
+ "shinjo",
+ "soni",
+ "takatori",
+ "tawaramoto",
+ "tenkawa",
+ "tenri",
+ "uda",
+ "yamatokoriyama",
+ "yamatotakada",
+ "yamazoe",
+ "yoshino",
+ "aga",
+ "agano",
+ "gosen",
+ "itoigawa",
+ "izumozaki",
+ "joetsu",
+ "kamo",
+ "kariwa",
+ "kashiwazaki",
+ "minamiuonuma",
+ "mitsuke",
+ "muika",
+ "murakami",
+ "myoko",
+ "nagaoka",
+ "niigata",
+ "ojiya",
+ "omi",
+ "sado",
+ "sanjo",
+ "seiro",
+ "seirou",
+ "sekikawa",
+ "shibata",
+ "tagami",
+ "tainai",
+ "tochio",
+ "tokamachi",
+ "tsubame",
+ "tsunan",
+ "uonuma",
+ "yahiko",
+ "yoita",
+ "yuzawa",
+ "beppu",
+ "bungoono",
+ "bungotakada",
+ "hasama",
+ "hiji",
+ "himeshima",
+ "hita",
+ "kamitsue",
+ "kokonoe",
+ "kuju",
+ "kunisaki",
+ "kusu",
+ "oita",
+ "saiki",
+ "taketa",
+ "tsukumi",
+ "usa",
+ "usuki",
+ "yufu",
+ "akaiwa",
+ "asakuchi",
+ "bizen",
+ "hayashima",
+ "ibara",
+ "kagamino",
+ "kasaoka",
+ "kibichuo",
+ "kumenan",
+ "kurashiki",
+ "maniwa",
+ "misaki",
+ "nagi",
+ "niimi",
+ "nishiawakura",
+ "okayama",
+ "satosho",
+ "setouchi",
+ "shinjo",
+ "shoo",
+ "soja",
+ "takahashi",
+ "tamano",
+ "tsuyama",
+ "wake",
+ "yakage",
+ "aguni",
+ "ginowan",
+ "ginoza",
+ "gushikami",
+ "haebaru",
+ "higashi",
+ "hirara",
+ "iheya",
+ "ishigaki",
+ "ishikawa",
+ "itoman",
+ "izena",
+ "kadena",
+ "kin",
+ "kitadaito",
+ "kitanakagusuku",
+ "kumejima",
+ "kunigami",
+ "minamidaito",
+ "motobu",
+ "nago",
+ "naha",
+ "nakagusuku",
+ "nakijin",
+ "nanjo",
+ "nishihara",
+ "ogimi",
+ "okinawa",
+ "onna",
+ "shimoji",
+ "taketomi",
+ "tarama",
+ "tokashiki",
+ "tomigusuku",
+ "tonaki",
+ "urasoe",
+ "uruma",
+ "yaese",
+ "yomitan",
+ "yonabaru",
+ "yonaguni",
+ "zamami",
+ "abeno",
+ "chihayaakasaka",
+ "chuo",
+ "daito",
+ "fujiidera",
+ "habikino",
+ "hannan",
+ "higashiosaka",
+ "higashisumiyoshi",
+ "higashiyodogawa",
+ "hirakata",
+ "ibaraki",
+ "ikeda",
+ "izumi",
+ "izumiotsu",
+ "izumisano",
+ "kadoma",
+ "kaizuka",
+ "kanan",
+ "kashiwara",
+ "katano",
+ "kawachinagano",
+ "kishiwada",
+ "kita",
+ "kumatori",
+ "matsubara",
+ "minato",
+ "minoh",
+ "misaki",
+ "moriguchi",
+ "neyagawa",
+ "nishi",
+ "nose",
+ "osakasayama",
+ "sakai",
+ "sayama",
+ "sennan",
+ "settsu",
+ "shijonawate",
+ "shimamoto",
+ "suita",
+ "tadaoka",
+ "taishi",
+ "tajiri",
+ "takaishi",
+ "takatsuki",
+ "tondabayashi",
+ "toyonaka",
+ "toyono",
+ "yao",
+ "ariake",
+ "arita",
+ "fukudomi",
+ "genkai",
+ "hamatama",
+ "hizen",
+ "imari",
+ "kamimine",
+ "kanzaki",
+ "karatsu",
+ "kashima",
+ "kitagata",
+ "kitahata",
+ "kiyama",
+ "kouhoku",
+ "kyuragi",
+ "nishiarita",
+ "ogi",
+ "omachi",
+ "ouchi",
+ "saga",
+ "shiroishi",
+ "taku",
+ "tara",
+ "tosu",
+ "yoshinogari",
+ "arakawa",
+ "asaka",
+ "chichibu",
+ "fujimi",
+ "fujimino",
+ "fukaya",
+ "hanno",
+ "hanyu",
+ "hasuda",
+ "hatogaya",
+ "hatoyama",
+ "hidaka",
+ "higashichichibu",
+ "higashimatsuyama",
+ "honjo",
+ "ina",
+ "iruma",
+ "iwatsuki",
+ "kamiizumi",
+ "kamikawa",
+ "kamisato",
+ "kasukabe",
+ "kawagoe",
+ "kawaguchi",
+ "kawajima",
+ "kazo",
+ "kitamoto",
+ "koshigaya",
+ "kounosu",
+ "kuki",
+ "kumagaya",
+ "matsubushi",
+ "minano",
+ "misato",
+ "miyashiro",
+ "miyoshi",
+ "moroyama",
+ "nagatoro",
+ "namegawa",
+ "niiza",
+ "ogano",
+ "ogawa",
+ "ogose",
+ "okegawa",
+ "omiya",
+ "otaki",
+ "ranzan",
+ "ryokami",
+ "saitama",
+ "sakado",
+ "satte",
+ "sayama",
+ "shiki",
+ "shiraoka",
+ "soka",
+ "sugito",
+ "toda",
+ "tokigawa",
+ "tokorozawa",
+ "tsurugashima",
+ "urawa",
+ "warabi",
+ "yashio",
+ "yokoze",
+ "yono",
+ "yorii",
+ "yoshida",
+ "yoshikawa",
+ "yoshimi",
+ "city",
+ "city",
+ "aisho",
+ "gamo",
+ "higashiomi",
+ "hikone",
+ "koka",
+ "konan",
+ "kosei",
+ "koto",
+ "kusatsu",
+ "maibara",
+ "moriyama",
+ "nagahama",
+ "nishiazai",
+ "notogawa",
+ "omihachiman",
+ "otsu",
+ "ritto",
+ "ryuoh",
+ "takashima",
+ "takatsuki",
+ "torahime",
+ "toyosato",
+ "yasu",
+ "akagi",
+ "ama",
+ "gotsu",
+ "hamada",
+ "higashiizumo",
+ "hikawa",
+ "hikimi",
+ "izumo",
+ "kakinoki",
+ "masuda",
+ "matsue",
+ "misato",
+ "nishinoshima",
+ "ohda",
+ "okinoshima",
+ "okuizumo",
+ "shimane",
+ "tamayu",
+ "tsuwano",
+ "unnan",
+ "yakumo",
+ "yasugi",
+ "yatsuka",
+ "arai",
+ "atami",
+ "fuji",
+ "fujieda",
+ "fujikawa",
+ "fujinomiya",
+ "fukuroi",
+ "gotemba",
+ "haibara",
+ "hamamatsu",
+ "higashiizu",
+ "ito",
+ "iwata",
+ "izu",
+ "izunokuni",
+ "kakegawa",
+ "kannami",
+ "kawanehon",
+ "kawazu",
+ "kikugawa",
+ "kosai",
+ "makinohara",
+ "matsuzaki",
+ "minamiizu",
+ "mishima",
+ "morimachi",
+ "nishiizu",
+ "numazu",
+ "omaezaki",
+ "shimada",
+ "shimizu",
+ "shimoda",
+ "shizuoka",
+ "susono",
+ "yaizu",
+ "yoshida",
+ "ashikaga",
+ "bato",
+ "haga",
+ "ichikai",
+ "iwafune",
+ "kaminokawa",
+ "kanuma",
+ "karasuyama",
+ "kuroiso",
+ "mashiko",
+ "mibu",
+ "moka",
+ "motegi",
+ "nasu",
+ "nasushiobara",
+ "nikko",
+ "nishikata",
+ "nogi",
+ "ohira",
+ "ohtawara",
+ "oyama",
+ "sakura",
+ "sano",
+ "shimotsuke",
+ "shioya",
+ "takanezawa",
+ "tochigi",
+ "tsuga",
+ "ujiie",
+ "utsunomiya",
+ "yaita",
+ "aizumi",
+ "anan",
+ "ichiba",
+ "itano",
+ "kainan",
+ "komatsushima",
+ "matsushige",
+ "mima",
+ "minami",
+ "miyoshi",
+ "mugi",
+ "nakagawa",
+ "naruto",
+ "sanagochi",
+ "shishikui",
+ "tokushima",
+ "wajiki",
+ "adachi",
+ "akiruno",
+ "akishima",
+ "aogashima",
+ "arakawa",
+ "bunkyo",
+ "chiyoda",
+ "chofu",
+ "chuo",
+ "edogawa",
+ "fuchu",
+ "fussa",
+ "hachijo",
+ "hachioji",
+ "hamura",
+ "higashikurume",
+ "higashimurayama",
+ "higashiyamato",
+ "hino",
+ "hinode",
+ "hinohara",
+ "inagi",
+ "itabashi",
+ "katsushika",
+ "kita",
+ "kiyose",
+ "kodaira",
+ "koganei",
+ "kokubunji",
+ "komae",
+ "koto",
+ "kouzushima",
+ "kunitachi",
+ "machida",
+ "meguro",
+ "minato",
+ "mitaka",
+ "mizuho",
+ "musashimurayama",
+ "musashino",
+ "nakano",
+ "nerima",
+ "ogasawara",
+ "okutama",
+ "ome",
+ "oshima",
+ "ota",
+ "setagaya",
+ "shibuya",
+ "shinagawa",
+ "shinjuku",
+ "suginami",
+ "sumida",
+ "tachikawa",
+ "taito",
+ "tama",
+ "toshima",
+ "chizu",
+ "hino",
+ "kawahara",
+ "koge",
+ "kotoura",
+ "misasa",
+ "nanbu",
+ "nichinan",
+ "sakaiminato",
+ "tottori",
+ "wakasa",
+ "yazu",
+ "yonago",
+ "asahi",
+ "fuchu",
+ "fukumitsu",
+ "funahashi",
+ "himi",
+ "imizu",
+ "inami",
+ "johana",
+ "kamiichi",
+ "kurobe",
+ "nakaniikawa",
+ "namerikawa",
+ "nanto",
+ "nyuzen",
+ "oyabe",
+ "taira",
+ "takaoka",
+ "tateyama",
+ "toga",
+ "tonami",
+ "toyama",
+ "unazuki",
+ "uozu",
+ "yamada",
+ "arida",
+ "aridagawa",
+ "gobo",
+ "hashimoto",
+ "hidaka",
+ "hirogawa",
+ "inami",
+ "iwade",
+ "kainan",
+ "kamitonda",
+ "katsuragi",
+ "kimino",
+ "kinokawa",
+ "kitayama",
+ "koya",
+ "koza",
+ "kozagawa",
+ "kudoyama",
+ "kushimoto",
+ "mihama",
+ "misato",
+ "nachikatsuura",
+ "shingu",
+ "shirahama",
+ "taiji",
+ "tanabe",
+ "wakayama",
+ "yuasa",
+ "yura",
+ "asahi",
+ "funagata",
+ "higashine",
+ "iide",
+ "kahoku",
+ "kaminoyama",
+ "kaneyama",
+ "kawanishi",
+ "mamurogawa",
+ "mikawa",
+ "murayama",
+ "nagai",
+ "nakayama",
+ "nanyo",
+ "nishikawa",
+ "obanazawa",
+ "oe",
+ "oguni",
+ "ohkura",
+ "oishida",
+ "sagae",
+ "sakata",
+ "sakegawa",
+ "shinjo",
+ "shirataka",
+ "shonai",
+ "takahata",
+ "tendo",
+ "tozawa",
+ "tsuruoka",
+ "yamagata",
+ "yamanobe",
+ "yonezawa",
+ "yuza",
+ "abu",
+ "hagi",
+ "hikari",
+ "hofu",
+ "iwakuni",
+ "kudamatsu",
+ "mitou",
+ "nagato",
+ "oshima",
+ "shimonoseki",
+ "shunan",
+ "tabuse",
+ "tokuyama",
+ "toyota",
+ "ube",
+ "yuu",
+ "chuo",
+ "doshi",
+ "fuefuki",
+ "fujikawa",
+ "fujikawaguchiko",
+ "fujiyoshida",
+ "hayakawa",
+ "hokuto",
+ "ichikawamisato",
+ "kai",
+ "kofu",
+ "koshu",
+ "kosuge",
+ "minami-alps",
+ "minobu",
+ "nakamichi",
+ "nanbu",
+ "narusawa",
+ "nirasaki",
+ "nishikatsura",
+ "oshino",
+ "otsuki",
+ "showa",
+ "tabayama",
+ "tsuru",
+ "uenohara",
+ "yamanakako",
+ "yamanashi",
+ "city",
+ "co",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "biz",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "net",
+ "org",
+ "ass",
+ "asso",
+ "com",
+ "coop",
+ "edu",
+ "gouv",
+ "gov",
+ "medecin",
+ "mil",
+ "nom",
+ "notaires",
+ "org",
+ "pharmaciens",
+ "prd",
+ "presse",
+ "tm",
+ "veterinaire",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "org",
+ "rep",
+ "tra",
+ "ac",
+ "blogspot",
+ "busan",
+ "chungbuk",
+ "chungnam",
+ "co",
+ "daegu",
+ "daejeon",
+ "es",
+ "gangwon",
+ "go",
+ "gwangju",
+ "gyeongbuk",
+ "gyeonggi",
+ "gyeongnam",
+ "hs",
+ "incheon",
+ "jeju",
+ "jeonbuk",
+ "jeonnam",
+ "kg",
+ "mil",
+ "ms",
+ "ne",
+ "or",
+ "pe",
+ "re",
+ "sc",
+ "seoul",
+ "ulsan",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "c",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "int",
+ "net",
+ "org",
+ "per",
+ "static",
+ "dev",
+ "sites",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "oy",
+ "blogspot",
+ "cyon",
+ "mypep",
+ "ac",
+ "assn",
+ "com",
+ "edu",
+ "gov",
+ "grp",
+ "hotel",
+ "int",
+ "ltd",
+ "net",
+ "ngo",
+ "org",
+ "sch",
+ "soc",
+ "web",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "co",
+ "org",
+ "blogspot",
+ "gov",
+ "blogspot",
+ "asn",
+ "com",
+ "conf",
+ "edu",
+ "gov",
+ "id",
+ "mil",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "id",
+ "med",
+ "net",
+ "org",
+ "plc",
+ "sch",
+ "ac",
+ "co",
+ "gov",
+ "net",
+ "org",
+ "press",
+ "router",
+ "asso",
+ "tm",
+ "blogspot",
+ "ac",
+ "brasilia",
+ "co",
+ "daplie",
+ "ddns",
+ "diskstation",
+ "dnsfor",
+ "dscloud",
+ "edu",
+ "gov",
+ "hopto",
+ "i234",
+ "its",
+ "loginto",
+ "myds",
+ "net",
+ "noip",
+ "org",
+ "priv",
+ "synology",
+ "webhop",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "nom",
+ "org",
+ "prd",
+ "tm",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "inf",
+ "name",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gouv",
+ "gov",
+ "net",
+ "org",
+ "presse",
+ "edu",
+ "gov",
+ "nyc",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "dscloud",
+ "blogspot",
+ "gov",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "net",
+ "org",
+ "blogspot",
+ "ac",
+ "co",
+ "com",
+ "gov",
+ "net",
+ "or",
+ "org",
+ "academy",
+ "agriculture",
+ "air",
+ "airguard",
+ "alabama",
+ "alaska",
+ "amber",
+ "ambulance",
+ "american",
+ "americana",
+ "americanantiques",
+ "americanart",
+ "amsterdam",
+ "and",
+ "annefrank",
+ "anthro",
+ "anthropology",
+ "antiques",
+ "aquarium",
+ "arboretum",
+ "archaeological",
+ "archaeology",
+ "architecture",
+ "art",
+ "artanddesign",
+ "artcenter",
+ "artdeco",
+ "arteducation",
+ "artgallery",
+ "arts",
+ "artsandcrafts",
+ "asmatart",
+ "assassination",
+ "assisi",
+ "association",
+ "astronomy",
+ "atlanta",
+ "austin",
+ "australia",
+ "automotive",
+ "aviation",
+ "axis",
+ "badajoz",
+ "baghdad",
+ "bahn",
+ "bale",
+ "baltimore",
+ "barcelona",
+ "baseball",
+ "basel",
+ "baths",
+ "bauern",
+ "beauxarts",
+ "beeldengeluid",
+ "bellevue",
+ "bergbau",
+ "berkeley",
+ "berlin",
+ "bern",
+ "bible",
+ "bilbao",
+ "bill",
+ "birdart",
+ "birthplace",
+ "bonn",
+ "boston",
+ "botanical",
+ "botanicalgarden",
+ "botanicgarden",
+ "botany",
+ "brandywinevalley",
+ "brasil",
+ "bristol",
+ "british",
+ "britishcolumbia",
+ "broadcast",
+ "brunel",
+ "brussel",
+ "brussels",
+ "bruxelles",
+ "building",
+ "burghof",
+ "bus",
+ "bushey",
+ "cadaques",
+ "california",
+ "cambridge",
+ "can",
+ "canada",
+ "capebreton",
+ "carrier",
+ "cartoonart",
+ "casadelamoneda",
+ "castle",
+ "castres",
+ "celtic",
+ "center",
+ "chattanooga",
+ "cheltenham",
+ "chesapeakebay",
+ "chicago",
+ "children",
+ "childrens",
+ "childrensgarden",
+ "chiropractic",
+ "chocolate",
+ "christiansburg",
+ "cincinnati",
+ "cinema",
+ "circus",
+ "civilisation",
+ "civilization",
+ "civilwar",
+ "clinton",
+ "clock",
+ "coal",
+ "coastaldefence",
+ "cody",
+ "coldwar",
+ "collection",
+ "colonialwilliamsburg",
+ "coloradoplateau",
+ "columbia",
+ "columbus",
+ "communication",
+ "communications",
+ "community",
+ "computer",
+ "computerhistory",
+ "contemporary",
+ "contemporaryart",
+ "convent",
+ "copenhagen",
+ "corporation",
+ "corvette",
+ "costume",
+ "countryestate",
+ "county",
+ "crafts",
+ "cranbrook",
+ "creation",
+ "cultural",
+ "culturalcenter",
+ "culture",
+ "cyber",
+ "cymru",
+ "dali",
+ "dallas",
+ "database",
+ "ddr",
+ "decorativearts",
+ "delaware",
+ "delmenhorst",
+ "denmark",
+ "depot",
+ "design",
+ "detroit",
+ "dinosaur",
+ "discovery",
+ "dolls",
+ "donostia",
+ "durham",
+ "eastafrica",
+ "eastcoast",
+ "education",
+ "educational",
+ "egyptian",
+ "eisenbahn",
+ "elburg",
+ "elvendrell",
+ "embroidery",
+ "encyclopedic",
+ "england",
+ "entomology",
+ "environment",
+ "environmentalconservation",
+ "epilepsy",
+ "essex",
+ "estate",
+ "ethnology",
+ "exeter",
+ "exhibition",
+ "family",
+ "farm",
+ "farmequipment",
+ "farmers",
+ "farmstead",
+ "field",
+ "figueres",
+ "filatelia",
+ "film",
+ "fineart",
+ "finearts",
+ "finland",
+ "flanders",
+ "florida",
+ "force",
+ "fortmissoula",
+ "fortworth",
+ "foundation",
+ "francaise",
+ "frankfurt",
+ "franziskaner",
+ "freemasonry",
+ "freiburg",
+ "fribourg",
+ "frog",
+ "fundacio",
+ "furniture",
+ "gallery",
+ "garden",
+ "gateway",
+ "geelvinck",
+ "gemological",
+ "geology",
+ "georgia",
+ "giessen",
+ "glas",
+ "glass",
+ "gorge",
+ "grandrapids",
+ "graz",
+ "guernsey",
+ "halloffame",
+ "hamburg",
+ "handson",
+ "harvestcelebration",
+ "hawaii",
+ "health",
+ "heimatunduhren",
+ "hellas",
+ "helsinki",
+ "hembygdsforbund",
+ "heritage",
+ "histoire",
+ "historical",
+ "historicalsociety",
+ "historichouses",
+ "historisch",
+ "historisches",
+ "history",
+ "historyofscience",
+ "horology",
+ "house",
+ "humanities",
+ "illustration",
+ "imageandsound",
+ "indian",
+ "indiana",
+ "indianapolis",
+ "indianmarket",
+ "intelligence",
+ "interactive",
+ "iraq",
+ "iron",
+ "isleofman",
+ "jamison",
+ "jefferson",
+ "jerusalem",
+ "jewelry",
+ "jewish",
+ "jewishart",
+ "jfk",
+ "journalism",
+ "judaica",
+ "judygarland",
+ "juedisches",
+ "juif",
+ "karate",
+ "karikatur",
+ "kids",
+ "koebenhavn",
+ "koeln",
+ "kunst",
+ "kunstsammlung",
+ "kunstunddesign",
+ "labor",
+ "labour",
+ "lajolla",
+ "lancashire",
+ "landes",
+ "lans",
+ "larsson",
+ "lewismiller",
+ "lincoln",
+ "linz",
+ "living",
+ "livinghistory",
+ "localhistory",
+ "london",
+ "losangeles",
+ "louvre",
+ "loyalist",
+ "lucerne",
+ "luxembourg",
+ "luzern",
+ "mad",
+ "madrid",
+ "mallorca",
+ "manchester",
+ "mansion",
+ "mansions",
+ "manx",
+ "marburg",
+ "maritime",
+ "maritimo",
+ "maryland",
+ "marylhurst",
+ "media",
+ "medical",
+ "medizinhistorisches",
+ "meeres",
+ "memorial",
+ "mesaverde",
+ "michigan",
+ "midatlantic",
+ "military",
+ "mill",
+ "miners",
+ "mining",
+ "minnesota",
+ "missile",
+ "missoula",
+ "modern",
+ "moma",
+ "money",
+ "monmouth",
+ "monticello",
+ "montreal",
+ "moscow",
+ "motorcycle",
+ "muenchen",
+ "muenster",
+ "mulhouse",
+ "muncie",
+ "museet",
+ "museumcenter",
+ "museumvereniging",
+ "music",
+ "national",
+ "nationalfirearms",
+ "nationalheritage",
+ "nativeamerican",
+ "naturalhistory",
+ "naturalhistorymuseum",
+ "naturalsciences",
+ "nature",
+ "naturhistorisches",
+ "natuurwetenschappen",
+ "naumburg",
+ "naval",
+ "nebraska",
+ "neues",
+ "newhampshire",
+ "newjersey",
+ "newmexico",
+ "newport",
+ "newspaper",
+ "newyork",
+ "niepce",
+ "norfolk",
+ "north",
+ "nrw",
+ "nuernberg",
+ "nuremberg",
+ "nyc",
+ "nyny",
+ "oceanographic",
+ "oceanographique",
+ "omaha",
+ "online",
+ "ontario",
+ "openair",
+ "oregon",
+ "oregontrail",
+ "otago",
+ "oxford",
+ "pacific",
+ "paderborn",
+ "palace",
+ "paleo",
+ "palmsprings",
+ "panama",
+ "paris",
+ "pasadena",
+ "pharmacy",
+ "philadelphia",
+ "philadelphiaarea",
+ "philately",
+ "phoenix",
+ "photography",
+ "pilots",
+ "pittsburgh",
+ "planetarium",
+ "plantation",
+ "plants",
+ "plaza",
+ "portal",
+ "portland",
+ "portlligat",
+ "posts-and-telecommunications",
+ "preservation",
+ "presidio",
+ "press",
+ "project",
+ "public",
+ "pubol",
+ "quebec",
+ "railroad",
+ "railway",
+ "research",
+ "resistance",
+ "riodejaneiro",
+ "rochester",
+ "rockart",
+ "roma",
+ "russia",
+ "saintlouis",
+ "salem",
+ "salvadordali",
+ "salzburg",
+ "sandiego",
+ "sanfrancisco",
+ "santabarbara",
+ "santacruz",
+ "santafe",
+ "saskatchewan",
+ "satx",
+ "savannahga",
+ "schlesisches",
+ "schoenbrunn",
+ "schokoladen",
+ "school",
+ "schweiz",
+ "science",
+ "science-fiction",
+ "scienceandhistory",
+ "scienceandindustry",
+ "sciencecenter",
+ "sciencecenters",
+ "sciencehistory",
+ "sciences",
+ "sciencesnaturelles",
+ "scotland",
+ "seaport",
+ "settlement",
+ "settlers",
+ "shell",
+ "sherbrooke",
+ "sibenik",
+ "silk",
+ "ski",
+ "skole",
+ "society",
+ "sologne",
+ "soundandvision",
+ "southcarolina",
+ "southwest",
+ "space",
+ "spy",
+ "square",
+ "stadt",
+ "stalbans",
+ "starnberg",
+ "state",
+ "stateofdelaware",
+ "station",
+ "steam",
+ "steiermark",
+ "stjohn",
+ "stockholm",
+ "stpetersburg",
+ "stuttgart",
+ "suisse",
+ "surgeonshall",
+ "surrey",
+ "svizzera",
+ "sweden",
+ "sydney",
+ "tank",
+ "tcm",
+ "technology",
+ "telekommunikation",
+ "television",
+ "texas",
+ "textile",
+ "theater",
+ "time",
+ "timekeeping",
+ "topology",
+ "torino",
+ "touch",
+ "town",
+ "transport",
+ "tree",
+ "trolley",
+ "trust",
+ "trustee",
+ "uhren",
+ "ulm",
+ "undersea",
+ "university",
+ "usa",
+ "usantiques",
+ "usarts",
+ "uscountryestate",
+ "usculture",
+ "usdecorativearts",
+ "usgarden",
+ "ushistory",
+ "ushuaia",
+ "uslivinghistory",
+ "utah",
+ "uvic",
+ "valley",
+ "vantaa",
+ "versailles",
+ "viking",
+ "village",
+ "virginia",
+ "virtual",
+ "virtuel",
+ "vlaanderen",
+ "volkenkunde",
+ "wales",
+ "wallonie",
+ "war",
+ "washingtondc",
+ "watch-and-clock",
+ "watchandclock",
+ "western",
+ "westfalen",
+ "whaling",
+ "wildlife",
+ "williamsburg",
+ "windmill",
+ "workshop",
+ "xn--9dbhblg6di",
+ "xn--comunicaes-v6a2o",
+ "xn--correios-e-telecomunicaes-ghc29a",
+ "xn--h1aegh",
+ "xn--lns-qla",
+ "york",
+ "yorkshire",
+ "yosemite",
+ "youth",
+ "zoological",
+ "zoology",
+ "aero",
+ "biz",
+ "com",
+ "coop",
+ "edu",
+ "gov",
+ "info",
+ "int",
+ "mil",
+ "museum",
+ "name",
+ "net",
+ "org",
+ "pro",
+ "ac",
+ "biz",
+ "co",
+ "com",
+ "coop",
+ "edu",
+ "gov",
+ "int",
+ "museum",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gob",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "name",
+ "net",
+ "org",
+ "teledata",
+ "ca",
+ "cc",
+ "co",
+ "com",
+ "dr",
+ "in",
+ "info",
+ "mobi",
+ "mx",
+ "name",
+ "or",
+ "org",
+ "pro",
+ "school",
+ "tv",
+ "us",
+ "ws",
+ "her",
+ "his",
+ "forgot",
+ "forgot",
+ "asso",
+ "at-band-camp",
+ "azure-mobile",
+ "azurewebsites",
+ "blogdns",
+ "bounceme",
+ "broke-it",
+ "buyshouses",
+ "cdn77",
+ "cdn77-ssl",
+ "cloudapp",
+ "cloudfront",
+ "cloudfunctions",
+ "cryptonomic",
+ "ddns",
+ "dnsalias",
+ "dnsdojo",
+ "does-it",
+ "dontexist",
+ "dsmynas",
+ "dynalias",
+ "dynathome",
+ "dynv6",
+ "eating-organic",
+ "endofinternet",
+ "familyds",
+ "fastly",
+ "from-az",
+ "from-co",
+ "from-la",
+ "from-ny",
+ "gb",
+ "gets-it",
+ "ham-radio-op",
+ "homeftp",
+ "homeip",
+ "homelinux",
+ "homeunix",
+ "hu",
+ "in",
+ "in-the-band",
+ "is-a-chef",
+ "is-a-geek",
+ "isa-geek",
+ "jp",
+ "kicks-ass",
+ "mydissent",
+ "myeffect",
+ "myfritz",
+ "mymediapc",
+ "mypsx",
+ "mysecuritycamera",
+ "nhlfan",
+ "no-ip",
+ "office-on-the",
+ "pgafan",
+ "podzone",
+ "privatizehealthinsurance",
+ "rackmaze",
+ "redirectme",
+ "scrapper-site",
+ "se",
+ "selfip",
+ "sells-it",
+ "servebbs",
+ "serveblog",
+ "serveftp",
+ "serveminecraft",
+ "sytes",
+ "thruhere",
+ "uk",
+ "webhop",
+ "za",
+ "r",
+ "prod",
+ "ssl",
+ "a",
+ "global",
+ "a",
+ "b",
+ "global",
+ "alces",
+ "arts",
+ "com",
+ "firm",
+ "info",
+ "net",
+ "other",
+ "per",
+ "rec",
+ "store",
+ "web",
+ "com",
+ "edu",
+ "gov",
+ "i",
+ "mil",
+ "mobi",
+ "name",
+ "net",
+ "org",
+ "sch",
+ "blogspot",
+ "ac",
+ "biz",
+ "co",
+ "com",
+ "edu",
+ "gob",
+ "in",
+ "info",
+ "int",
+ "mil",
+ "net",
+ "nom",
+ "org",
+ "web",
+ "blogspot",
+ "bv",
+ "co",
+ "virtueeldomein",
+ "aa",
+ "aarborte",
+ "aejrie",
+ "afjord",
+ "agdenes",
+ "ah",
+ "akershus",
+ "aknoluokta",
+ "akrehamn",
+ "al",
+ "alaheadju",
+ "alesund",
+ "algard",
+ "alstahaug",
+ "alta",
+ "alvdal",
+ "amli",
+ "amot",
+ "andasuolo",
+ "andebu",
+ "andoy",
+ "ardal",
+ "aremark",
+ "arendal",
+ "arna",
+ "aseral",
+ "asker",
+ "askim",
+ "askoy",
+ "askvoll",
+ "asnes",
+ "audnedaln",
+ "aukra",
+ "aure",
+ "aurland",
+ "aurskog-holand",
+ "austevoll",
+ "austrheim",
+ "averoy",
+ "badaddja",
+ "bahcavuotna",
+ "bahccavuotna",
+ "baidar",
+ "bajddar",
+ "balat",
+ "balestrand",
+ "ballangen",
+ "balsfjord",
+ "bamble",
+ "bardu",
+ "barum",
+ "batsfjord",
+ "bearalvahki",
+ "beardu",
+ "beiarn",
+ "berg",
+ "bergen",
+ "berlevag",
+ "bievat",
+ "bindal",
+ "birkenes",
+ "bjarkoy",
+ "bjerkreim",
+ "bjugn",
+ "blogspot",
+ "bodo",
+ "bokn",
+ "bomlo",
+ "bremanger",
+ "bronnoy",
+ "bronnoysund",
+ "brumunddal",
+ "bryne",
+ "bu",
+ "budejju",
+ "buskerud",
+ "bygland",
+ "bykle",
+ "cahcesuolo",
+ "co",
+ "davvenjarga",
+ "davvesiida",
+ "deatnu",
+ "dep",
+ "dielddanuorri",
+ "divtasvuodna",
+ "divttasvuotna",
+ "donna",
+ "dovre",
+ "drammen",
+ "drangedal",
+ "drobak",
+ "dyroy",
+ "egersund",
+ "eid",
+ "eidfjord",
+ "eidsberg",
+ "eidskog",
+ "eidsvoll",
+ "eigersund",
+ "elverum",
+ "enebakk",
+ "engerdal",
+ "etne",
+ "etnedal",
+ "evenassi",
+ "evenes",
+ "evje-og-hornnes",
+ "farsund",
+ "fauske",
+ "fedje",
+ "fet",
+ "fetsund",
+ "fhs",
+ "finnoy",
+ "fitjar",
+ "fjaler",
+ "fjell",
+ "fla",
+ "flakstad",
+ "flatanger",
+ "flekkefjord",
+ "flesberg",
+ "flora",
+ "floro",
+ "fm",
+ "folkebibl",
+ "folldal",
+ "forde",
+ "forsand",
+ "fosnes",
+ "frana",
+ "fredrikstad",
+ "frei",
+ "frogn",
+ "froland",
+ "frosta",
+ "froya",
+ "fuoisku",
+ "fuossko",
+ "fusa",
+ "fylkesbibl",
+ "fyresdal",
+ "gaivuotna",
+ "galsa",
+ "gamvik",
+ "gangaviika",
+ "gaular",
+ "gausdal",
+ "giehtavuoatna",
+ "gildeskal",
+ "giske",
+ "gjemnes",
+ "gjerdrum",
+ "gjerstad",
+ "gjesdal",
+ "gjovik",
+ "gloppen",
+ "gol",
+ "gran",
+ "grane",
+ "granvin",
+ "gratangen",
+ "grimstad",
+ "grong",
+ "grue",
+ "gulen",
+ "guovdageaidnu",
+ "ha",
+ "habmer",
+ "hadsel",
+ "hagebostad",
+ "halden",
+ "halsa",
+ "hamar",
+ "hamaroy",
+ "hammarfeasta",
+ "hammerfest",
+ "hapmir",
+ "haram",
+ "hareid",
+ "harstad",
+ "hasvik",
+ "hattfjelldal",
+ "haugesund",
+ "hedmark",
+ "hemne",
+ "hemnes",
+ "hemsedal",
+ "herad",
+ "hitra",
+ "hjartdal",
+ "hjelmeland",
+ "hl",
+ "hm",
+ "hobol",
+ "hof",
+ "hokksund",
+ "hol",
+ "hole",
+ "holmestrand",
+ "holtalen",
+ "honefoss",
+ "hordaland",
+ "hornindal",
+ "horten",
+ "hoyanger",
+ "hoylandet",
+ "hurdal",
+ "hurum",
+ "hvaler",
+ "hyllestad",
+ "ibestad",
+ "idrett",
+ "inderoy",
+ "iveland",
+ "ivgu",
+ "jan-mayen",
+ "jessheim",
+ "jevnaker",
+ "jolster",
+ "jondal",
+ "jorpeland",
+ "kafjord",
+ "karasjohka",
+ "karasjok",
+ "karlsoy",
+ "karmoy",
+ "kautokeino",
+ "kirkenes",
+ "klabu",
+ "klepp",
+ "kommune",
+ "kongsberg",
+ "kongsvinger",
+ "kopervik",
+ "kraanghke",
+ "kragero",
+ "kristiansand",
+ "kristiansund",
+ "krodsherad",
+ "krokstadelva",
+ "kvafjord",
+ "kvalsund",
+ "kvam",
+ "kvanangen",
+ "kvinesdal",
+ "kvinnherad",
+ "kviteseid",
+ "kvitsoy",
+ "laakesvuemie",
+ "lahppi",
+ "langevag",
+ "lardal",
+ "larvik",
+ "lavagis",
+ "lavangen",
+ "leangaviika",
+ "lebesby",
+ "leikanger",
+ "leirfjord",
+ "leirvik",
+ "leka",
+ "leksvik",
+ "lenvik",
+ "lerdal",
+ "lesja",
+ "levanger",
+ "lier",
+ "lierne",
+ "lillehammer",
+ "lillesand",
+ "lindas",
+ "lindesnes",
+ "loabat",
+ "lodingen",
+ "lom",
+ "loppa",
+ "lorenskog",
+ "loten",
+ "lund",
+ "lunner",
+ "luroy",
+ "luster",
+ "lyngdal",
+ "lyngen",
+ "malatvuopmi",
+ "malselv",
+ "malvik",
+ "mandal",
+ "marker",
+ "marnardal",
+ "masfjorden",
+ "masoy",
+ "matta-varjjat",
+ "meland",
+ "meldal",
+ "melhus",
+ "meloy",
+ "meraker",
+ "midsund",
+ "midtre-gauldal",
+ "mil",
+ "mjondalen",
+ "mo-i-rana",
+ "moareke",
+ "modalen",
+ "modum",
+ "molde",
+ "more-og-romsdal",
+ "mosjoen",
+ "moskenes",
+ "moss",
+ "mosvik",
+ "mr",
+ "muosat",
+ "museum",
+ "naamesjevuemie",
+ "namdalseid",
+ "namsos",
+ "namsskogan",
+ "nannestad",
+ "naroy",
+ "narviika",
+ "narvik",
+ "naustdal",
+ "navuotna",
+ "nedre-eiker",
+ "nesna",
+ "nesodden",
+ "nesoddtangen",
+ "nesseby",
+ "nesset",
+ "nissedal",
+ "nittedal",
+ "nl",
+ "nord-aurdal",
+ "nord-fron",
+ "nord-odal",
+ "norddal",
+ "nordkapp",
+ "nordland",
+ "nordre-land",
+ "nordreisa",
+ "nore-og-uvdal",
+ "notodden",
+ "notteroy",
+ "nt",
+ "odda",
+ "of",
+ "oksnes",
+ "ol",
+ "omasvuotna",
+ "oppdal",
+ "oppegard",
+ "orkanger",
+ "orkdal",
+ "orland",
+ "orskog",
+ "orsta",
+ "osen",
+ "oslo",
+ "osoyro",
+ "osteroy",
+ "ostfold",
+ "ostre-toten",
+ "overhalla",
+ "ovre-eiker",
+ "oyer",
+ "oygarden",
+ "oystre-slidre",
+ "porsanger",
+ "porsangu",
+ "porsgrunn",
+ "priv",
+ "rade",
+ "radoy",
+ "rahkkeravju",
+ "raholt",
+ "raisa",
+ "rakkestad",
+ "ralingen",
+ "rana",
+ "randaberg",
+ "rauma",
+ "rendalen",
+ "rennebu",
+ "rennesoy",
+ "rindal",
+ "ringebu",
+ "ringerike",
+ "ringsaker",
+ "risor",
+ "rissa",
+ "rl",
+ "roan",
+ "rodoy",
+ "rollag",
+ "romsa",
+ "romskog",
+ "roros",
+ "rost",
+ "royken",
+ "royrvik",
+ "ruovat",
+ "rygge",
+ "salangen",
+ "salat",
+ "saltdal",
+ "samnanger",
+ "sandefjord",
+ "sandnes",
+ "sandnessjoen",
+ "sandoy",
+ "sarpsborg",
+ "sauda",
+ "sauherad",
+ "sel",
+ "selbu",
+ "selje",
+ "seljord",
+ "sf",
+ "siellak",
+ "sigdal",
+ "siljan",
+ "sirdal",
+ "skanit",
+ "skanland",
+ "skaun",
+ "skedsmo",
+ "skedsmokorset",
+ "ski",
+ "skien",
+ "skierva",
+ "skiptvet",
+ "skjak",
+ "skjervoy",
+ "skodje",
+ "slattum",
+ "smola",
+ "snaase",
+ "snasa",
+ "snillfjord",
+ "snoasa",
+ "sogndal",
+ "sogne",
+ "sokndal",
+ "sola",
+ "solund",
+ "somna",
+ "sondre-land",
+ "songdalen",
+ "sor-aurdal",
+ "sor-fron",
+ "sor-odal",
+ "sor-varanger",
+ "sorfold",
+ "sorreisa",
+ "sortland",
+ "sorum",
+ "spjelkavik",
+ "spydeberg",
+ "st",
+ "stange",
+ "stat",
+ "stathelle",
+ "stavanger",
+ "stavern",
+ "steigen",
+ "steinkjer",
+ "stjordal",
+ "stjordalshalsen",
+ "stokke",
+ "stor-elvdal",
+ "stord",
+ "stordal",
+ "storfjord",
+ "strand",
+ "stranda",
+ "stryn",
+ "sula",
+ "suldal",
+ "sund",
+ "sunndal",
+ "surnadal",
+ "svalbard",
+ "sveio",
+ "svelvik",
+ "sykkylven",
+ "tana",
+ "tananger",
+ "telemark",
+ "time",
+ "tingvoll",
+ "tinn",
+ "tjeldsund",
+ "tjome",
+ "tm",
+ "tokke",
+ "tolga",
+ "tonsberg",
+ "torsken",
+ "tr",
+ "trana",
+ "tranby",
+ "tranoy",
+ "troandin",
+ "trogstad",
+ "tromsa",
+ "tromso",
+ "trondheim",
+ "trysil",
+ "tvedestrand",
+ "tydal",
+ "tynset",
+ "tysfjord",
+ "tysnes",
+ "tysvar",
+ "ullensaker",
+ "ullensvang",
+ "ulvik",
+ "unjarga",
+ "utsira",
+ "va",
+ "vaapste",
+ "vadso",
+ "vaga",
+ "vagan",
+ "vagsoy",
+ "vaksdal",
+ "valle",
+ "vang",
+ "vanylven",
+ "vardo",
+ "varggat",
+ "varoy",
+ "vefsn",
+ "vega",
+ "vegarshei",
+ "vennesla",
+ "verdal",
+ "verran",
+ "vestby",
+ "vestfold",
+ "vestnes",
+ "vestre-slidre",
+ "vestre-toten",
+ "vestvagoy",
+ "vevelstad",
+ "vf",
+ "vgs",
+ "vik",
+ "vikna",
+ "vindafjord",
+ "voagat",
+ "volda",
+ "voss",
+ "vossevangen",
+ "xn--andy-ira",
+ "xn--asky-ira",
+ "xn--aurskog-hland-jnb",
+ "xn--avery-yua",
+ "xn--bdddj-mrabd",
+ "xn--bearalvhki-y4a",
+ "xn--berlevg-jxa",
+ "xn--bhcavuotna-s4a",
+ "xn--bhccavuotna-k7a",
+ "xn--bidr-5nac",
+ "xn--bievt-0qa",
+ "xn--bjarky-fya",
+ "xn--bjddar-pta",
+ "xn--blt-elab",
+ "xn--bmlo-gra",
+ "xn--bod-2na",
+ "xn--brnny-wuac",
+ "xn--brnnysund-m8ac",
+ "xn--brum-voa",
+ "xn--btsfjord-9za",
+ "xn--davvenjrga-y4a",
+ "xn--dnna-gra",
+ "xn--drbak-wua",
+ "xn--dyry-ira",
+ "xn--eveni-0qa01ga",
+ "xn--finny-yua",
+ "xn--fjord-lra",
+ "xn--fl-zia",
+ "xn--flor-jra",
+ "xn--frde-gra",
+ "xn--frna-woa",
+ "xn--frya-hra",
+ "xn--ggaviika-8ya47h",
+ "xn--gildeskl-g0a",
+ "xn--givuotna-8ya",
+ "xn--gjvik-wua",
+ "xn--gls-elac",
+ "xn--h-2fa",
+ "xn--hbmer-xqa",
+ "xn--hcesuolo-7ya35b",
+ "xn--hgebostad-g3a",
+ "xn--hmmrfeasta-s4ac",
+ "xn--hnefoss-q1a",
+ "xn--hobl-ira",
+ "xn--holtlen-hxa",
+ "xn--hpmir-xqa",
+ "xn--hyanger-q1a",
+ "xn--hylandet-54a",
+ "xn--indery-fya",
+ "xn--jlster-bya",
+ "xn--jrpeland-54a",
+ "xn--karmy-yua",
+ "xn--kfjord-iua",
+ "xn--klbu-woa",
+ "xn--koluokta-7ya57h",
+ "xn--krager-gya",
+ "xn--kranghke-b0a",
+ "xn--krdsherad-m8a",
+ "xn--krehamn-dxa",
+ "xn--krjohka-hwab49j",
+ "xn--ksnes-uua",
+ "xn--kvfjord-nxa",
+ "xn--kvitsy-fya",
+ "xn--kvnangen-k0a",
+ "xn--l-1fa",
+ "xn--laheadju-7ya",
+ "xn--langevg-jxa",
+ "xn--ldingen-q1a",
+ "xn--leagaviika-52b",
+ "xn--lesund-hua",
+ "xn--lgrd-poac",
+ "xn--lhppi-xqa",
+ "xn--linds-pra",
+ "xn--loabt-0qa",
+ "xn--lrdal-sra",
+ "xn--lrenskog-54a",
+ "xn--lt-liac",
+ "xn--lten-gra",
+ "xn--lury-ira",
+ "xn--mely-ira",
+ "xn--merker-kua",
+ "xn--mjndalen-64a",
+ "xn--mlatvuopmi-s4a",
+ "xn--mli-tla",
+ "xn--mlselv-iua",
+ "xn--moreke-jua",
+ "xn--mosjen-eya",
+ "xn--mot-tla",
+ "xn--mre-og-romsdal-qqb",
+ "xn--msy-ula0h",
+ "xn--mtta-vrjjat-k7af",
+ "xn--muost-0qa",
+ "xn--nmesjevuemie-tcba",
+ "xn--nry-yla5g",
+ "xn--nttery-byae",
+ "xn--nvuotna-hwa",
+ "xn--oppegrd-ixa",
+ "xn--ostery-fya",
+ "xn--osyro-wua",
+ "xn--porsgu-sta26f",
+ "xn--rady-ira",
+ "xn--rdal-poa",
+ "xn--rde-ula",
+ "xn--rdy-0nab",
+ "xn--rennesy-v1a",
+ "xn--rhkkervju-01af",
+ "xn--rholt-mra",
+ "xn--risa-5na",
+ "xn--risr-ira",
+ "xn--rland-uua",
+ "xn--rlingen-mxa",
+ "xn--rmskog-bya",
+ "xn--rros-gra",
+ "xn--rskog-uua",
+ "xn--rst-0na",
+ "xn--rsta-fra",
+ "xn--ryken-vua",
+ "xn--ryrvik-bya",
+ "xn--s-1fa",
+ "xn--sandnessjen-ogb",
+ "xn--sandy-yua",
+ "xn--seral-lra",
+ "xn--sgne-gra",
+ "xn--skierv-uta",
+ "xn--skjervy-v1a",
+ "xn--skjk-soa",
+ "xn--sknit-yqa",
+ "xn--sknland-fxa",
+ "xn--slat-5na",
+ "xn--slt-elab",
+ "xn--smla-hra",
+ "xn--smna-gra",
+ "xn--snase-nra",
+ "xn--sndre-land-0cb",
+ "xn--snes-poa",
+ "xn--snsa-roa",
+ "xn--sr-aurdal-l8a",
+ "xn--sr-fron-q1a",
+ "xn--sr-odal-q1a",
+ "xn--sr-varanger-ggb",
+ "xn--srfold-bya",
+ "xn--srreisa-q1a",
+ "xn--srum-gra",
+ "xn--stfold-9xa",
+ "xn--stjrdal-s1a",
+ "xn--stjrdalshalsen-sqb",
+ "xn--stre-toten-zcb",
+ "xn--tjme-hra",
+ "xn--tnsberg-q1a",
+ "xn--trany-yua",
+ "xn--trgstad-r1a",
+ "xn--trna-woa",
+ "xn--troms-zua",
+ "xn--tysvr-vra",
+ "xn--unjrga-rta",
+ "xn--vads-jra",
+ "xn--vard-jra",
+ "xn--vegrshei-c0a",
+ "xn--vestvgy-ixa6o",
+ "xn--vg-yiab",
+ "xn--vgan-qoa",
+ "xn--vgsy-qoa0j",
+ "xn--vre-eiker-k8a",
+ "xn--vrggt-xqad",
+ "xn--vry-yla5g",
+ "xn--yer-zna",
+ "xn--ygarden-p1a",
+ "xn--ystre-slidre-ujb",
+ "gs",
+ "gs",
+ "nes",
+ "gs",
+ "nes",
+ "gs",
+ "os",
+ "valer",
+ "xn--vler-qoa",
+ "gs",
+ "gs",
+ "os",
+ "gs",
+ "heroy",
+ "sande",
+ "gs",
+ "gs",
+ "bo",
+ "heroy",
+ "xn--b-5ga",
+ "xn--hery-ira",
+ "gs",
+ "gs",
+ "gs",
+ "gs",
+ "valer",
+ "gs",
+ "gs",
+ "gs",
+ "gs",
+ "bo",
+ "xn--b-5ga",
+ "gs",
+ "gs",
+ "gs",
+ "sande",
+ "gs",
+ "sande",
+ "xn--hery-ira",
+ "xn--vler-qoa",
+ "biz",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "net",
+ "org",
+ "merseine",
+ "mine",
+ "shacknet",
+ "ac",
+ "co",
+ "cri",
+ "geek",
+ "gen",
+ "govt",
+ "health",
+ "iwi",
+ "kiwi",
+ "maori",
+ "mil",
+ "net",
+ "org",
+ "parliament",
+ "school",
+ "xn--mori-qsa",
+ "blogspot",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "med",
+ "museum",
+ "net",
+ "org",
+ "pro",
+ "ae",
+ "blogdns",
+ "blogsite",
+ "bmoattachments",
+ "boldlygoingnowhere",
+ "cable-modem",
+ "cdn77",
+ "cdn77-secure",
+ "certmgr",
+ "collegefan",
+ "couchpotatofries",
+ "dnsalias",
+ "dnsdojo",
+ "doesntexist",
+ "dontexist",
+ "doomdns",
+ "dsmynas",
+ "duckdns",
+ "dvrdns",
+ "dynalias",
+ "dyndns",
+ "endofinternet",
+ "endoftheinternet",
+ "eu",
+ "familyds",
+ "from-me",
+ "game-host",
+ "gotdns",
+ "hepforge",
+ "hk",
+ "hobby-site",
+ "homedns",
+ "homeftp",
+ "homelinux",
+ "homeunix",
+ "hopto",
+ "is-a-bruinsfan",
+ "is-a-candidate",
+ "is-a-celticsfan",
+ "is-a-chef",
+ "is-a-geek",
+ "is-a-knight",
+ "is-a-linux-user",
+ "is-a-patsfan",
+ "is-a-soxfan",
+ "is-found",
+ "is-lost",
+ "is-saved",
+ "is-very-bad",
+ "is-very-evil",
+ "is-very-good",
+ "is-very-nice",
+ "is-very-sweet",
+ "isa-geek",
+ "kicks-ass",
+ "misconfused",
+ "mlbfan",
+ "myftp",
+ "mysecuritycamera",
+ "nflfan",
+ "no-ip",
+ "pimienta",
+ "podzone",
+ "poivron",
+ "potager",
+ "read-books",
+ "readmyblog",
+ "selfip",
+ "sellsyourhome",
+ "servebbs",
+ "serveftp",
+ "servegame",
+ "stuff-4-sale",
+ "sweetpepper",
+ "tunk",
+ "tuxfamily",
+ "ufcfan",
+ "us",
+ "webhop",
+ "za",
+ "zapto",
+ "c",
+ "rsc",
+ "origin",
+ "ssl",
+ "go",
+ "home",
+ "al",
+ "asso",
+ "at",
+ "au",
+ "be",
+ "bg",
+ "ca",
+ "cd",
+ "ch",
+ "cn",
+ "cy",
+ "cz",
+ "de",
+ "dk",
+ "edu",
+ "ee",
+ "es",
+ "fi",
+ "fr",
+ "gr",
+ "hr",
+ "hu",
+ "ie",
+ "il",
+ "in",
+ "int",
+ "is",
+ "it",
+ "jp",
+ "kr",
+ "lt",
+ "lu",
+ "lv",
+ "mc",
+ "me",
+ "mk",
+ "mt",
+ "my",
+ "net",
+ "ng",
+ "nl",
+ "no",
+ "nz",
+ "paris",
+ "pl",
+ "pt",
+ "q-a",
+ "ro",
+ "ru",
+ "se",
+ "si",
+ "sk",
+ "tr",
+ "uk",
+ "us",
+ "nerdpol",
+ "abo",
+ "ac",
+ "com",
+ "edu",
+ "gob",
+ "ing",
+ "med",
+ "net",
+ "nom",
+ "org",
+ "sld",
+ "blogspot",
+ "com",
+ "edu",
+ "gob",
+ "mil",
+ "net",
+ "nom",
+ "org",
+ "com",
+ "edu",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "i",
+ "mil",
+ "net",
+ "ngo",
+ "org",
+ "biz",
+ "com",
+ "edu",
+ "fam",
+ "gob",
+ "gok",
+ "gon",
+ "gop",
+ "gos",
+ "gov",
+ "info",
+ "net",
+ "org",
+ "web",
+ "agro",
+ "aid",
+ "art",
+ "atm",
+ "augustow",
+ "auto",
+ "babia-gora",
+ "bedzin",
+ "beep",
+ "beskidy",
+ "bialowieza",
+ "bialystok",
+ "bielawa",
+ "bieszczady",
+ "biz",
+ "boleslawiec",
+ "bydgoszcz",
+ "bytom",
+ "cieszyn",
+ "co",
+ "com",
+ "czeladz",
+ "czest",
+ "dlugoleka",
+ "edu",
+ "elblag",
+ "elk",
+ "gda",
+ "gdansk",
+ "gdynia",
+ "gliwice",
+ "glogow",
+ "gmina",
+ "gniezno",
+ "gorlice",
+ "gov",
+ "grajewo",
+ "gsm",
+ "ilawa",
+ "info",
+ "jaworzno",
+ "jelenia-gora",
+ "jgora",
+ "kalisz",
+ "karpacz",
+ "kartuzy",
+ "kaszuby",
+ "katowice",
+ "kazimierz-dolny",
+ "kepno",
+ "ketrzyn",
+ "klodzko",
+ "kobierzyce",
+ "kolobrzeg",
+ "konin",
+ "konskowola",
+ "krakow",
+ "kutno",
+ "lapy",
+ "lebork",
+ "legnica",
+ "lezajsk",
+ "limanowa",
+ "lomza",
+ "lowicz",
+ "lubin",
+ "lukow",
+ "mail",
+ "malbork",
+ "malopolska",
+ "mazowsze",
+ "mazury",
+ "med",
+ "media",
+ "miasta",
+ "mielec",
+ "mielno",
+ "mil",
+ "mragowo",
+ "naklo",
+ "net",
+ "nieruchomosci",
+ "nom",
+ "nowaruda",
+ "nysa",
+ "olawa",
+ "olecko",
+ "olkusz",
+ "olsztyn",
+ "opoczno",
+ "opole",
+ "org",
+ "ostroda",
+ "ostroleka",
+ "ostrowiec",
+ "ostrowwlkp",
+ "pc",
+ "pila",
+ "pisz",
+ "podhale",
+ "podlasie",
+ "polkowice",
+ "pomorskie",
+ "pomorze",
+ "powiat",
+ "poznan",
+ "priv",
+ "prochowice",
+ "pruszkow",
+ "przeworsk",
+ "pulawy",
+ "radom",
+ "rawa-maz",
+ "realestate",
+ "rel",
+ "rybnik",
+ "rzeszow",
+ "sanok",
+ "sejny",
+ "sex",
+ "shop",
+ "sklep",
+ "skoczow",
+ "slask",
+ "slupsk",
+ "sopot",
+ "sos",
+ "sosnowiec",
+ "stalowa-wola",
+ "starachowice",
+ "stargard",
+ "suwalki",
+ "swidnica",
+ "swiebodzin",
+ "swinoujscie",
+ "szczecin",
+ "szczytno",
+ "szkola",
+ "targi",
+ "tarnobrzeg",
+ "tgory",
+ "tm",
+ "tourism",
+ "travel",
+ "turek",
+ "turystyka",
+ "tychy",
+ "ustka",
+ "walbrzych",
+ "warmia",
+ "warszawa",
+ "waw",
+ "wegrow",
+ "wielun",
+ "wlocl",
+ "wloclawek",
+ "wodzislaw",
+ "wolomin",
+ "wroc",
+ "wroclaw",
+ "zachpomor",
+ "zagan",
+ "zakopane",
+ "zarow",
+ "zgora",
+ "zgorzelec",
+ "ap",
+ "griw",
+ "ic",
+ "is",
+ "kmpsp",
+ "konsulat",
+ "kppsp",
+ "kwp",
+ "kwpsp",
+ "mup",
+ "mw",
+ "oirm",
+ "oum",
+ "pa",
+ "pinb",
+ "piw",
+ "po",
+ "psp",
+ "psse",
+ "pup",
+ "rzgw",
+ "sa",
+ "sdn",
+ "sko",
+ "so",
+ "sr",
+ "starostwo",
+ "ug",
+ "ugim",
+ "um",
+ "umig",
+ "upow",
+ "uppo",
+ "us",
+ "uw",
+ "uzs",
+ "wif",
+ "wiih",
+ "winb",
+ "wios",
+ "witd",
+ "wiw",
+ "wsa",
+ "wskr",
+ "wuoz",
+ "wzmiuw",
+ "zp",
+ "co",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "ac",
+ "biz",
+ "com",
+ "edu",
+ "est",
+ "gov",
+ "info",
+ "isla",
+ "name",
+ "net",
+ "org",
+ "pro",
+ "prof",
+ "aaa",
+ "aca",
+ "acct",
+ "avocat",
+ "bar",
+ "cpa",
+ "eng",
+ "jur",
+ "law",
+ "med",
+ "recht",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "plo",
+ "sec",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "int",
+ "net",
+ "nome",
+ "org",
+ "publ",
+ "belau",
+ "co",
+ "ed",
+ "go",
+ "ne",
+ "or",
+ "com",
+ "coop",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "name",
+ "net",
+ "org",
+ "sch",
+ "asso",
+ "blogspot",
+ "com",
+ "nom",
+ "arts",
+ "blogspot",
+ "com",
+ "firm",
+ "info",
+ "nom",
+ "nt",
+ "org",
+ "rec",
+ "store",
+ "tm",
+ "www",
+ "ac",
+ "blogspot",
+ "co",
+ "edu",
+ "gov",
+ "in",
+ "org",
+ "ac",
+ "adygeya",
+ "altai",
+ "amur",
+ "amursk",
+ "arkhangelsk",
+ "astrakhan",
+ "baikal",
+ "bashkiria",
+ "belgorod",
+ "bir",
+ "blogspot",
+ "bryansk",
+ "buryatia",
+ "cbg",
+ "chel",
+ "chelyabinsk",
+ "chita",
+ "chukotka",
+ "chuvashia",
+ "cmw",
+ "com",
+ "dagestan",
+ "dudinka",
+ "e-burg",
+ "edu",
+ "fareast",
+ "gov",
+ "grozny",
+ "int",
+ "irkutsk",
+ "ivanovo",
+ "izhevsk",
+ "jamal",
+ "jar",
+ "joshkar-ola",
+ "k-uralsk",
+ "kalmykia",
+ "kaluga",
+ "kamchatka",
+ "karelia",
+ "kazan",
+ "kchr",
+ "kemerovo",
+ "khabarovsk",
+ "khakassia",
+ "khv",
+ "kirov",
+ "kms",
+ "koenig",
+ "komi",
+ "kostroma",
+ "krasnoyarsk",
+ "kuban",
+ "kurgan",
+ "kursk",
+ "kustanai",
+ "kuzbass",
+ "lipetsk",
+ "magadan",
+ "mari",
+ "mari-el",
+ "marine",
+ "mil",
+ "mordovia",
+ "msk",
+ "murmansk",
+ "mytis",
+ "nakhodka",
+ "nalchik",
+ "net",
+ "nkz",
+ "nnov",
+ "norilsk",
+ "nov",
+ "novosibirsk",
+ "nsk",
+ "omsk",
+ "orenburg",
+ "org",
+ "oryol",
+ "oskol",
+ "palana",
+ "penza",
+ "perm",
+ "pp",
+ "ptz",
+ "pyatigorsk",
+ "rnd",
+ "rubtsovsk",
+ "ryazan",
+ "sakhalin",
+ "samara",
+ "saratov",
+ "simbirsk",
+ "smolensk",
+ "snz",
+ "spb",
+ "stavropol",
+ "stv",
+ "surgut",
+ "syzran",
+ "tambov",
+ "tatarstan",
+ "test",
+ "tom",
+ "tomsk",
+ "tsaritsyn",
+ "tsk",
+ "tula",
+ "tuva",
+ "tver",
+ "tyumen",
+ "udm",
+ "udmurtia",
+ "ulan-ude",
+ "vdonsk",
+ "vladikavkaz",
+ "vladimir",
+ "vladivostok",
+ "volgograd",
+ "vologda",
+ "voronezh",
+ "vrn",
+ "vyatka",
+ "yakutia",
+ "yamal",
+ "yaroslavl",
+ "yekaterinburg",
+ "yuzhno-sakhalinsk",
+ "zgrad",
+ "ac",
+ "co",
+ "com",
+ "edu",
+ "gouv",
+ "gov",
+ "int",
+ "mil",
+ "net",
+ "com",
+ "edu",
+ "gov",
+ "med",
+ "net",
+ "org",
+ "pub",
+ "sch",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "med",
+ "net",
+ "org",
+ "tv",
+ "a",
+ "ac",
+ "b",
+ "bd",
+ "blogspot",
+ "brand",
+ "c",
+ "com",
+ "d",
+ "e",
+ "f",
+ "fh",
+ "fhsk",
+ "fhv",
+ "g",
+ "h",
+ "i",
+ "k",
+ "komforb",
+ "kommunalforbund",
+ "komvux",
+ "l",
+ "lanbib",
+ "m",
+ "n",
+ "naturbruksgymn",
+ "o",
+ "org",
+ "p",
+ "parti",
+ "pp",
+ "press",
+ "r",
+ "s",
+ "t",
+ "tm",
+ "u",
+ "w",
+ "x",
+ "y",
+ "z",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "per",
+ "com",
+ "gov",
+ "hashbang",
+ "mil",
+ "net",
+ "org",
+ "platform",
+ "blogspot",
+ "cyon",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "net",
+ "org",
+ "art",
+ "blogspot",
+ "com",
+ "edu",
+ "gouv",
+ "org",
+ "perso",
+ "univ",
+ "com",
+ "net",
+ "org",
+ "stackspace",
+ "co",
+ "com",
+ "consulado",
+ "edu",
+ "embaixada",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "principe",
+ "saotome",
+ "store",
+ "adygeya",
+ "arkhangelsk",
+ "balashov",
+ "bashkiria",
+ "bryansk",
+ "dagestan",
+ "grozny",
+ "ivanovo",
+ "kalmykia",
+ "kaluga",
+ "karelia",
+ "khakassia",
+ "krasnodar",
+ "kurgan",
+ "lenug",
+ "mordovia",
+ "msk",
+ "murmansk",
+ "nalchik",
+ "nov",
+ "obninsk",
+ "penza",
+ "pokrovsk",
+ "sochi",
+ "spb",
+ "togliatti",
+ "troitsk",
+ "tula",
+ "tuva",
+ "vladikavkaz",
+ "vladimir",
+ "vologda",
+ "com",
+ "edu",
+ "gob",
+ "org",
+ "red",
+ "gov",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "ac",
+ "co",
+ "org",
+ "blogspot",
+ "ac",
+ "co",
+ "go",
+ "in",
+ "mi",
+ "net",
+ "or",
+ "ac",
+ "biz",
+ "co",
+ "com",
+ "edu",
+ "go",
+ "gov",
+ "int",
+ "mil",
+ "name",
+ "net",
+ "nic",
+ "org",
+ "test",
+ "web",
+ "gov",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "nom",
+ "org",
+ "agrinet",
+ "com",
+ "defense",
+ "edunet",
+ "ens",
+ "fin",
+ "gov",
+ "ind",
+ "info",
+ "intl",
+ "mincom",
+ "nat",
+ "net",
+ "org",
+ "perso",
+ "rnrt",
+ "rns",
+ "rnu",
+ "tourism",
+ "turen",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "av",
+ "bbs",
+ "bel",
+ "biz",
+ "com",
+ "dr",
+ "edu",
+ "gen",
+ "gov",
+ "info",
+ "k12",
+ "kep",
+ "mil",
+ "name",
+ "nc",
+ "net",
+ "org",
+ "pol",
+ "tel",
+ "tv",
+ "web",
+ "blogspot",
+ "gov",
+ "aero",
+ "biz",
+ "co",
+ "com",
+ "coop",
+ "edu",
+ "gov",
+ "info",
+ "int",
+ "jobs",
+ "mobi",
+ "museum",
+ "name",
+ "net",
+ "org",
+ "pro",
+ "travel",
+ "better-than",
+ "dyndns",
+ "on-the-web",
+ "worse-than",
+ "blogspot",
+ "club",
+ "com",
+ "ebiz",
+ "edu",
+ "game",
+ "gov",
+ "idv",
+ "mil",
+ "net",
+ "org",
+ "xn--czrw28b",
+ "xn--uc0atv",
+ "xn--zf0ao64a",
+ "ac",
+ "co",
+ "go",
+ "hotel",
+ "info",
+ "me",
+ "mil",
+ "mobi",
+ "ne",
+ "or",
+ "sc",
+ "tv",
+ "biz",
+ "cherkassy",
+ "cherkasy",
+ "chernigov",
+ "chernihiv",
+ "chernivtsi",
+ "chernovtsy",
+ "ck",
+ "cn",
+ "co",
+ "com",
+ "cr",
+ "crimea",
+ "cv",
+ "dn",
+ "dnepropetrovsk",
+ "dnipropetrovsk",
+ "dominic",
+ "donetsk",
+ "dp",
+ "edu",
+ "gov",
+ "if",
+ "in",
+ "ivano-frankivsk",
+ "kh",
+ "kharkiv",
+ "kharkov",
+ "kherson",
+ "khmelnitskiy",
+ "khmelnytskyi",
+ "kiev",
+ "kirovograd",
+ "km",
+ "kr",
+ "krym",
+ "ks",
+ "kv",
+ "kyiv",
+ "lg",
+ "lt",
+ "lugansk",
+ "lutsk",
+ "lv",
+ "lviv",
+ "mk",
+ "mykolaiv",
+ "net",
+ "nikolaev",
+ "od",
+ "odesa",
+ "odessa",
+ "org",
+ "pl",
+ "poltava",
+ "pp",
+ "rivne",
+ "rovno",
+ "rv",
+ "sb",
+ "sebastopol",
+ "sevastopol",
+ "sm",
+ "sumy",
+ "te",
+ "ternopil",
+ "uz",
+ "uzhgorod",
+ "vinnica",
+ "vinnytsia",
+ "vn",
+ "volyn",
+ "yalta",
+ "zaporizhzhe",
+ "zaporizhzhia",
+ "zhitomir",
+ "zhytomyr",
+ "zp",
+ "zt",
+ "ac",
+ "blogspot",
+ "co",
+ "com",
+ "go",
+ "ne",
+ "or",
+ "org",
+ "sc",
+ "ac",
+ "co",
+ "gov",
+ "ltd",
+ "me",
+ "net",
+ "nhs",
+ "org",
+ "plc",
+ "police",
+ "sch",
+ "blogspot",
+ "no-ip",
+ "service",
+ "ak",
+ "al",
+ "ar",
+ "as",
+ "az",
+ "ca",
+ "co",
+ "ct",
+ "dc",
+ "de",
+ "dni",
+ "drud",
+ "fed",
+ "fl",
+ "ga",
+ "golffan",
+ "gu",
+ "hi",
+ "ia",
+ "id",
+ "il",
+ "in",
+ "is-by",
+ "isa",
+ "kids",
+ "ks",
+ "ky",
+ "la",
+ "land-4-sale",
+ "ma",
+ "md",
+ "me",
+ "mi",
+ "mn",
+ "mo",
+ "ms",
+ "mt",
+ "nc",
+ "nd",
+ "ne",
+ "nh",
+ "nj",
+ "nm",
+ "noip",
+ "nsn",
+ "nv",
+ "ny",
+ "oh",
+ "ok",
+ "or",
+ "pa",
+ "pointto",
+ "pr",
+ "ri",
+ "sc",
+ "sd",
+ "stuff-4-sale",
+ "tn",
+ "tx",
+ "ut",
+ "va",
+ "vi",
+ "vt",
+ "wa",
+ "wi",
+ "wv",
+ "wy",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "chtr",
+ "paroch",
+ "pvt",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "k12",
+ "lib",
+ "cc",
+ "cc",
+ "k12",
+ "lib",
+ "com",
+ "edu",
+ "gub",
+ "mil",
+ "net",
+ "org",
+ "blogspot",
+ "co",
+ "com",
+ "net",
+ "org",
+ "com",
+ "edu",
+ "gov",
+ "mil",
+ "net",
+ "org",
+ "arts",
+ "co",
+ "com",
+ "e12",
+ "edu",
+ "firm",
+ "gob",
+ "gov",
+ "info",
+ "int",
+ "mil",
+ "net",
+ "org",
+ "rec",
+ "store",
+ "tec",
+ "web",
+ "co",
+ "com",
+ "k12",
+ "net",
+ "org",
+ "ac",
+ "biz",
+ "blogspot",
+ "com",
+ "edu",
+ "gov",
+ "health",
+ "info",
+ "int",
+ "name",
+ "net",
+ "org",
+ "pro",
+ "com",
+ "edu",
+ "net",
+ "org",
+ "com",
+ "dyndns",
+ "edu",
+ "gov",
+ "mypets",
+ "net",
+ "org",
+ "xn--80au",
+ "xn--90azh",
+ "xn--c1avg",
+ "xn--d1at",
+ "xn--o1ac",
+ "xn--o1ach",
+ "fhapp",
+ "ac",
+ "agric",
+ "alt",
+ "co",
+ "edu",
+ "gov",
+ "grondar",
+ "law",
+ "mil",
+ "net",
+ "ngo",
+ "nis",
+ "nom",
+ "org",
+ "school",
+ "tm",
+ "web",
+ "blogspot",
+ "ac",
+ "biz",
+ "co",
+ "com",
+ "edu",
+ "gov",
+ "info",
+ "mil",
+ "net",
+ "org",
+ "sch",
+}
diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go
new file mode 100644
index 000000000..a56909c10
--- /dev/null
+++ b/vendor/golang.org/x/net/route/address.go
@@ -0,0 +1,281 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import "runtime"
+
+// An Addr represents an address associated with packet routing.
+type Addr interface {
+ // Family returns an address family.
+ Family() int
+}
+
+// A LinkAddr represents a link-layer address.
+type LinkAddr struct {
+ Index int // interface index when attached
+ Name string // interface name when attached
+ Addr []byte // link-layer address when attached
+}
+
+// Family implements the Family method of Addr interface.
+func (a *LinkAddr) Family() int { return sysAF_LINK }
+
+func parseLinkAddr(b []byte) (Addr, error) {
+ if len(b) < 8 {
+ return nil, errInvalidAddr
+ }
+ _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:])
+ if err != nil {
+ return nil, err
+ }
+ a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
+ return a, nil
+}
+
+// parseKernelLinkAddr parses b as a link-layer address in
+// conventional BSD kernel form.
+func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
+ // The encoding looks like the following:
+ // +----------------------------+
+ // | Type (1 octet) |
+ // +----------------------------+
+ // | Name length (1 octet) |
+ // +----------------------------+
+ // | Address length (1 octet) |
+ // +----------------------------+
+ // | Selector length (1 octet) |
+ // +----------------------------+
+ // | Data (variable) |
+ // +----------------------------+
+ //
+	// On some platforms, a length field of all one bits means "don't
+	// care".
+ nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
+ if nlen == 0xff {
+ nlen = 0
+ }
+ if alen == 0xff {
+ alen = 0
+ }
+ if slen == 0xff {
+ slen = 0
+ }
+ l := 4 + nlen + alen + slen
+ if len(b) < l {
+ return 0, nil, errInvalidAddr
+ }
+ data := b[4:]
+ var name string
+ var addr []byte
+ if nlen > 0 {
+ name = string(data[:nlen])
+ data = data[nlen:]
+ }
+ if alen > 0 {
+ addr = data[:alen]
+ data = data[alen:]
+ }
+ return l, &LinkAddr{Name: name, Addr: addr}, nil
+}
+
+// An Inet4Addr represents an internet address for IPv4.
+type Inet4Addr struct {
+ IP [4]byte // IP address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet4Addr) Family() int { return sysAF_INET }
+
+// An Inet6Addr represents an internet address for IPv6.
+type Inet6Addr struct {
+ IP [16]byte // IP address
+ ZoneID int // zone identifier
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet6Addr) Family() int { return sysAF_INET6 }
+
+// parseInetAddr parses b as an internet address for IPv4 or IPv6.
+func parseInetAddr(af int, b []byte) (Addr, error) {
+ switch af {
+ case sysAF_INET:
+ if len(b) < 16 {
+ return nil, errInvalidAddr
+ }
+ a := &Inet4Addr{}
+ copy(a.IP[:], b[4:8])
+ return a, nil
+ case sysAF_INET6:
+ if len(b) < 28 {
+ return nil, errInvalidAddr
+ }
+ a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
+ copy(a.IP[:], b[8:24])
+ if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
+ // KAME based IPv6 protocol stack usually
+ // embeds the interface index in the
+ // interface-local or link-local address as
+ // the kernel-internal form.
+ id := int(bigEndian.Uint16(a.IP[2:4]))
+ if id != 0 {
+ a.ZoneID = id
+ a.IP[2], a.IP[3] = 0, 0
+ }
+ }
+ return a, nil
+ default:
+ return nil, errInvalidAddr
+ }
+}
+
+// parseKernelInetAddr parses b as an internet address in conventional
+// BSD kernel form.
+func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
+ // The encoding looks similar to the NLRI encoding.
+ // +----------------------------+
+ // | Length (1 octet) |
+ // +----------------------------+
+ // | Address prefix (variable) |
+ // +----------------------------+
+ //
+ // The differences between the kernel form and the NLRI
+ // encoding are:
+ //
+ // - The length field of the kernel form indicates the prefix
+ // length in bytes, not in bits
+ //
+ // - In the kernel form, zero value of the length field
+ // doesn't mean 0.0.0.0/0 or ::/0
+ //
+	// - The kernel form appends leading bytes to the prefix field
+	//   to align the <length, prefix> tuple with the routing
+	//   message boundary
+ l := int(b[0])
+ if runtime.GOOS == "darwin" {
+	// On Darwin, an address in the kernel form is also
+ // used as a message filler.
+ if l == 0 || len(b) > roundup(l) {
+ l = roundup(l)
+ }
+ } else {
+ l = roundup(l)
+ }
+ if len(b) < l {
+ return 0, nil, errInvalidAddr
+ }
+ // Don't reorder case expressions.
+ // The case expressions for IPv6 must come first.
+ const (
+ off4 = 4 // offset of in_addr
+ off6 = 8 // offset of in6_addr
+ )
+ switch {
+ case b[0] == 28: // size of sockaddr_in6
+ a := &Inet6Addr{}
+ copy(a.IP[:], b[off6:off6+16])
+ return int(b[0]), a, nil
+ case af == sysAF_INET6:
+ a := &Inet6Addr{}
+ if l-1 < off6 {
+ copy(a.IP[:], b[1:l])
+ } else {
+ copy(a.IP[:], b[l-off6:l])
+ }
+ return int(b[0]), a, nil
+ case b[0] == 16: // size of sockaddr_in
+ a := &Inet4Addr{}
+ copy(a.IP[:], b[off4:off4+4])
+ return int(b[0]), a, nil
+	default: // an old-fashioned encoding; AF_UNSPEC or unknown means AF_INET
+ a := &Inet4Addr{}
+ if l-1 < off4 {
+ copy(a.IP[:], b[1:l])
+ } else {
+ copy(a.IP[:], b[l-off4:l])
+ }
+ return int(b[0]), a, nil
+ }
+}
+
+// A DefaultAddr represents an address of various operating
+// system-specific features.
+type DefaultAddr struct {
+ af int
+ Raw []byte // raw format of address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *DefaultAddr) Family() int { return a.af }
+
+func parseDefaultAddr(b []byte) (Addr, error) {
+ if len(b) < 2 || len(b) < int(b[0]) {
+ return nil, errInvalidAddr
+ }
+ a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
+ return a, nil
+}
+
+func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
+ var as [sysRTAX_MAX]Addr
+ af := int(sysAF_UNSPEC)
+ for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ {
+ if attrs&(1<<i) == 0 {
+ continue
+ }
+ if i <= sysRTAX_BRD {
+ switch b[1] {
+ case sysAF_LINK:
+ a, err := parseLinkAddr(b)
+ if err != nil {
+ return nil, err
+ }
+ as[i] = a
+ l := roundup(int(b[0]))
+ if len(b) < l {
+ return nil, errMessageTooShort
+ }
+ b = b[l:]
+ case sysAF_INET, sysAF_INET6:
+ af = int(b[1])
+ a, err := parseInetAddr(af, b)
+ if err != nil {
+ return nil, err
+ }
+ as[i] = a
+ l := roundup(int(b[0]))
+ if len(b) < l {
+ return nil, errMessageTooShort
+ }
+ b = b[l:]
+ default:
+ l, a, err := fn(af, b)
+ if err != nil {
+ return nil, err
+ }
+ as[i] = a
+ ll := roundup(l)
+ if len(b) < ll {
+ b = b[l:]
+ } else {
+ b = b[ll:]
+ }
+ }
+ } else {
+ a, err := parseDefaultAddr(b)
+ if err != nil {
+ return nil, err
+ }
+ as[i] = a
+ l := roundup(int(b[0]))
+ if len(b) < l {
+ return nil, errMessageTooShort
+ }
+ b = b[l:]
+ }
+ }
+ return as[:], nil
+}
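A minimal sketch (not part of the diff) of how callers typically consume the Addr values produced by the parsers above, type-switching on the exported LinkAddr, Inet4Addr and Inet6Addr types defined in this file. The sample addresses and interface name are made up for illustration, and the package only builds on the BSD-family systems named in its build tag.

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/route"
)

// describe renders a route.Addr in human-readable form by switching on the
// concrete address types defined in address.go.
func describe(a route.Addr) string {
	switch a := a.(type) {
	case *route.LinkAddr:
		// Link-layer address: interface index plus optional name and MAC.
		return fmt.Sprintf("link #%d %s %s", a.Index, a.Name, net.HardwareAddr(a.Addr))
	case *route.Inet4Addr:
		return net.IP(a.IP[:]).String()
	case *route.Inet6Addr:
		return fmt.Sprintf("%s (zone %d)", net.IP(a.IP[:]), a.ZoneID)
	default:
		return fmt.Sprintf("address family %d", a.Family())
	}
}

func main() {
	// Hypothetical values, mirroring the shapes used by the tests below.
	addrs := []route.Addr{
		&route.Inet4Addr{IP: [4]byte{192, 168, 86, 0}},
		&route.LinkAddr{Index: 4, Name: "em1", Addr: []byte{0x00, 0x0c, 0x29, 0x66, 0x2c, 0xdc}},
	}
	for _, a := range addrs {
		fmt.Println(describe(a))
	}
}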
diff --git a/vendor/golang.org/x/net/route/address_darwin_test.go b/vendor/golang.org/x/net/route/address_darwin_test.go
new file mode 100644
index 000000000..b86bd3df1
--- /dev/null
+++ b/vendor/golang.org/x/net/route/address_darwin_test.go
@@ -0,0 +1,63 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import (
+ "reflect"
+ "testing"
+)
+
+type parseAddrsOnDarwinTest struct {
+ attrs uint
+ fn func(int, []byte) (int, Addr, error)
+ b []byte
+ as []Addr
+}
+
+var parseAddrsOnDarwinLittleEndianTests = []parseAddrsOnDarwinTest{
+ {
+ sysRTA_DST | sysRTA_GATEWAY | sysRTA_NETMASK,
+ parseKernelInetAddr,
+ []byte{
+ 0x10, 0x2, 0x0, 0x0, 0xc0, 0xa8, 0x56, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+
+ 0x14, 0x12, 0x4, 0x0, 0x6, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+
+ 0x7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ []Addr{
+ &Inet4Addr{IP: [4]byte{192, 168, 86, 0}},
+ &LinkAddr{Index: 4},
+ &Inet4Addr{IP: [4]byte{255, 255, 255, 255}},
+ nil,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
+ },
+}
+
+func TestParseAddrsOnDarwin(t *testing.T) {
+ tests := parseAddrsOnDarwinLittleEndianTests
+ if nativeEndian != littleEndian {
+ t.Skip("no test for non-little endian machine yet")
+ }
+
+ for i, tt := range tests {
+ as, err := parseAddrs(tt.attrs, tt.fn, tt.b)
+ if err != nil {
+ t.Error(i, err)
+ continue
+ }
+ if !reflect.DeepEqual(as, tt.as) {
+ t.Errorf("#%d: got %+v; want %+v", i, as, tt.as)
+ continue
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/route/address_test.go b/vendor/golang.org/x/net/route/address_test.go
new file mode 100644
index 000000000..2005ef7c2
--- /dev/null
+++ b/vendor/golang.org/x/net/route/address_test.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+ "reflect"
+ "testing"
+)
+
+type parseAddrsTest struct {
+ attrs uint
+ fn func(int, []byte) (int, Addr, error)
+ b []byte
+ as []Addr
+}
+
+var parseAddrsLittleEndianTests = []parseAddrsTest{
+ {
+ sysRTA_DST | sysRTA_GATEWAY | sysRTA_NETMASK | sysRTA_BRD,
+ parseKernelInetAddr,
+ []byte{
+ 0x38, 0x12, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+
+ 0x38, 0x12, 0x2, 0x0, 0x6, 0x3, 0x6, 0x0,
+ 0x65, 0x6d, 0x31, 0x0, 0xc, 0x29, 0x66, 0x2c,
+ 0xdc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+
+ 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xb4,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+
+ 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xff,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+ []Addr{
+ &LinkAddr{Index: 0},
+ &LinkAddr{Index: 2, Name: "em1", Addr: []byte{0x00, 0x0c, 0x29, 0x66, 0x2c, 0xdc}},
+ &Inet4Addr{IP: [4]byte{172, 16, 220, 180}},
+ nil,
+ nil,
+ nil,
+ nil,
+ &Inet4Addr{IP: [4]byte{172, 16, 220, 255}},
+ },
+ },
+ {
+ sysRTA_NETMASK | sysRTA_IFP | sysRTA_IFA,
+ parseKernelInetAddr,
+ []byte{
+ 0x7, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0,
+
+ 0x18, 0x12, 0xa, 0x0, 0x87, 0x8, 0x0, 0x0,
+ 0x76, 0x6c, 0x61, 0x6e, 0x35, 0x36, 0x38, 0x32,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+
+ 0x10, 0x2, 0x0, 0x0, 0xa9, 0xfe, 0x0, 0x1,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+ []Addr{
+ nil,
+ nil,
+ &Inet4Addr{IP: [4]byte{255, 255, 255, 0}},
+ nil,
+ &LinkAddr{Index: 10, Name: "vlan5682"},
+ &Inet4Addr{IP: [4]byte{169, 254, 0, 1}},
+ nil,
+ nil,
+ },
+ },
+}
+
+func TestParseAddrs(t *testing.T) {
+ tests := parseAddrsLittleEndianTests
+ if nativeEndian != littleEndian {
+ t.Skip("no test for non-little endian machine yet")
+ }
+
+ for i, tt := range tests {
+ as, err := parseAddrs(tt.attrs, tt.fn, tt.b)
+ if err != nil {
+ t.Error(i, err)
+ continue
+ }
+ as = as[:8] // the list varies between operating systems
+ if !reflect.DeepEqual(as, tt.as) {
+ t.Errorf("#%d: got %+v; want %+v", i, as, tt.as)
+ continue
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/route/binary.go b/vendor/golang.org/x/net/route/binary.go
new file mode 100644
index 000000000..4c561631b
--- /dev/null
+++ b/vendor/golang.org/x/net/route/binary.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// This file contains duplicates of the encoding/binary package.
+//
+// This package is supposed to be used by the net package of the
+// standard library. Therefore the set of packages imported here must
+// be the same as that of the net package.
+
+var (
+ littleEndian binaryLittleEndian
+ bigEndian binaryBigEndian
+)
+
+type binaryByteOrder interface {
+ Uint16([]byte) uint16
+ Uint32([]byte) uint32
+ PutUint16([]byte, uint16)
+ PutUint32([]byte, uint32)
+ Uint64([]byte) uint64
+}
+
+type binaryLittleEndian struct{}
+
+func (binaryLittleEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func (binaryLittleEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func (binaryLittleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (binaryLittleEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func (binaryLittleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+type binaryBigEndian struct{}
+
+func (binaryBigEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func (binaryBigEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func (binaryBigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (binaryBigEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func (binaryBigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go
new file mode 100644
index 000000000..f452ad14c
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_darwin.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_ROUTE = C.AF_ROUTE
+ sysAF_LINK = C.AF_LINK
+ sysAF_INET6 = C.AF_INET6
+
+ sysNET_RT_DUMP = C.NET_RT_DUMP
+ sysNET_RT_FLAGS = C.NET_RT_FLAGS
+ sysNET_RT_IFLIST = C.NET_RT_IFLIST
+ sysNET_RT_STAT = C.NET_RT_STAT
+ sysNET_RT_TRASH = C.NET_RT_TRASH
+ sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2
+ sysNET_RT_DUMP2 = C.NET_RT_DUMP2
+ sysNET_RT_MAXID = C.NET_RT_MAXID
+)
+
+const (
+ sysCTL_MAXNAME = C.CTL_MAXNAME
+
+ sysCTL_UNSPEC = C.CTL_UNSPEC
+ sysCTL_KERN = C.CTL_KERN
+ sysCTL_VM = C.CTL_VM
+ sysCTL_VFS = C.CTL_VFS
+ sysCTL_NET = C.CTL_NET
+ sysCTL_DEBUG = C.CTL_DEBUG
+ sysCTL_HW = C.CTL_HW
+ sysCTL_MACHDEP = C.CTL_MACHDEP
+ sysCTL_USER = C.CTL_USER
+ sysCTL_MAXID = C.CTL_MAXID
+)
+
+const (
+ sysRTM_VERSION = C.RTM_VERSION
+
+ sysRTM_ADD = C.RTM_ADD
+ sysRTM_DELETE = C.RTM_DELETE
+ sysRTM_CHANGE = C.RTM_CHANGE
+ sysRTM_GET = C.RTM_GET
+ sysRTM_LOSING = C.RTM_LOSING
+ sysRTM_REDIRECT = C.RTM_REDIRECT
+ sysRTM_MISS = C.RTM_MISS
+ sysRTM_LOCK = C.RTM_LOCK
+ sysRTM_OLDADD = C.RTM_OLDADD
+ sysRTM_OLDDEL = C.RTM_OLDDEL
+ sysRTM_RESOLVE = C.RTM_RESOLVE
+ sysRTM_NEWADDR = C.RTM_NEWADDR
+ sysRTM_DELADDR = C.RTM_DELADDR
+ sysRTM_IFINFO = C.RTM_IFINFO
+ sysRTM_NEWMADDR = C.RTM_NEWMADDR
+ sysRTM_DELMADDR = C.RTM_DELMADDR
+ sysRTM_IFINFO2 = C.RTM_IFINFO2
+ sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2
+ sysRTM_GET2 = C.RTM_GET2
+
+ sysRTA_DST = C.RTA_DST
+ sysRTA_GATEWAY = C.RTA_GATEWAY
+ sysRTA_NETMASK = C.RTA_NETMASK
+ sysRTA_GENMASK = C.RTA_GENMASK
+ sysRTA_IFP = C.RTA_IFP
+ sysRTA_IFA = C.RTA_IFA
+ sysRTA_AUTHOR = C.RTA_AUTHOR
+ sysRTA_BRD = C.RTA_BRD
+
+ sysRTAX_DST = C.RTAX_DST
+ sysRTAX_GATEWAY = C.RTAX_GATEWAY
+ sysRTAX_NETMASK = C.RTAX_NETMASK
+ sysRTAX_GENMASK = C.RTAX_GENMASK
+ sysRTAX_IFP = C.RTAX_IFP
+ sysRTAX_IFA = C.RTAX_IFA
+ sysRTAX_AUTHOR = C.RTAX_AUTHOR
+ sysRTAX_BRD = C.RTAX_BRD
+ sysRTAX_MAX = C.RTAX_MAX
+)
+
+const (
+ sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr
+ sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr
+ sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr
+ sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2
+ sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2
+ sizeofIfDataDarwin15 = C.sizeof_struct_if_data
+ sizeofIfData64Darwin15 = C.sizeof_struct_if_data64
+
+ sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr
+ sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2
+ sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics
+)
diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go
new file mode 100644
index 000000000..c737751d7
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_dragonfly.go
@@ -0,0 +1,105 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_ROUTE = C.AF_ROUTE
+ sysAF_LINK = C.AF_LINK
+ sysAF_INET6 = C.AF_INET6
+
+ sysNET_RT_DUMP = C.NET_RT_DUMP
+ sysNET_RT_FLAGS = C.NET_RT_FLAGS
+ sysNET_RT_IFLIST = C.NET_RT_IFLIST
+ sysNET_RT_MAXID = C.NET_RT_MAXID
+)
+
+const (
+ sysCTL_MAXNAME = C.CTL_MAXNAME
+
+ sysCTL_UNSPEC = C.CTL_UNSPEC
+ sysCTL_KERN = C.CTL_KERN
+ sysCTL_VM = C.CTL_VM
+ sysCTL_VFS = C.CTL_VFS
+ sysCTL_NET = C.CTL_NET
+ sysCTL_DEBUG = C.CTL_DEBUG
+ sysCTL_HW = C.CTL_HW
+ sysCTL_MACHDEP = C.CTL_MACHDEP
+ sysCTL_USER = C.CTL_USER
+ sysCTL_P1003_1B = C.CTL_P1003_1B
+ sysCTL_LWKT = C.CTL_LWKT
+ sysCTL_MAXID = C.CTL_MAXID
+)
+
+const (
+ sysRTM_VERSION = C.RTM_VERSION
+
+ sysRTM_ADD = C.RTM_ADD
+ sysRTM_DELETE = C.RTM_DELETE
+ sysRTM_CHANGE = C.RTM_CHANGE
+ sysRTM_GET = C.RTM_GET
+ sysRTM_LOSING = C.RTM_LOSING
+ sysRTM_REDIRECT = C.RTM_REDIRECT
+ sysRTM_MISS = C.RTM_MISS
+ sysRTM_LOCK = C.RTM_LOCK
+ sysRTM_OLDADD = C.RTM_OLDADD
+ sysRTM_OLDDEL = C.RTM_OLDDEL
+ sysRTM_RESOLVE = C.RTM_RESOLVE
+ sysRTM_NEWADDR = C.RTM_NEWADDR
+ sysRTM_DELADDR = C.RTM_DELADDR
+ sysRTM_IFINFO = C.RTM_IFINFO
+ sysRTM_NEWMADDR = C.RTM_NEWMADDR
+ sysRTM_DELMADDR = C.RTM_DELMADDR
+ sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+ sysRTM_IEEE80211 = C.RTM_IEEE80211
+
+ sysRTA_DST = C.RTA_DST
+ sysRTA_GATEWAY = C.RTA_GATEWAY
+ sysRTA_NETMASK = C.RTA_NETMASK
+ sysRTA_GENMASK = C.RTA_GENMASK
+ sysRTA_IFP = C.RTA_IFP
+ sysRTA_IFA = C.RTA_IFA
+ sysRTA_AUTHOR = C.RTA_AUTHOR
+ sysRTA_BRD = C.RTA_BRD
+ sysRTA_MPLS1 = C.RTA_MPLS1
+ sysRTA_MPLS2 = C.RTA_MPLS2
+ sysRTA_MPLS3 = C.RTA_MPLS3
+
+ sysRTAX_DST = C.RTAX_DST
+ sysRTAX_GATEWAY = C.RTAX_GATEWAY
+ sysRTAX_NETMASK = C.RTAX_NETMASK
+ sysRTAX_GENMASK = C.RTAX_GENMASK
+ sysRTAX_IFP = C.RTAX_IFP
+ sysRTAX_IFA = C.RTAX_IFA
+ sysRTAX_AUTHOR = C.RTAX_AUTHOR
+ sysRTAX_BRD = C.RTAX_BRD
+ sysRTAX_MPLS1 = C.RTAX_MPLS1
+ sysRTAX_MPLS2 = C.RTAX_MPLS2
+ sysRTAX_MPLS3 = C.RTAX_MPLS3
+ sysRTAX_MAX = C.RTAX_MAX
+)
+
+const (
+ sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr
+ sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr
+ sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr
+ sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr
+
+ sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr
+ sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics
+)
diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go
new file mode 100644
index 000000000..8f834e81d
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_freebsd.go
@@ -0,0 +1,329 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+struct if_data_freebsd7 {
+ u_char ifi_type;
+ u_char ifi_physical;
+ u_char ifi_addrlen;
+ u_char ifi_hdrlen;
+ u_char ifi_link_state;
+ u_char ifi_spare_char1;
+ u_char ifi_spare_char2;
+ u_char ifi_datalen;
+ u_long ifi_mtu;
+ u_long ifi_metric;
+ u_long ifi_baudrate;
+ u_long ifi_ipackets;
+ u_long ifi_ierrors;
+ u_long ifi_opackets;
+ u_long ifi_oerrors;
+ u_long ifi_collisions;
+ u_long ifi_ibytes;
+ u_long ifi_obytes;
+ u_long ifi_imcasts;
+ u_long ifi_omcasts;
+ u_long ifi_iqdrops;
+ u_long ifi_noproto;
+ u_long ifi_hwassist;
+ time_t __ifi_epoch;
+ struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd8 {
+ u_char ifi_type;
+ u_char ifi_physical;
+ u_char ifi_addrlen;
+ u_char ifi_hdrlen;
+ u_char ifi_link_state;
+ u_char ifi_spare_char1;
+ u_char ifi_spare_char2;
+ u_char ifi_datalen;
+ u_long ifi_mtu;
+ u_long ifi_metric;
+ u_long ifi_baudrate;
+ u_long ifi_ipackets;
+ u_long ifi_ierrors;
+ u_long ifi_opackets;
+ u_long ifi_oerrors;
+ u_long ifi_collisions;
+ u_long ifi_ibytes;
+ u_long ifi_obytes;
+ u_long ifi_imcasts;
+ u_long ifi_omcasts;
+ u_long ifi_iqdrops;
+ u_long ifi_noproto;
+ u_long ifi_hwassist;
+ time_t __ifi_epoch;
+ struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd9 {
+ u_char ifi_type;
+ u_char ifi_physical;
+ u_char ifi_addrlen;
+ u_char ifi_hdrlen;
+ u_char ifi_link_state;
+ u_char ifi_spare_char1;
+ u_char ifi_spare_char2;
+ u_char ifi_datalen;
+ u_long ifi_mtu;
+ u_long ifi_metric;
+ u_long ifi_baudrate;
+ u_long ifi_ipackets;
+ u_long ifi_ierrors;
+ u_long ifi_opackets;
+ u_long ifi_oerrors;
+ u_long ifi_collisions;
+ u_long ifi_ibytes;
+ u_long ifi_obytes;
+ u_long ifi_imcasts;
+ u_long ifi_omcasts;
+ u_long ifi_iqdrops;
+ u_long ifi_noproto;
+ u_long ifi_hwassist;
+ time_t __ifi_epoch;
+ struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd10 {
+ u_char ifi_type;
+ u_char ifi_physical;
+ u_char ifi_addrlen;
+ u_char ifi_hdrlen;
+ u_char ifi_link_state;
+ u_char ifi_vhid;
+ u_char ifi_baudrate_pf;
+ u_char ifi_datalen;
+ u_long ifi_mtu;
+ u_long ifi_metric;
+ u_long ifi_baudrate;
+ u_long ifi_ipackets;
+ u_long ifi_ierrors;
+ u_long ifi_opackets;
+ u_long ifi_oerrors;
+ u_long ifi_collisions;
+ u_long ifi_ibytes;
+ u_long ifi_obytes;
+ u_long ifi_imcasts;
+ u_long ifi_omcasts;
+ u_long ifi_iqdrops;
+ u_long ifi_noproto;
+ uint64_t ifi_hwassist;
+ time_t __ifi_epoch;
+ struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd11 {
+ uint8_t ifi_type;
+ uint8_t ifi_physical;
+ uint8_t ifi_addrlen;
+ uint8_t ifi_hdrlen;
+ uint8_t ifi_link_state;
+ uint8_t ifi_vhid;
+ uint16_t ifi_datalen;
+ uint32_t ifi_mtu;
+ uint32_t ifi_metric;
+ uint64_t ifi_baudrate;
+ uint64_t ifi_ipackets;
+ uint64_t ifi_ierrors;
+ uint64_t ifi_opackets;
+ uint64_t ifi_oerrors;
+ uint64_t ifi_collisions;
+ uint64_t ifi_ibytes;
+ uint64_t ifi_obytes;
+ uint64_t ifi_imcasts;
+ uint64_t ifi_omcasts;
+ uint64_t ifi_iqdrops;
+ uint64_t ifi_oqdrops;
+ uint64_t ifi_noproto;
+ uint64_t ifi_hwassist;
+ union {
+ time_t tt;
+ uint64_t ph;
+ } __ifi_epoch;
+ union {
+ struct timeval tv;
+ struct {
+ uint64_t ph1;
+ uint64_t ph2;
+ } ph;
+ } __ifi_lastchange;
+};
+
+struct if_msghdr_freebsd7 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data_freebsd7 ifm_data;
+};
+
+struct if_msghdr_freebsd8 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data_freebsd8 ifm_data;
+};
+
+struct if_msghdr_freebsd9 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data_freebsd9 ifm_data;
+};
+
+struct if_msghdr_freebsd10 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data_freebsd10 ifm_data;
+};
+
+struct if_msghdr_freebsd11 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data_freebsd11 ifm_data;
+};
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_ROUTE = C.AF_ROUTE
+ sysAF_LINK = C.AF_LINK
+ sysAF_INET6 = C.AF_INET6
+
+ sysNET_RT_DUMP = C.NET_RT_DUMP
+ sysNET_RT_FLAGS = C.NET_RT_FLAGS
+ sysNET_RT_IFLIST = C.NET_RT_IFLIST
+ sysNET_RT_IFMALIST = C.NET_RT_IFMALIST
+ sysNET_RT_IFLISTL = C.NET_RT_IFLISTL
+)
+
+const (
+ sysCTL_MAXNAME = C.CTL_MAXNAME
+
+ sysCTL_UNSPEC = C.CTL_UNSPEC
+ sysCTL_KERN = C.CTL_KERN
+ sysCTL_VM = C.CTL_VM
+ sysCTL_VFS = C.CTL_VFS
+ sysCTL_NET = C.CTL_NET
+ sysCTL_DEBUG = C.CTL_DEBUG
+ sysCTL_HW = C.CTL_HW
+ sysCTL_MACHDEP = C.CTL_MACHDEP
+ sysCTL_USER = C.CTL_USER
+ sysCTL_P1003_1B = C.CTL_P1003_1B
+)
+
+const (
+ sysRTM_VERSION = C.RTM_VERSION
+
+ sysRTM_ADD = C.RTM_ADD
+ sysRTM_DELETE = C.RTM_DELETE
+ sysRTM_CHANGE = C.RTM_CHANGE
+ sysRTM_GET = C.RTM_GET
+ sysRTM_LOSING = C.RTM_LOSING
+ sysRTM_REDIRECT = C.RTM_REDIRECT
+ sysRTM_MISS = C.RTM_MISS
+ sysRTM_LOCK = C.RTM_LOCK
+ sysRTM_RESOLVE = C.RTM_RESOLVE
+ sysRTM_NEWADDR = C.RTM_NEWADDR
+ sysRTM_DELADDR = C.RTM_DELADDR
+ sysRTM_IFINFO = C.RTM_IFINFO
+ sysRTM_NEWMADDR = C.RTM_NEWMADDR
+ sysRTM_DELMADDR = C.RTM_DELMADDR
+ sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+ sysRTM_IEEE80211 = C.RTM_IEEE80211
+
+ sysRTA_DST = C.RTA_DST
+ sysRTA_GATEWAY = C.RTA_GATEWAY
+ sysRTA_NETMASK = C.RTA_NETMASK
+ sysRTA_GENMASK = C.RTA_GENMASK
+ sysRTA_IFP = C.RTA_IFP
+ sysRTA_IFA = C.RTA_IFA
+ sysRTA_AUTHOR = C.RTA_AUTHOR
+ sysRTA_BRD = C.RTA_BRD
+
+ sysRTAX_DST = C.RTAX_DST
+ sysRTAX_GATEWAY = C.RTAX_GATEWAY
+ sysRTAX_NETMASK = C.RTAX_NETMASK
+ sysRTAX_GENMASK = C.RTAX_GENMASK
+ sysRTAX_IFP = C.RTAX_IFP
+ sysRTAX_IFA = C.RTAX_IFA
+ sysRTAX_AUTHOR = C.RTAX_AUTHOR
+ sysRTAX_BRD = C.RTAX_BRD
+ sysRTAX_MAX = C.RTAX_MAX
+)
+
+const (
+ sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl
+ sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr
+ sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl
+ sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr
+ sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr
+
+ sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr
+ sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics
+
+ sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7
+ sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8
+ sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9
+ sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10
+ sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11
+
+ sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7
+ sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8
+ sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9
+ sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10
+ sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11
+
+ sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl
+ sizeofIfaMsghdrFreeBSD10Emu = C.sizeof_struct_ifa_msghdr
+ sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl
+ sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr
+ sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr
+
+ sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr
+ sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics
+
+ sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7
+ sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8
+ sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9
+ sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10
+ sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11
+
+ sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7
+ sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8
+ sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9
+ sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10
+ sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11
+)
diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go
new file mode 100644
index 000000000..b18d85e01
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_netbsd.go
@@ -0,0 +1,104 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_ROUTE = C.AF_ROUTE
+ sysAF_LINK = C.AF_LINK
+ sysAF_INET6 = C.AF_INET6
+
+ sysNET_RT_DUMP = C.NET_RT_DUMP
+ sysNET_RT_FLAGS = C.NET_RT_FLAGS
+ sysNET_RT_IFLIST = C.NET_RT_IFLIST
+ sysNET_RT_MAXID = C.NET_RT_MAXID
+)
+
+const (
+ sysCTL_MAXNAME = C.CTL_MAXNAME
+
+ sysCTL_UNSPEC = C.CTL_UNSPEC
+ sysCTL_KERN = C.CTL_KERN
+ sysCTL_VM = C.CTL_VM
+ sysCTL_VFS = C.CTL_VFS
+ sysCTL_NET = C.CTL_NET
+ sysCTL_DEBUG = C.CTL_DEBUG
+ sysCTL_HW = C.CTL_HW
+ sysCTL_MACHDEP = C.CTL_MACHDEP
+ sysCTL_USER = C.CTL_USER
+ sysCTL_DDB = C.CTL_DDB
+ sysCTL_PROC = C.CTL_PROC
+ sysCTL_VENDOR = C.CTL_VENDOR
+ sysCTL_EMUL = C.CTL_EMUL
+ sysCTL_SECURITY = C.CTL_SECURITY
+ sysCTL_MAXID = C.CTL_MAXID
+)
+
+const (
+ sysRTM_VERSION = C.RTM_VERSION
+
+ sysRTM_ADD = C.RTM_ADD
+ sysRTM_DELETE = C.RTM_DELETE
+ sysRTM_CHANGE = C.RTM_CHANGE
+ sysRTM_GET = C.RTM_GET
+ sysRTM_LOSING = C.RTM_LOSING
+ sysRTM_REDIRECT = C.RTM_REDIRECT
+ sysRTM_MISS = C.RTM_MISS
+ sysRTM_LOCK = C.RTM_LOCK
+ sysRTM_OLDADD = C.RTM_OLDADD
+ sysRTM_OLDDEL = C.RTM_OLDDEL
+ sysRTM_RESOLVE = C.RTM_RESOLVE
+ sysRTM_NEWADDR = C.RTM_NEWADDR
+ sysRTM_DELADDR = C.RTM_DELADDR
+ sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+ sysRTM_IEEE80211 = C.RTM_IEEE80211
+ sysRTM_SETGATE = C.RTM_SETGATE
+ sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD
+ sysRTM_IFINFO = C.RTM_IFINFO
+ sysRTM_CHGADDR = C.RTM_CHGADDR
+
+ sysRTA_DST = C.RTA_DST
+ sysRTA_GATEWAY = C.RTA_GATEWAY
+ sysRTA_NETMASK = C.RTA_NETMASK
+ sysRTA_GENMASK = C.RTA_GENMASK
+ sysRTA_IFP = C.RTA_IFP
+ sysRTA_IFA = C.RTA_IFA
+ sysRTA_AUTHOR = C.RTA_AUTHOR
+ sysRTA_BRD = C.RTA_BRD
+ sysRTA_TAG = C.RTA_TAG
+
+ sysRTAX_DST = C.RTAX_DST
+ sysRTAX_GATEWAY = C.RTAX_GATEWAY
+ sysRTAX_NETMASK = C.RTAX_NETMASK
+ sysRTAX_GENMASK = C.RTAX_GENMASK
+ sysRTAX_IFP = C.RTAX_IFP
+ sysRTAX_IFA = C.RTAX_IFA
+ sysRTAX_AUTHOR = C.RTAX_AUTHOR
+ sysRTAX_BRD = C.RTAX_BRD
+ sysRTAX_TAG = C.RTAX_TAG
+ sysRTAX_MAX = C.RTAX_MAX
+)
+
+const (
+ sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr
+ sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr
+ sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr
+
+ sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr
+ sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics
+)
diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go
new file mode 100644
index 000000000..5df7a43bc
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_openbsd.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_ROUTE = C.AF_ROUTE
+ sysAF_LINK = C.AF_LINK
+ sysAF_INET6 = C.AF_INET6
+
+ sysNET_RT_DUMP = C.NET_RT_DUMP
+ sysNET_RT_FLAGS = C.NET_RT_FLAGS
+ sysNET_RT_IFLIST = C.NET_RT_IFLIST
+ sysNET_RT_STATS = C.NET_RT_STATS
+ sysNET_RT_TABLE = C.NET_RT_TABLE
+ sysNET_RT_IFNAMES = C.NET_RT_IFNAMES
+ sysNET_RT_MAXID = C.NET_RT_MAXID
+)
+
+const (
+ sysCTL_MAXNAME = C.CTL_MAXNAME
+
+ sysCTL_UNSPEC = C.CTL_UNSPEC
+ sysCTL_KERN = C.CTL_KERN
+ sysCTL_VM = C.CTL_VM
+ sysCTL_FS = C.CTL_FS
+ sysCTL_NET = C.CTL_NET
+ sysCTL_DEBUG = C.CTL_DEBUG
+ sysCTL_HW = C.CTL_HW
+ sysCTL_MACHDEP = C.CTL_MACHDEP
+ sysCTL_DDB = C.CTL_DDB
+ sysCTL_VFS = C.CTL_VFS
+ sysCTL_MAXID = C.CTL_MAXID
+)
+
+const (
+ sysRTM_VERSION = C.RTM_VERSION
+
+ sysRTM_ADD = C.RTM_ADD
+ sysRTM_DELETE = C.RTM_DELETE
+ sysRTM_CHANGE = C.RTM_CHANGE
+ sysRTM_GET = C.RTM_GET
+ sysRTM_LOSING = C.RTM_LOSING
+ sysRTM_REDIRECT = C.RTM_REDIRECT
+ sysRTM_MISS = C.RTM_MISS
+ sysRTM_LOCK = C.RTM_LOCK
+ sysRTM_RESOLVE = C.RTM_RESOLVE
+ sysRTM_NEWADDR = C.RTM_NEWADDR
+ sysRTM_DELADDR = C.RTM_DELADDR
+ sysRTM_IFINFO = C.RTM_IFINFO
+ sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+ sysRTM_DESYNC = C.RTM_DESYNC
+
+ sysRTA_DST = C.RTA_DST
+ sysRTA_GATEWAY = C.RTA_GATEWAY
+ sysRTA_NETMASK = C.RTA_NETMASK
+ sysRTA_GENMASK = C.RTA_GENMASK
+ sysRTA_IFP = C.RTA_IFP
+ sysRTA_IFA = C.RTA_IFA
+ sysRTA_AUTHOR = C.RTA_AUTHOR
+ sysRTA_BRD = C.RTA_BRD
+ sysRTA_SRC = C.RTA_SRC
+ sysRTA_SRCMASK = C.RTA_SRCMASK
+ sysRTA_LABEL = C.RTA_LABEL
+
+ sysRTAX_DST = C.RTAX_DST
+ sysRTAX_GATEWAY = C.RTAX_GATEWAY
+ sysRTAX_NETMASK = C.RTAX_NETMASK
+ sysRTAX_GENMASK = C.RTAX_GENMASK
+ sysRTAX_IFP = C.RTAX_IFP
+ sysRTAX_IFA = C.RTAX_IFA
+ sysRTAX_AUTHOR = C.RTAX_AUTHOR
+ sysRTAX_BRD = C.RTAX_BRD
+ sysRTAX_SRC = C.RTAX_SRC
+ sysRTAX_SRCMASK = C.RTAX_SRCMASK
+ sysRTAX_LABEL = C.RTAX_LABEL
+ sysRTAX_MAX = C.RTAX_MAX
+)
diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go
new file mode 100644
index 000000000..854906d9c
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// An InterfaceMessage represents an interface message.
+type InterfaceMessage struct {
+ Version int // message version
+ Type int // message type
+ Flags int // interface flags
+ Index int // interface index
+ Name string // interface name
+ Addrs []Addr // addresses
+
+ extOff int // offset of header extension
+ raw []byte // raw message
+}
+
+// An InterfaceAddrMessage represents an interface address message.
+type InterfaceAddrMessage struct {
+ Version int // message version
+ Type int // message type
+ Flags int // interface flags
+ Index int // interface index
+ Addrs []Addr // addresses
+
+ raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceMulticastAddrMessage represents an interface multicast
+// address message.
+type InterfaceMulticastAddrMessage struct {
+ Version int // message version
+	Type    int    // message type
+ Flags int // interface flags
+ Index int // interface index
+ Addrs []Addr // addresses
+
+ raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceAnnounceMessage represents an interface announcement
+// message.
+type InterfaceAnnounceMessage struct {
+ Version int // message version
+ Type int // message type
+ Index int // interface index
+ Name string // interface name
+ What int // what type of announcement
+
+ raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil }
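A hedged end-to-end sketch of how these interface messages are obtained in practice. It assumes the package's FetchRIB and ParseRIB entry points and the RIBTypeInterface constant, which are added elsewhere in this vendoring rather than in the files shown here, and it only runs on the BSD-family systems the package supports.

package main

import (
	"fmt"
	"log"
	"syscall"

	"golang.org/x/net/route"
)

func main() {
	// Fetch the interface list from the kernel's routing information base.
	rib, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeInterface, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Parse the raw RIB into routing messages and pick out the
	// InterfaceMessage values defined above.
	msgs, err := route.ParseRIB(route.RIBTypeInterface, rib)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range msgs {
		if im, ok := m.(*route.InterfaceMessage); ok {
			fmt.Printf("#%d %s flags=%#x\n", im.Index, im.Name, im.Flags)
		}
	}
}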
diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go
new file mode 100644
index 000000000..520d657b5
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_announce.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd
+
+package route
+
+func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < w.bodyOff {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceAnnounceMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Index: int(nativeEndian.Uint16(b[4:6])),
+ What: int(nativeEndian.Uint16(b[22:24])),
+ raw: b[:l],
+ }
+ for i := 0; i < 16; i++ {
+ if b[6+i] != 0 {
+ continue
+ }
+ m.Name = string(b[6 : 6+i])
+ break
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go
new file mode 100644
index 000000000..ac4e7a680
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_classic.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly netbsd
+
+package route
+
+import "runtime"
+
+func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < w.bodyOff {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ attrs := uint(nativeEndian.Uint32(b[4:8]))
+ if attrs&sysRTA_IFP == 0 {
+ return nil, nil
+ }
+ m := &InterfaceMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Addrs: make([]Addr, sysRTAX_MAX),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ Index: int(nativeEndian.Uint16(b[12:14])),
+ extOff: w.extOff,
+ raw: b[:l],
+ }
+ a, err := parseLinkAddr(b[w.bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ m.Addrs[sysRTAX_IFP] = a
+ m.Name = a.(*LinkAddr).Name
+ return m, nil
+}
+
+func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < w.bodyOff {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceAddrMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ raw: b[:l],
+ }
+ if runtime.GOOS == "netbsd" {
+ m.Index = int(nativeEndian.Uint16(b[16:18]))
+ } else {
+ m.Index = int(nativeEndian.Uint16(b[12:14]))
+ }
+ var err error
+ m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go
new file mode 100644
index 000000000..9f6f50c00
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_freebsd.go
@@ -0,0 +1,78 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) {
+ var extOff, bodyOff int
+ if typ == sysNET_RT_IFLISTL {
+ if len(b) < 20 {
+ return nil, errMessageTooShort
+ }
+ extOff = int(nativeEndian.Uint16(b[18:20]))
+ bodyOff = int(nativeEndian.Uint16(b[16:18]))
+ } else {
+ extOff = w.extOff
+ bodyOff = w.bodyOff
+ }
+ if len(b) < extOff || len(b) < bodyOff {
+ return nil, errInvalidMessage
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ attrs := uint(nativeEndian.Uint32(b[4:8]))
+ if attrs&sysRTA_IFP == 0 {
+ return nil, nil
+ }
+ m := &InterfaceMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ Index: int(nativeEndian.Uint16(b[12:14])),
+ Addrs: make([]Addr, sysRTAX_MAX),
+ extOff: extOff,
+ raw: b[:l],
+ }
+ a, err := parseLinkAddr(b[bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ m.Addrs[sysRTAX_IFP] = a
+ m.Name = a.(*LinkAddr).Name
+ return m, nil
+}
+
+func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) {
+ var bodyOff int
+ if typ == sysNET_RT_IFLISTL {
+ if len(b) < 24 {
+ return nil, errMessageTooShort
+ }
+ bodyOff = int(nativeEndian.Uint16(b[16:18]))
+ } else {
+ bodyOff = w.bodyOff
+ }
+ if len(b) < bodyOff {
+ return nil, errInvalidMessage
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceAddrMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ Index: int(nativeEndian.Uint16(b[12:14])),
+ raw: b[:l],
+ }
+ var err error
+ m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go
new file mode 100644
index 000000000..1e99a9cc6
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_multicast.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd
+
+package route
+
+func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < w.bodyOff {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceMulticastAddrMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ Index: int(nativeEndian.Uint16(b[12:14])),
+ raw: b[:l],
+ }
+ var err error
+ m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go
new file mode 100644
index 000000000..e4a143c1c
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_openbsd.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < 32 {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ attrs := uint(nativeEndian.Uint32(b[12:16]))
+ if attrs&sysRTA_IFP == 0 {
+ return nil, nil
+ }
+ m := &InterfaceMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[16:20])),
+ Index: int(nativeEndian.Uint16(b[6:8])),
+ Addrs: make([]Addr, sysRTAX_MAX),
+ raw: b[:l],
+ }
+ ll := int(nativeEndian.Uint16(b[4:6]))
+ if len(b) < ll {
+ return nil, errInvalidMessage
+ }
+ a, err := parseLinkAddr(b[ll:])
+ if err != nil {
+ return nil, err
+ }
+ m.Addrs[sysRTAX_IFP] = a
+ m.Name = a.(*LinkAddr).Name
+ return m, nil
+}
+
+func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < 24 {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ bodyOff := int(nativeEndian.Uint16(b[4:6]))
+ if len(b) < bodyOff {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceAddrMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[12:16])),
+ Index: int(nativeEndian.Uint16(b[6:8])),
+ raw: b[:l],
+ }
+ var err error
+ m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < 26 {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &InterfaceAnnounceMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Index: int(nativeEndian.Uint16(b[6:8])),
+ What: int(nativeEndian.Uint16(b[8:10])),
+ raw: b[:l],
+ }
+ for i := 0; i < 16; i++ {
+ if b[10+i] != 0 {
+ continue
+ }
+ m.Name = string(b[10 : 10+i])
+ break
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go
new file mode 100644
index 000000000..d7ae0eb50
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// A Message represents a routing message.
+//
+// Note: This interface will be changed to support the Marshal method
+// in a future version.
+type Message interface {
+ // Sys returns operating system-specific information.
+ Sys() []Sys
+}
+
+// A Sys represents operating system-specific information.
+type Sys interface {
+ // SysType returns a type of operating system-specific
+ // information.
+ SysType() SysType
+}
+
+// A SysType represents a type of operating system-specific
+// information.
+type SysType int
+
+const (
+ SysMetrics SysType = iota
+ SysStats
+)
+
+// ParseRIB parses b as a routing information base and returns a list
+// of routing messages.
+func ParseRIB(typ RIBType, b []byte) ([]Message, error) {
+ if !typ.parseable() {
+ return nil, errUnsupportedMessage
+ }
+ var msgs []Message
+ nmsgs, nskips := 0, 0
+ for len(b) > 4 {
+ nmsgs++
+ l := int(nativeEndian.Uint16(b[:2]))
+ if l == 0 {
+ return nil, errInvalidMessage
+ }
+ if len(b) < l {
+ return nil, errMessageTooShort
+ }
+ if b[2] != sysRTM_VERSION {
+ b = b[l:]
+ continue
+ }
+ mtyp := int(b[3])
+ if fn, ok := parseFns[mtyp]; !ok {
+ nskips++
+ } else {
+ m, err := fn(typ, b)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ nskips++
+ } else {
+ msgs = append(msgs, m)
+ }
+ }
+ b = b[l:]
+ }
+ // We failed to parse any of the messages - version mismatch?
+ if nmsgs != len(msgs)+nskips {
+ return nil, errMessageMismatch
+ }
+ return msgs, nil
+}
diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go
new file mode 100644
index 000000000..3fdd12df5
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message_darwin_test.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import "testing"
+
+func TestFetchAndParseRIBOnDarwin(t *testing.T) {
+ for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {
+ for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} {
+ ms, err := fetchAndParseRIB(af, typ)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ ss, err := msgs(ms).validate()
+ if err != nil {
+ t.Errorf("%v %d %v", addrFamily(af), typ, err)
+ continue
+ }
+ for _, s := range ss {
+ t.Log(s)
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go
new file mode 100644
index 000000000..785c273f6
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message_freebsd_test.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import (
+ "testing"
+ "time"
+ "unsafe"
+)
+
+func TestFetchAndParseRIBOnFreeBSD(t *testing.T) {
+ for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {
+ for _, typ := range []RIBType{sysNET_RT_IFMALIST} {
+ ms, err := fetchAndParseRIB(af, typ)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ ss, err := msgs(ms).validate()
+ if err != nil {
+ t.Errorf("%v %d %v", addrFamily(af), typ, err)
+ continue
+ }
+ for _, s := range ss {
+ t.Log(s)
+ }
+ }
+ }
+}
+
+func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) {
+ if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil {
+ t.Skip("NET_RT_IFLISTL not supported")
+ }
+ var p uintptr
+ if kernelAlign != int(unsafe.Sizeof(p)) {
+ t.Skip("NET_RT_IFLIST vs. NET_RT_IFLISTL doesn't work for 386 emulation on amd64")
+ }
+
+ var tests = [2]struct {
+ typ RIBType
+ b []byte
+ msgs []Message
+ ss []string
+ }{
+ {typ: sysNET_RT_IFLIST},
+ {typ: sysNET_RT_IFLISTL},
+ }
+ for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {
+ var lastErr error
+ for i := 0; i < 3; i++ {
+ for j := range tests {
+ var err error
+ if tests[j].b, err = FetchRIB(af, tests[j].typ, 0); err != nil {
+ lastErr = err
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+ if lastErr == nil {
+ break
+ }
+ }
+ if lastErr != nil {
+ t.Error(af, lastErr)
+ continue
+ }
+ for i := range tests {
+ var err error
+ if tests[i].msgs, err = ParseRIB(tests[i].typ, tests[i].b); err != nil {
+ lastErr = err
+ t.Error(af, err)
+ }
+ }
+ if lastErr != nil {
+ continue
+ }
+ for i := range tests {
+ var err error
+ tests[i].ss, err = msgs(tests[i].msgs).validate()
+ if err != nil {
+ lastErr = err
+ t.Error(af, err)
+ }
+ for _, s := range tests[i].ss {
+ t.Log(s)
+ }
+ }
+ if lastErr != nil {
+ continue
+ }
+ for i := len(tests) - 1; i > 0; i-- {
+ if len(tests[i].ss) != len(tests[i-1].ss) {
+ t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss)
+ continue
+ }
+ for j, s1 := range tests[i].ss {
+ s0 := tests[i-1].ss[j]
+ if s1 != s0 {
+ t.Errorf("got %s; want %s", s1, s0)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go
new file mode 100644
index 000000000..c0c7c57a9
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message_test.go
@@ -0,0 +1,118 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+ "os"
+ "syscall"
+ "testing"
+ "time"
+)
+
+func TestFetchAndParseRIB(t *testing.T) {
+ for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {
+ for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} {
+ ms, err := fetchAndParseRIB(af, typ)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ ss, err := msgs(ms).validate()
+ if err != nil {
+ t.Errorf("%v %d %v", addrFamily(af), typ, err)
+ continue
+ }
+ for _, s := range ss {
+ t.Log(s)
+ }
+ }
+ }
+}
+
+func TestMonitorAndParseRIB(t *testing.T) {
+ if testing.Short() || os.Getuid() != 0 {
+ t.Skip("must be root")
+ }
+
+ // We suppose that using an IPv4 link-local address and the
+ // dot1Q ID for Token Ring and FDDI doesn't harm anyone.
+ pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"}
+ if err := pv.configure(1002); err != nil {
+ t.Skip(err)
+ }
+ if err := pv.setup(); err != nil {
+ t.Skip(err)
+ }
+ pv.teardown()
+
+ s, err := syscall.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer syscall.Close(s)
+
+ go func() {
+ b := make([]byte, os.Getpagesize())
+ for {
+ n, err := syscall.Read(s, b)
+ if err != nil {
+ return
+ }
+ ms, err := ParseRIB(0, b[:n])
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ ss, err := msgs(ms).validate()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ for _, s := range ss {
+ t.Log(s)
+ }
+ }
+ }()
+
+ for _, vid := range []int{1002, 1003, 1004, 1005} {
+ pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"}
+ if err := pv.configure(vid); err != nil {
+ t.Fatal(err)
+ }
+ if err := pv.setup(); err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(200 * time.Millisecond)
+ if err := pv.teardown(); err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(200 * time.Millisecond)
+ }
+}
+
+func TestParseRIBWithFuzz(t *testing.T) {
+ for _, fuzz := range []string{
+ "0\x00\x05\x050000000000000000" +
+ "00000000000000000000" +
+ "00000000000000000000" +
+ "00000000000000000000" +
+ "0000000000000\x02000000" +
+ "00000000",
+ "\x02\x00\x05\f0000000000000000" +
+ "0\x0200000000000000",
+ "\x02\x00\x05\x100000000000000\x1200" +
+ "0\x00\xff\x00",
+ "\x02\x00\x05\f0000000000000000" +
+ "0\x12000\x00\x02\x0000",
+ "\x00\x00\x00\x01\x00",
+ "00000",
+ } {
+ for typ := RIBType(0); typ < 256; typ++ {
+ ParseRIB(typ, []byte(fuzz))
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go
new file mode 100644
index 000000000..c986e29eb
--- /dev/null
+++ b/vendor/golang.org/x/net/route/route.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+// Package route provides basic functions for the manipulation of
+// packet routing facilities on BSD variants.
+//
+// The package supports any version of Darwin, any version of
+// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and
+// OpenBSD 5.6 and above.
+package route
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+var (
+ errUnsupportedMessage = errors.New("unsupported message")
+ errMessageMismatch = errors.New("message mismatch")
+ errMessageTooShort = errors.New("message too short")
+ errInvalidMessage = errors.New("invalid message")
+ errInvalidAddr = errors.New("invalid address")
+)
+
+// A RouteMessage represents a message conveying an address prefix, a
+// nexthop address and an output interface.
+type RouteMessage struct {
+ Version int // message version
+ Type int // message type
+ Flags int // route flags
+	Index   int    // interface index when attached
+ Addrs []Addr // addresses
+
+ extOff int // offset of header extension
+ raw []byte // raw message
+}
+
+// A RIBType represents a type of routing information base.
+type RIBType int
+
+const (
+ RIBTypeRoute RIBType = syscall.NET_RT_DUMP
+ RIBTypeInterface RIBType = syscall.NET_RT_IFLIST
+)
+
+// FetchRIB fetches a routing information base from the operating
+// system.
+//
+// The provided af must be an address family.
+//
+// The provided arg must be a RIBType-specific argument.
+// When RIBType is related to routes, arg might be a set of route
+// flags. When RIBType is related to network interfaces, arg might be
+// an interface index or a set of interface flags. In most cases, zero
+// means a wildcard.
+func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) {
+ mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)}
+ n := uintptr(0)
+ if err := sysctl(mib[:], nil, &n, nil, 0); err != nil {
+ return nil, os.NewSyscallError("sysctl", err)
+ }
+ if n == 0 {
+ return nil, nil
+ }
+ b := make([]byte, n)
+ if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil {
+ return nil, os.NewSyscallError("sysctl", err)
+ }
+ return b[:n], nil
+}
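
For orientation, the two exported entry points above compose as follows. This is a minimal usage sketch, not part of the vendored diff, and it assumes a BSD build target where package route compiles; FetchRIB, ParseRIB, RIBTypeInterface and InterfaceMessage are the identifiers defined in this package.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/route"
)

func main() {
	// Fetch the raw interface-list RIB for all address families (0 == AF_UNSPEC).
	b, err := route.FetchRIB(0, route.RIBTypeInterface, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Parse the raw bytes into routing messages and print the interface entries.
	msgs, err := route.ParseRIB(route.RIBTypeInterface, b)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range msgs {
		if im, ok := m.(*route.InterfaceMessage); ok {
			fmt.Println(im.Index, im.Name)
		}
	}
}
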
diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go
new file mode 100644
index 000000000..d333c6aa5
--- /dev/null
+++ b/vendor/golang.org/x/net/route/route_classic.go
@@ -0,0 +1,31 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd
+
+package route
+
+func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) {
+ if len(b) < w.bodyOff {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &RouteMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[8:12])),
+ Index: int(nativeEndian.Uint16(b[4:6])),
+ extOff: w.extOff,
+ raw: b[:l],
+ }
+ var err error
+ m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:])
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go
new file mode 100644
index 000000000..76eae40d8
--- /dev/null
+++ b/vendor/golang.org/x/net/route/route_openbsd.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) {
+ if len(b) < 40 {
+ return nil, errMessageTooShort
+ }
+ l := int(nativeEndian.Uint16(b[:2]))
+ if len(b) < l {
+ return nil, errInvalidMessage
+ }
+ m := &RouteMessage{
+ Version: int(b[2]),
+ Type: int(b[3]),
+ Flags: int(nativeEndian.Uint32(b[16:20])),
+ Index: int(nativeEndian.Uint16(b[6:8])),
+ raw: b[:l],
+ }
+ ll := int(nativeEndian.Uint16(b[4:6]))
+ if len(b) < ll {
+ return nil, errInvalidMessage
+ }
+ as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:])
+ if err != nil {
+ return nil, err
+ }
+ m.Addrs = as
+ return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go
new file mode 100644
index 000000000..63fd8c561
--- /dev/null
+++ b/vendor/golang.org/x/net/route/route_test.go
@@ -0,0 +1,386 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+ "fmt"
+ "os/exec"
+ "runtime"
+ "time"
+)
+
+func (m *RouteMessage) String() string {
+ return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16])))
+}
+
+func (m *InterfaceMessage) String() string {
+ var attrs addrAttrs
+ if runtime.GOOS == "openbsd" {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+ } else {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+ }
+ return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceAddrMessage) String() string {
+ var attrs addrAttrs
+ if runtime.GOOS == "openbsd" {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+ } else {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+ }
+ return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceMulticastAddrMessage) String() string {
+ return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8])))
+}
+
+func (m *InterfaceAnnounceMessage) String() string {
+ what := "<nil>"
+ switch m.What {
+ case 0:
+ what = "arrival"
+ case 1:
+ what = "departure"
+ }
+ return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what)
+}
+
+func (m *InterfaceMetrics) String() string {
+ return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU)
+}
+
+func (m *RouteMetrics) String() string {
+ return fmt.Sprintf("(pmtu=%d)", m.PathMTU)
+}
+
+type addrAttrs uint
+
+var addrAttrNames = [...]string{
+ "dst",
+ "gateway",
+ "netmask",
+ "genmask",
+ "ifp",
+ "ifa",
+ "author",
+ "brd",
+ "df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd
+ "df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd
+ "df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd
+}
+
+func (attrs addrAttrs) String() string {
+ var s string
+ for i, name := range addrAttrNames {
+ if attrs&(1<<uint(i)) != 0 {
+ if s != "" {
+ s += "|"
+ }
+ s += name
+ }
+ }
+ if s == "" {
+ return "<nil>"
+ }
+ return s
+}
+
+type msgs []Message
+
+func (ms msgs) validate() ([]string, error) {
+ var ss []string
+ for _, m := range ms {
+ switch m := m.(type) {
+ case *RouteMessage:
+ if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil {
+ return nil, err
+ }
+ sys := m.Sys()
+ if sys == nil {
+ return nil, fmt.Errorf("no sys for %s", m.String())
+ }
+ ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+ case *InterfaceMessage:
+ var attrs addrAttrs
+ if runtime.GOOS == "openbsd" {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+ } else {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+ }
+ if err := addrs(m.Addrs).match(attrs); err != nil {
+ return nil, err
+ }
+ sys := m.Sys()
+ if sys == nil {
+ return nil, fmt.Errorf("no sys for %s", m.String())
+ }
+ ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+ case *InterfaceAddrMessage:
+ var attrs addrAttrs
+ if runtime.GOOS == "openbsd" {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+ } else {
+ attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+ }
+ if err := addrs(m.Addrs).match(attrs); err != nil {
+ return nil, err
+ }
+ ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+ case *InterfaceMulticastAddrMessage:
+ if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil {
+ return nil, err
+ }
+ ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+ case *InterfaceAnnounceMessage:
+ ss = append(ss, m.String())
+ default:
+ ss = append(ss, fmt.Sprintf("%+v", m))
+ }
+ }
+ return ss, nil
+}
+
+type syss []Sys
+
+func (sys syss) String() string {
+ var s string
+ for _, sy := range sys {
+ switch sy := sy.(type) {
+ case *InterfaceMetrics:
+ if len(s) > 0 {
+ s += " "
+ }
+ s += sy.String()
+ case *RouteMetrics:
+ if len(s) > 0 {
+ s += " "
+ }
+ s += sy.String()
+ }
+ }
+ return s
+}
+
+type addrFamily int
+
+func (af addrFamily) String() string {
+ switch af {
+ case sysAF_UNSPEC:
+ return "unspec"
+ case sysAF_LINK:
+ return "link"
+ case sysAF_INET:
+ return "inet4"
+ case sysAF_INET6:
+ return "inet6"
+ default:
+ return fmt.Sprintf("%d", af)
+ }
+}
+
+const hexDigit = "0123456789abcdef"
+
+type llAddr []byte
+
+func (a llAddr) String() string {
+ if len(a) == 0 {
+ return ""
+ }
+ buf := make([]byte, 0, len(a)*3-1)
+ for i, b := range a {
+ if i > 0 {
+ buf = append(buf, ':')
+ }
+ buf = append(buf, hexDigit[b>>4])
+ buf = append(buf, hexDigit[b&0xF])
+ }
+ return string(buf)
+}
+
+type ipAddr []byte
+
+func (a ipAddr) String() string {
+ if len(a) == 0 {
+ return "<nil>"
+ }
+ if len(a) == 4 {
+ return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3])
+ }
+ if len(a) == 16 {
+ return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])
+ }
+ s := make([]byte, len(a)*2)
+ for i, tn := range a {
+ s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]
+ }
+ return string(s)
+}
+
+func (a *LinkAddr) String() string {
+ name := a.Name
+ if name == "" {
+ name = "<nil>"
+ }
+ lla := llAddr(a.Addr).String()
+ if lla == "" {
+ lla = "<nil>"
+ }
+ return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla)
+}
+
+func (a *Inet4Addr) String() string {
+ return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:]))
+}
+
+func (a *Inet6Addr) String() string {
+ return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID)
+}
+
+func (a *DefaultAddr) String() string {
+ return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String())
+}
+
+type addrs []Addr
+
+func (as addrs) String() string {
+ var s string
+ for _, a := range as {
+ if a == nil {
+ continue
+ }
+ if len(s) > 0 {
+ s += " "
+ }
+ switch a := a.(type) {
+ case *LinkAddr:
+ s += a.String()
+ case *Inet4Addr:
+ s += a.String()
+ case *Inet6Addr:
+ s += a.String()
+ case *DefaultAddr:
+ s += a.String()
+ }
+ }
+ if s == "" {
+ return "<nil>"
+ }
+ return s
+}
+
+func (as addrs) match(attrs addrAttrs) error {
+ var ts addrAttrs
+ af := sysAF_UNSPEC
+ for i := range as {
+ if as[i] != nil {
+ ts |= 1 << uint(i)
+ }
+ switch as[i].(type) {
+ case *Inet4Addr:
+ if af == sysAF_UNSPEC {
+ af = sysAF_INET
+ }
+ if af != sysAF_INET {
+ return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+ }
+ case *Inet6Addr:
+ if af == sysAF_UNSPEC {
+ af = sysAF_INET6
+ }
+ if af != sysAF_INET6 {
+ return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+ }
+ }
+ }
+ if ts != attrs && ts > attrs {
+ return fmt.Errorf("%v not included in %v", ts, attrs)
+ }
+ return nil
+}
+
+func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) {
+ var err error
+ var b []byte
+ for i := 0; i < 3; i++ {
+ if b, err = FetchRIB(af, typ, 0); err != nil {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+ }
+ ms, err := ParseRIB(typ, b)
+ if err != nil {
+ return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+ }
+ return ms, nil
+}
+
+// propVirtual is a proprietary virtual network interface.
+type propVirtual struct {
+ name string
+ addr, mask string
+ setupCmds []*exec.Cmd
+ teardownCmds []*exec.Cmd
+}
+
+func (pv *propVirtual) setup() error {
+ for _, cmd := range pv.setupCmds {
+ if err := cmd.Run(); err != nil {
+ pv.teardown()
+ return err
+ }
+ }
+ return nil
+}
+
+func (pv *propVirtual) teardown() error {
+ for _, cmd := range pv.teardownCmds {
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (pv *propVirtual) configure(suffix int) error {
+ if runtime.GOOS == "openbsd" {
+ pv.name = fmt.Sprintf("vether%d", suffix)
+ } else {
+ pv.name = fmt.Sprintf("vlan%d", suffix)
+ }
+ xname, err := exec.LookPath("ifconfig")
+ if err != nil {
+ return err
+ }
+ pv.setupCmds = append(pv.setupCmds, &exec.Cmd{
+ Path: xname,
+ Args: []string{"ifconfig", pv.name, "create"},
+ })
+ if runtime.GOOS == "netbsd" {
+ // NetBSD requires an underlying dot1Q-capable network
+ // interface.
+ pv.setupCmds = append(pv.setupCmds, &exec.Cmd{
+ Path: xname,
+ Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"},
+ })
+ }
+ pv.setupCmds = append(pv.setupCmds, &exec.Cmd{
+ Path: xname,
+ Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask},
+ })
+ pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{
+ Path: xname,
+ Args: []string{"ifconfig", pv.name, "destroy"},
+ })
+ return nil
+}
diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go
new file mode 100644
index 000000000..80ca83ae1
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import "unsafe"
+
+var (
+ nativeEndian binaryByteOrder
+ kernelAlign int
+ parseFns map[int]parseFn
+)
+
+func init() {
+ i := uint32(1)
+ b := (*[4]byte)(unsafe.Pointer(&i))
+ if b[0] == 1 {
+ nativeEndian = littleEndian
+ } else {
+ nativeEndian = bigEndian
+ }
+ kernelAlign, parseFns = probeRoutingStack()
+}
+
+func roundup(l int) int {
+ if l == 0 {
+ return kernelAlign
+ }
+ return (l + kernelAlign - 1) & ^(kernelAlign - 1)
+}
+
+type parseFn func(RIBType, []byte) (Message, error)
+
+type wireFormat struct {
+ extOff int // offset of header extension
+ bodyOff int // offset of message body
+}
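
The roundup helper above pads lengths to the kernel's alignment for routing facilities; sockaddrs embedded in routing messages sit on these boundaries. A small illustrative sketch of the same arithmetic with the alignment passed explicitly (the roundupTo name exists only for this example):

// Same rounding as roundup, but with the alignment as a parameter.
func roundupTo(l, align int) int {
	if l == 0 {
		return align
	}
	return (l + align - 1) &^ (align - 1)
}

// With a 4-byte alignment: roundupTo(0, 4) == 4, roundupTo(5, 4) == 8, roundupTo(8, 4) == 8.
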
diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go
new file mode 100644
index 000000000..fff3a0fd1
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_darwin.go
@@ -0,0 +1,80 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+func (typ RIBType) parseable() bool {
+ switch typ {
+ case sysNET_RT_STAT, sysNET_RT_TRASH:
+ return false
+ default:
+ return true
+ }
+}
+
+// A RouteMetrics represents route metrics.
+type RouteMetrics struct {
+ PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),
+ },
+ }
+}
+
+// An InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+ Type int // interface type
+ MTU int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+ return []Sys{
+ &InterfaceMetrics{
+ Type: int(m.raw[m.extOff]),
+ MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+ },
+ }
+}
+
+func probeRoutingStack() (int, map[int]parseFn) {
+ rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15}
+ rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15}
+ ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15}
+ ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15}
+ ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15}
+ ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15}
+ ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15}
+ // Darwin kernels require 32-bit aligned access to routing facilities.
+ return 4, map[int]parseFn{
+ sysRTM_ADD: rtm.parseRouteMessage,
+ sysRTM_DELETE: rtm.parseRouteMessage,
+ sysRTM_CHANGE: rtm.parseRouteMessage,
+ sysRTM_GET: rtm.parseRouteMessage,
+ sysRTM_LOSING: rtm.parseRouteMessage,
+ sysRTM_REDIRECT: rtm.parseRouteMessage,
+ sysRTM_MISS: rtm.parseRouteMessage,
+ sysRTM_LOCK: rtm.parseRouteMessage,
+ sysRTM_RESOLVE: rtm.parseRouteMessage,
+ sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_DELADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_IFINFO: ifm.parseInterfaceMessage,
+ sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_IFINFO2: ifm2.parseInterfaceMessage,
+ sysRTM_NEWMADDR2: ifmam2.parseInterfaceMulticastAddrMessage,
+ sysRTM_GET2: rtm2.parseRouteMessage,
+ }
+}
diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go
new file mode 100644
index 000000000..da848b3d0
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_dragonfly.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import "unsafe"
+
+func (typ RIBType) parseable() bool { return true }
+
+// A RouteMetrics represents route metrics.
+type RouteMetrics struct {
+ PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+ },
+ }
+}
+
+// An InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+ Type int // interface type
+ MTU int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+ return []Sys{
+ &InterfaceMetrics{
+ Type: int(m.raw[m.extOff]),
+ MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+ },
+ }
+}
+
+func probeRoutingStack() (int, map[int]parseFn) {
+ var p uintptr
+ rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4}
+ ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4}
+ ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4}
+ ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4}
+ ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4}
+ return int(unsafe.Sizeof(p)), map[int]parseFn{
+ sysRTM_ADD: rtm.parseRouteMessage,
+ sysRTM_DELETE: rtm.parseRouteMessage,
+ sysRTM_CHANGE: rtm.parseRouteMessage,
+ sysRTM_GET: rtm.parseRouteMessage,
+ sysRTM_LOSING: rtm.parseRouteMessage,
+ sysRTM_REDIRECT: rtm.parseRouteMessage,
+ sysRTM_MISS: rtm.parseRouteMessage,
+ sysRTM_LOCK: rtm.parseRouteMessage,
+ sysRTM_RESOLVE: rtm.parseRouteMessage,
+ sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_DELADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_IFINFO: ifm.parseInterfaceMessage,
+ sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage,
+ }
+}
diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go
new file mode 100644
index 000000000..7b05c1a5a
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_freebsd.go
@@ -0,0 +1,150 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (typ RIBType) parseable() bool { return true }
+
+// A RouteMetrics represents route metrics.
+type RouteMetrics struct {
+ PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+ if kernelAlign == 8 {
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+ },
+ }
+ }
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),
+ },
+ }
+}
+
+// An InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+ Type int // interface type
+ MTU int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+ return []Sys{
+ &InterfaceMetrics{
+ Type: int(m.raw[m.extOff]),
+ MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+ },
+ }
+}
+
+func probeRoutingStack() (int, map[int]parseFn) {
+ var p uintptr
+ wordSize := int(unsafe.Sizeof(p))
+ align := int(unsafe.Sizeof(p))
+ // In the case of kern.supported_archs="amd64 i386", we need
+ // to know the underlying kernel's architecture because the
+	// alignment for routing facilities is set at the build time
+ // of the kernel.
+ conf, _ := syscall.Sysctl("kern.conftxt")
+ for i, j := 0, 0; j < len(conf); j++ {
+ if conf[j] != '\n' {
+ continue
+ }
+ s := conf[i:j]
+ i = j + 1
+ if len(s) > len("machine") && s[:len("machine")] == "machine" {
+ s = s[len("machine"):]
+ for k := 0; k < len(s); k++ {
+ if s[k] == ' ' || s[k] == '\t' {
+ s = s[1:]
+ }
+ break
+ }
+ if s == "amd64" {
+ align = 8
+ }
+ break
+ }
+ }
+ var rtm, ifm, ifam, ifmam, ifanm *wireFormat
+ if align != wordSize { // 386 emulation on amd64
+ rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu}
+ ifm = &wireFormat{extOff: 16}
+ ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu}
+ ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu}
+ ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu}
+ } else {
+ rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10}
+ ifm = &wireFormat{extOff: 16}
+ ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10}
+ ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10}
+ ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10}
+ }
+ rel, _ := syscall.SysctlUint32("kern.osreldate")
+ switch {
+ case rel < 800000:
+ if align != wordSize { // 386 emulation on amd64
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu
+ } else {
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD7
+ }
+ case 800000 <= rel && rel < 900000:
+ if align != wordSize { // 386 emulation on amd64
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu
+ } else {
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD8
+ }
+ case 900000 <= rel && rel < 1000000:
+ if align != wordSize { // 386 emulation on amd64
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu
+ } else {
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD9
+ }
+ case 1000000 <= rel && rel < 1100000:
+ if align != wordSize { // 386 emulation on amd64
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu
+ } else {
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD10
+ }
+ default:
+ if align != wordSize { // 386 emulation on amd64
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu
+ } else {
+ ifm.bodyOff = sizeofIfMsghdrFreeBSD11
+ }
+ }
+ return align, map[int]parseFn{
+ sysRTM_ADD: rtm.parseRouteMessage,
+ sysRTM_DELETE: rtm.parseRouteMessage,
+ sysRTM_CHANGE: rtm.parseRouteMessage,
+ sysRTM_GET: rtm.parseRouteMessage,
+ sysRTM_LOSING: rtm.parseRouteMessage,
+ sysRTM_REDIRECT: rtm.parseRouteMessage,
+ sysRTM_MISS: rtm.parseRouteMessage,
+ sysRTM_LOCK: rtm.parseRouteMessage,
+ sysRTM_RESOLVE: rtm.parseRouteMessage,
+ sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_DELADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_IFINFO: ifm.parseInterfaceMessage,
+ sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage,
+ sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage,
+ }
+}
diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go
new file mode 100644
index 000000000..4d8076b51
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_netbsd.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+func (typ RIBType) parseable() bool { return true }
+
+// A RouteMetrics represents route metrics.
+type RouteMetrics struct {
+ PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+ },
+ }
+}
+
+// An InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+ Type int // interface type
+ MTU int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+ return []Sys{
+ &InterfaceMetrics{
+ Type: int(m.raw[m.extOff]),
+ MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+ },
+ }
+}
+
+func probeRoutingStack() (int, map[int]parseFn) {
+ rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7}
+ ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7}
+ ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7}
+ ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7}
+ // NetBSD 6 and above kernels require 64-bit aligned access to
+ // routing facilities.
+ return 8, map[int]parseFn{
+ sysRTM_ADD: rtm.parseRouteMessage,
+ sysRTM_DELETE: rtm.parseRouteMessage,
+ sysRTM_CHANGE: rtm.parseRouteMessage,
+ sysRTM_GET: rtm.parseRouteMessage,
+ sysRTM_LOSING: rtm.parseRouteMessage,
+ sysRTM_REDIRECT: rtm.parseRouteMessage,
+ sysRTM_MISS: rtm.parseRouteMessage,
+ sysRTM_LOCK: rtm.parseRouteMessage,
+ sysRTM_RESOLVE: rtm.parseRouteMessage,
+ sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_DELADDR: ifam.parseInterfaceAddrMessage,
+ sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage,
+ sysRTM_IFINFO: ifm.parseInterfaceMessage,
+ }
+}
diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go
new file mode 100644
index 000000000..26d043869
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_openbsd.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import "unsafe"
+
+func (typ RIBType) parseable() bool {
+ switch typ {
+ case sysNET_RT_STATS, sysNET_RT_TABLE:
+ return false
+ default:
+ return true
+ }
+}
+
+// A RouteMetrics represents route metrics.
+type RouteMetrics struct {
+ PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+ return []Sys{
+ &RouteMetrics{
+ PathMTU: int(nativeEndian.Uint32(m.raw[60:64])),
+ },
+ }
+}
+
+// An InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+ Type int // interface type
+ MTU int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+ return []Sys{
+ &InterfaceMetrics{
+ Type: int(m.raw[24]),
+ MTU: int(nativeEndian.Uint32(m.raw[28:32])),
+ },
+ }
+}
+
+func probeRoutingStack() (int, map[int]parseFn) {
+ var p uintptr
+ nooff := &wireFormat{extOff: -1, bodyOff: -1}
+ return int(unsafe.Sizeof(p)), map[int]parseFn{
+ sysRTM_ADD: nooff.parseRouteMessage,
+ sysRTM_DELETE: nooff.parseRouteMessage,
+ sysRTM_CHANGE: nooff.parseRouteMessage,
+ sysRTM_GET: nooff.parseRouteMessage,
+ sysRTM_LOSING: nooff.parseRouteMessage,
+ sysRTM_REDIRECT: nooff.parseRouteMessage,
+ sysRTM_MISS: nooff.parseRouteMessage,
+ sysRTM_LOCK: nooff.parseRouteMessage,
+ sysRTM_RESOLVE: nooff.parseRouteMessage,
+ sysRTM_NEWADDR: nooff.parseInterfaceAddrMessage,
+ sysRTM_DELADDR: nooff.parseInterfaceAddrMessage,
+ sysRTM_IFINFO: nooff.parseInterfaceMessage,
+ sysRTM_IFANNOUNCE: nooff.parseInterfaceAnnounceMessage,
+ }
+}
diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go
new file mode 100644
index 000000000..d136325a3
--- /dev/null
+++ b/vendor/golang.org/x/net/route/syscall.go
@@ -0,0 +1,33 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// TODO: replace with runtime.KeepAlive when available
+//go:noescape
+func keepAlive(p unsafe.Pointer)
+
+var zero uintptr
+
+func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
+ var p unsafe.Pointer
+ if len(mib) > 0 {
+ p = unsafe.Pointer(&mib[0])
+ } else {
+ p = unsafe.Pointer(&zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ keepAlive(p)
+ if errno != 0 {
+ return error(errno)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/net/route/syscall.s b/vendor/golang.org/x/net/route/syscall.s
new file mode 100644
index 000000000..fa6297f0a
--- /dev/null
+++ b/vendor/golang.org/x/net/route/syscall.s
@@ -0,0 +1,8 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·keepAlive(SB),NOSPLIT,$0
+ RET
diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go
new file mode 100644
index 000000000..265b81cd5
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_darwin.go
@@ -0,0 +1,93 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_darwin.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x1e
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_STAT = 0x4
+ sysNET_RT_TRASH = 0x5
+ sysNET_RT_IFLIST2 = 0x6
+ sysNET_RT_DUMP2 = 0x7
+ sysNET_RT_MAXID = 0xa
+)
+
+const (
+ sysCTL_MAXNAME = 0xc
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_MAXID = 0x9
+)
+
+const (
+ sysRTM_VERSION = 0x5
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_OLDADD = 0x9
+ sysRTM_OLDDEL = 0xa
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_NEWMADDR = 0xf
+ sysRTM_DELMADDR = 0x10
+ sysRTM_IFINFO2 = 0x12
+ sysRTM_NEWMADDR2 = 0x13
+ sysRTM_GET2 = 0x14
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_MAX = 0x8
+)
+
+const (
+ sizeofIfMsghdrDarwin15 = 0x70
+ sizeofIfaMsghdrDarwin15 = 0x14
+ sizeofIfmaMsghdrDarwin15 = 0x10
+ sizeofIfMsghdr2Darwin15 = 0xa0
+ sizeofIfmaMsghdr2Darwin15 = 0x14
+ sizeofIfDataDarwin15 = 0x60
+ sizeofIfData64Darwin15 = 0x80
+
+ sizeofRtMsghdrDarwin15 = 0x5c
+ sizeofRtMsghdr2Darwin15 = 0x5c
+ sizeofRtMetricsDarwin15 = 0x38
+)
diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go
new file mode 100644
index 000000000..dd36dece0
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go
@@ -0,0 +1,92 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_dragonfly.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x1c
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_MAXID = 0x4
+)
+
+const (
+ sysCTL_MAXNAME = 0xc
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_P1003_1B = 0x9
+ sysCTL_LWKT = 0xa
+ sysCTL_MAXID = 0xb
+)
+
+const (
+ sysRTM_VERSION = 0x6
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_OLDADD = 0x9
+ sysRTM_OLDDEL = 0xa
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_NEWMADDR = 0xf
+ sysRTM_DELMADDR = 0x10
+ sysRTM_IFANNOUNCE = 0x11
+ sysRTM_IEEE80211 = 0x12
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+ sysRTA_MPLS1 = 0x100
+ sysRTA_MPLS2 = 0x200
+ sysRTA_MPLS3 = 0x400
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_MPLS1 = 0x8
+ sysRTAX_MPLS2 = 0x9
+ sysRTAX_MPLS3 = 0xa
+ sysRTAX_MAX = 0xb
+)
+
+const (
+ sizeofIfMsghdrDragonFlyBSD4 = 0xb0
+ sizeofIfaMsghdrDragonFlyBSD4 = 0x14
+ sizeofIfmaMsghdrDragonFlyBSD4 = 0x10
+ sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18
+
+ sizeofRtMsghdrDragonFlyBSD4 = 0x98
+ sizeofRtMetricsDragonFlyBSD4 = 0x70
+)
diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go
new file mode 100644
index 000000000..9bac2e390
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go
@@ -0,0 +1,120 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x1c
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_IFMALIST = 0x4
+ sysNET_RT_IFLISTL = 0x5
+)
+
+const (
+ sysCTL_MAXNAME = 0x18
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_P1003_1B = 0x9
+)
+
+const (
+ sysRTM_VERSION = 0x5
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_NEWMADDR = 0xf
+ sysRTM_DELMADDR = 0x10
+ sysRTM_IFANNOUNCE = 0x11
+ sysRTM_IEEE80211 = 0x12
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_MAX = 0x8
+)
+
+const (
+ sizeofIfMsghdrlFreeBSD10 = 0x68
+ sizeofIfaMsghdrFreeBSD10 = 0x14
+ sizeofIfaMsghdrlFreeBSD10 = 0x6c
+ sizeofIfmaMsghdrFreeBSD10 = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
+
+ sizeofRtMsghdrFreeBSD10 = 0x5c
+ sizeofRtMetricsFreeBSD10 = 0x38
+
+ sizeofIfMsghdrFreeBSD7 = 0x60
+ sizeofIfMsghdrFreeBSD8 = 0x60
+ sizeofIfMsghdrFreeBSD9 = 0x60
+ sizeofIfMsghdrFreeBSD10 = 0x64
+ sizeofIfMsghdrFreeBSD11 = 0xa8
+
+ sizeofIfDataFreeBSD7 = 0x50
+ sizeofIfDataFreeBSD8 = 0x50
+ sizeofIfDataFreeBSD9 = 0x50
+ sizeofIfDataFreeBSD10 = 0x54
+ sizeofIfDataFreeBSD11 = 0x98
+
+ // MODIFIED BY HAND FOR 386 EMULATION ON AMD64
+ // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT
+
+ sizeofIfMsghdrlFreeBSD10Emu = 0xb0
+ sizeofIfaMsghdrFreeBSD10Emu = 0x14
+ sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
+ sizeofIfmaMsghdrFreeBSD10Emu = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
+
+ sizeofRtMsghdrFreeBSD10Emu = 0x98
+ sizeofRtMetricsFreeBSD10Emu = 0x70
+
+ sizeofIfMsghdrFreeBSD7Emu = 0xa8
+ sizeofIfMsghdrFreeBSD8Emu = 0xa8
+ sizeofIfMsghdrFreeBSD9Emu = 0xa8
+ sizeofIfMsghdrFreeBSD10Emu = 0xa8
+ sizeofIfMsghdrFreeBSD11Emu = 0xa8
+
+ sizeofIfDataFreeBSD7Emu = 0x98
+ sizeofIfDataFreeBSD8Emu = 0x98
+ sizeofIfDataFreeBSD9Emu = 0x98
+ sizeofIfDataFreeBSD10Emu = 0x98
+ sizeofIfDataFreeBSD11Emu = 0x98
+)
diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go
new file mode 100644
index 000000000..b1920d7ac
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go
@@ -0,0 +1,117 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x1c
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_IFMALIST = 0x4
+ sysNET_RT_IFLISTL = 0x5
+)
+
+const (
+ sysCTL_MAXNAME = 0x18
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_P1003_1B = 0x9
+)
+
+const (
+ sysRTM_VERSION = 0x5
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_NEWMADDR = 0xf
+ sysRTM_DELMADDR = 0x10
+ sysRTM_IFANNOUNCE = 0x11
+ sysRTM_IEEE80211 = 0x12
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_MAX = 0x8
+)
+
+const (
+ sizeofIfMsghdrlFreeBSD10 = 0xb0
+ sizeofIfaMsghdrFreeBSD10 = 0x14
+ sizeofIfaMsghdrlFreeBSD10 = 0xb0
+ sizeofIfmaMsghdrFreeBSD10 = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
+
+ sizeofRtMsghdrFreeBSD10 = 0x98
+ sizeofRtMetricsFreeBSD10 = 0x70
+
+ sizeofIfMsghdrFreeBSD7 = 0xa8
+ sizeofIfMsghdrFreeBSD8 = 0xa8
+ sizeofIfMsghdrFreeBSD9 = 0xa8
+ sizeofIfMsghdrFreeBSD10 = 0xa8
+ sizeofIfMsghdrFreeBSD11 = 0xa8
+
+ sizeofIfDataFreeBSD7 = 0x98
+ sizeofIfDataFreeBSD8 = 0x98
+ sizeofIfDataFreeBSD9 = 0x98
+ sizeofIfDataFreeBSD10 = 0x98
+ sizeofIfDataFreeBSD11 = 0x98
+
+ sizeofIfMsghdrlFreeBSD10Emu = 0xb0
+ sizeofIfaMsghdrFreeBSD10Emu = 0x14
+ sizeofIfaMsghdrlFreeBSD10Emu = 0xb0
+ sizeofIfmaMsghdrFreeBSD10Emu = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
+
+ sizeofRtMsghdrFreeBSD10Emu = 0x98
+ sizeofRtMetricsFreeBSD10Emu = 0x70
+
+ sizeofIfMsghdrFreeBSD7Emu = 0xa8
+ sizeofIfMsghdrFreeBSD8Emu = 0xa8
+ sizeofIfMsghdrFreeBSD9Emu = 0xa8
+ sizeofIfMsghdrFreeBSD10Emu = 0xa8
+ sizeofIfMsghdrFreeBSD11Emu = 0xa8
+
+ sizeofIfDataFreeBSD7Emu = 0x98
+ sizeofIfDataFreeBSD8Emu = 0x98
+ sizeofIfDataFreeBSD9Emu = 0x98
+ sizeofIfDataFreeBSD10Emu = 0x98
+ sizeofIfDataFreeBSD11Emu = 0x98
+)
diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go
new file mode 100644
index 000000000..a034d6fcb
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go
@@ -0,0 +1,117 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x1c
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_IFMALIST = 0x4
+ sysNET_RT_IFLISTL = 0x5
+)
+
+const (
+ sysCTL_MAXNAME = 0x18
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_P1003_1B = 0x9
+)
+
+const (
+ sysRTM_VERSION = 0x5
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_NEWMADDR = 0xf
+ sysRTM_DELMADDR = 0x10
+ sysRTM_IFANNOUNCE = 0x11
+ sysRTM_IEEE80211 = 0x12
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_MAX = 0x8
+)
+
+const (
+ sizeofIfMsghdrlFreeBSD10 = 0x68
+ sizeofIfaMsghdrFreeBSD10 = 0x14
+ sizeofIfaMsghdrlFreeBSD10 = 0x6c
+ sizeofIfmaMsghdrFreeBSD10 = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10 = 0x18
+
+ sizeofRtMsghdrFreeBSD10 = 0x5c
+ sizeofRtMetricsFreeBSD10 = 0x38
+
+ sizeofIfMsghdrFreeBSD7 = 0x70
+ sizeofIfMsghdrFreeBSD8 = 0x70
+ sizeofIfMsghdrFreeBSD9 = 0x70
+ sizeofIfMsghdrFreeBSD10 = 0x70
+ sizeofIfMsghdrFreeBSD11 = 0xa8
+
+ sizeofIfDataFreeBSD7 = 0x60
+ sizeofIfDataFreeBSD8 = 0x60
+ sizeofIfDataFreeBSD9 = 0x60
+ sizeofIfDataFreeBSD10 = 0x60
+ sizeofIfDataFreeBSD11 = 0x98
+
+ sizeofIfMsghdrlFreeBSD10Emu = 0x68
+ sizeofIfaMsghdrFreeBSD10Emu = 0x14
+ sizeofIfaMsghdrlFreeBSD10Emu = 0x6c
+ sizeofIfmaMsghdrFreeBSD10Emu = 0x10
+ sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18
+
+ sizeofRtMsghdrFreeBSD10Emu = 0x5c
+ sizeofRtMetricsFreeBSD10Emu = 0x38
+
+ sizeofIfMsghdrFreeBSD7Emu = 0x70
+ sizeofIfMsghdrFreeBSD8Emu = 0x70
+ sizeofIfMsghdrFreeBSD9Emu = 0x70
+ sizeofIfMsghdrFreeBSD10Emu = 0x70
+ sizeofIfMsghdrFreeBSD11Emu = 0xa8
+
+ sizeofIfDataFreeBSD7Emu = 0x60
+ sizeofIfDataFreeBSD8Emu = 0x60
+ sizeofIfDataFreeBSD9Emu = 0x60
+ sizeofIfDataFreeBSD10Emu = 0x60
+ sizeofIfDataFreeBSD11Emu = 0x98
+)
diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go
new file mode 100644
index 000000000..aa4aad161
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_netbsd.go
@@ -0,0 +1,91 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_netbsd.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x22
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x18
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x5
+ sysNET_RT_MAXID = 0x6
+)
+
+const (
+ sysCTL_MAXNAME = 0xc
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_VFS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_USER = 0x8
+ sysCTL_DDB = 0x9
+ sysCTL_PROC = 0xa
+ sysCTL_VENDOR = 0xb
+ sysCTL_EMUL = 0xc
+ sysCTL_SECURITY = 0xd
+ sysCTL_MAXID = 0xe
+)
+
+const (
+ sysRTM_VERSION = 0x4
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_OLDADD = 0x9
+ sysRTM_OLDDEL = 0xa
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFANNOUNCE = 0x10
+ sysRTM_IEEE80211 = 0x11
+ sysRTM_SETGATE = 0x12
+ sysRTM_LLINFO_UPD = 0x13
+ sysRTM_IFINFO = 0x14
+ sysRTM_CHGADDR = 0x15
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+ sysRTA_TAG = 0x100
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_TAG = 0x8
+ sysRTAX_MAX = 0x9
+)
+
+const (
+ sizeofIfMsghdrNetBSD7 = 0x98
+ sizeofIfaMsghdrNetBSD7 = 0x18
+ sizeofIfAnnouncemsghdrNetBSD7 = 0x18
+
+ sizeofRtMsghdrNetBSD7 = 0x78
+ sizeofRtMetricsNetBSD7 = 0x50
+)
diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go
new file mode 100644
index 000000000..4fadc4e8f
--- /dev/null
+++ b/vendor/golang.org/x/net/route/zsys_openbsd.go
@@ -0,0 +1,80 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_openbsd.go
+
+package route
+
+const (
+ sysAF_UNSPEC = 0x0
+ sysAF_INET = 0x2
+ sysAF_ROUTE = 0x11
+ sysAF_LINK = 0x12
+ sysAF_INET6 = 0x18
+
+ sysNET_RT_DUMP = 0x1
+ sysNET_RT_FLAGS = 0x2
+ sysNET_RT_IFLIST = 0x3
+ sysNET_RT_STATS = 0x4
+ sysNET_RT_TABLE = 0x5
+ sysNET_RT_IFNAMES = 0x6
+ sysNET_RT_MAXID = 0x7
+)
+
+const (
+ sysCTL_MAXNAME = 0xc
+
+ sysCTL_UNSPEC = 0x0
+ sysCTL_KERN = 0x1
+ sysCTL_VM = 0x2
+ sysCTL_FS = 0x3
+ sysCTL_NET = 0x4
+ sysCTL_DEBUG = 0x5
+ sysCTL_HW = 0x6
+ sysCTL_MACHDEP = 0x7
+ sysCTL_DDB = 0x9
+ sysCTL_VFS = 0xa
+ sysCTL_MAXID = 0xb
+)
+
+const (
+ sysRTM_VERSION = 0x5
+
+ sysRTM_ADD = 0x1
+ sysRTM_DELETE = 0x2
+ sysRTM_CHANGE = 0x3
+ sysRTM_GET = 0x4
+ sysRTM_LOSING = 0x5
+ sysRTM_REDIRECT = 0x6
+ sysRTM_MISS = 0x7
+ sysRTM_LOCK = 0x8
+ sysRTM_RESOLVE = 0xb
+ sysRTM_NEWADDR = 0xc
+ sysRTM_DELADDR = 0xd
+ sysRTM_IFINFO = 0xe
+ sysRTM_IFANNOUNCE = 0xf
+ sysRTM_DESYNC = 0x10
+
+ sysRTA_DST = 0x1
+ sysRTA_GATEWAY = 0x2
+ sysRTA_NETMASK = 0x4
+ sysRTA_GENMASK = 0x8
+ sysRTA_IFP = 0x10
+ sysRTA_IFA = 0x20
+ sysRTA_AUTHOR = 0x40
+ sysRTA_BRD = 0x80
+ sysRTA_SRC = 0x100
+ sysRTA_SRCMASK = 0x200
+ sysRTA_LABEL = 0x400
+
+ sysRTAX_DST = 0x0
+ sysRTAX_GATEWAY = 0x1
+ sysRTAX_NETMASK = 0x2
+ sysRTAX_GENMASK = 0x3
+ sysRTAX_IFP = 0x4
+ sysRTAX_IFA = 0x5
+ sysRTAX_AUTHOR = 0x6
+ sysRTAX_BRD = 0x7
+ sysRTAX_SRC = 0x8
+ sysRTAX_SRCMASK = 0x9
+ sysRTAX_LABEL = 0xa
+ sysRTAX_MAX = 0xb
+)
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 000000000..e66c7e328
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+}).Parse(eventsHTML))
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+ var ok bool
+ data.Family, data.Bucket, ok = parseEventsArgs(req)
+ if !ok {
+ // No-op
+ } else {
+ data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+ }
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl.Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the time of the event.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+ // el.discarded starts at two to count for the event it
+ // is replacing, plus the next one that we are about to
+ // drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
+
+const eventsHTML = `
+<html>
+ <head>
+ <title>events</title>
+ </head>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#req-status td.family {
+ padding-right: 2em;
+ }
+ table#req-status td.active {
+ padding-right: 1em;
+ }
+ table#req-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ <body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+ {{range $i, $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{range $j, $bucket := $.Buckets}}
+ {{$n := index $.Counts $i $j}}
+ <td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} {{$bucket.String}}]
+ {{if $n}}</a>{{end}}
+ </td>
+ {{end}}
+
+ </tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+ <tr><th>When</th><th>Elapsed</th></tr>
+ {{range $el := $.EventLogs}}
+ <tr class="first">
+ <td class="when">{{$el.When}}</td>
+ <td class="elapsed">{{$el.ElapsedTime}}</td>
+ <td>{{$el.Title}}
+ </tr>
+ {{if $.Expanded}}
+ <tr>
+ <td class="when"></td>
+ <td class="elapsed"></td>
+ <td><pre>{{$el.Stack|trimSpace}}</pre></td>
+ </tr>
+ {{range $el.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}}
+ </body>
+</html>
+`
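
For context, a minimal usage sketch of the EventLog API added above; the "example.Worker" family name, the doWork helper, and the localhost:8080 address are illustrative and not part of this patch.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/trace"
)

// doWork is a hypothetical stand-in for whatever the long-lived object does.
func doWork() error { return nil }

func main() {
	// NewEventLog attaches this log to the "example.Worker" family shown
	// on /debug/events; Printf and Errorf add timestamped entries.
	el := trace.NewEventLog("example.Worker", "worker-1")
	defer el.Finish()

	el.Printf("starting up")
	if err := doWork(); err != nil {
		el.Errorf("doWork failed: %v", err) // Errorf marks the entry as an error
	}

	// /debug/events is registered on http.DefaultServeMux by the trace
	// package's init function (see trace.go later in this patch).
	go http.ListenAndServe("localhost:8080", nil)
	time.Sleep(time.Second)
}
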
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 000000000..bb42aa532
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+ sum int64 // running total of measurements
+ sumOfSquares float64 // running total of squares of measurements
+ buckets []int64 // bucketed values for histogram
+ value int // holds a single value as an optimization
+ valueCount int64 // number of values recorded for single value
+}
+
+// addMeasurement records a value observation in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+ n += 1
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
+
+// Total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+ total += int64(val)
+ }
+ return
+}
+
+// Average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// Variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// StandardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// PercentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// Median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl.Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+ <td style="padding:0.25em">Count: {{.Count}}</td>
+ <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+ <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+ <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+ <tr>
+ <td style="padding:0 0 0 0.25em">[</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+ <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+ <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+ </tr>
+{{end}}
+{{end}}
+</table>
+`))
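
The histogram above spaces its buckets in powers of two; for instance, a measurement of 100 has log2 = 7 and lands in bucket 6, which covers [64, 128). Below is a small standalone sketch that mirrors the unexported log2, getBucket and bucketBoundary helpers, reimplemented here only for illustration and not part of this patch.

package main

import "fmt"

// log2 mirrors the helper in histogram.go: number of bits needed for i.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

// getBucket maps a value to a bucket index, clamped to [0, bucketCount).
func getBucket(i int64) int {
	index := log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= 38 { // bucketCount
		index = 37
	}
	return index
}

// bucketBoundary returns the first value in the bucket.
func bucketBoundary(bucket uint8) int64 {
	if bucket == 0 {
		return 0
	}
	return 1 << bucket
}

func main() {
	for _, v := range []int64{0, 1, 2, 100, 1023, 1024} {
		b := getBucket(v)
		fmt.Printf("value %4d -> bucket %2d covering [%d, %d)\n",
			v, b, bucketBoundary(uint8(b)), bucketBoundary(uint8(b+1)))
	}
}
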
diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go
new file mode 100644
index 000000000..d384b9332
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram_test.go
@@ -0,0 +1,325 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "math"
+ "testing"
+)
+
+type sumTest struct {
+ value int64
+ sum int64
+ sumOfSquares float64
+ total int64
+}
+
+var sumTests = []sumTest{
+ {100, 100, 10000, 1},
+ {50, 150, 12500, 2},
+ {50, 200, 15000, 3},
+ {50, 250, 17500, 4},
+}
+
+type bucketingTest struct {
+ in int64
+ log int
+ bucket int
+}
+
+var bucketingTests = []bucketingTest{
+ {0, 0, 0},
+ {1, 1, 0},
+ {2, 2, 1},
+ {3, 2, 1},
+ {4, 3, 2},
+ {1000, 10, 9},
+ {1023, 10, 9},
+ {1024, 11, 10},
+ {1000000, 20, 19},
+}
+
+type multiplyTest struct {
+ in int64
+ ratio float64
+ expectedSum int64
+ expectedTotal int64
+ expectedSumOfSquares float64
+}
+
+var multiplyTests = []multiplyTest{
+ {15, 2.5, 37, 2, 562.5},
+ {128, 4.6, 758, 13, 77953.9},
+}
+
+type percentileTest struct {
+ fraction float64
+ expected int64
+}
+
+var percentileTests = []percentileTest{
+ {0.25, 48},
+ {0.5, 96},
+ {0.6, 109},
+ {0.75, 128},
+ {0.90, 205},
+ {0.95, 230},
+ {0.99, 256},
+}
+
+func TestSum(t *testing.T) {
+ var h histogram
+
+ for _, test := range sumTests {
+ h.addMeasurement(test.value)
+ sum := h.sum
+ if sum != test.sum {
+ t.Errorf("h.Sum = %v WANT: %v", sum, test.sum)
+ }
+
+ sumOfSquares := h.sumOfSquares
+ if sumOfSquares != test.sumOfSquares {
+ t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares)
+ }
+
+ total := h.total()
+ if total != test.total {
+ t.Errorf("h.Total = %v WANT: %v", total, test.total)
+ }
+ }
+}
+
+func TestMultiply(t *testing.T) {
+ var h histogram
+ for i, test := range multiplyTests {
+ h.addMeasurement(test.in)
+ h.Multiply(test.ratio)
+ if h.sum != test.expectedSum {
+ t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum)
+ }
+ if h.total() != test.expectedTotal {
+ t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal)
+ }
+ if h.sumOfSquares != test.expectedSumOfSquares {
+ t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares)
+ }
+ }
+}
+
+func TestBucketingFunctions(t *testing.T) {
+ for _, test := range bucketingTests {
+ log := log2(test.in)
+ if log != test.log {
+ t.Errorf("log2 = %v WANT: %v", log, test.log)
+ }
+
+ bucket := getBucket(test.in)
+ if bucket != test.bucket {
+ t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket)
+ }
+ }
+}
+
+func TestAverage(t *testing.T) {
+ a := new(histogram)
+ average := a.average()
+ if average != 0 {
+ t.Errorf("Average of empty histogram was %v WANT: 0", average)
+ }
+
+ a.addMeasurement(1)
+ a.addMeasurement(1)
+ a.addMeasurement(3)
+ const expected = float64(5) / float64(3)
+ average = a.average()
+
+ if !isApproximate(average, expected) {
+ t.Errorf("Average = %g WANT: %v", average, expected)
+ }
+}
+
+func TestStandardDeviation(t *testing.T) {
+ a := new(histogram)
+ add(a, 10, 1<<4)
+ add(a, 10, 1<<5)
+ add(a, 10, 1<<6)
+ stdDev := a.standardDeviation()
+ const expected = 19.95
+
+ if !isApproximate(stdDev, expected) {
+ t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected)
+ }
+
+ // No values
+ a = new(histogram)
+ stdDev = a.standardDeviation()
+
+ if !isApproximate(stdDev, 0) {
+ t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+ }
+
+ add(a, 1, 1<<4)
+ if !isApproximate(stdDev, 0) {
+ t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+ }
+
+ add(a, 10, 1<<4)
+ if !isApproximate(stdDev, 0) {
+ t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+ }
+}
+
+func TestPercentileBoundary(t *testing.T) {
+ a := new(histogram)
+ add(a, 5, 1<<4)
+ add(a, 10, 1<<6)
+ add(a, 5, 1<<7)
+
+ for _, test := range percentileTests {
+ percentile := a.percentileBoundary(test.fraction)
+ if percentile != test.expected {
+ t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected)
+ }
+ }
+}
+
+func TestCopyFrom(t *testing.T) {
+ a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+ b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1}
+
+ a.CopyFrom(&b)
+
+ if a.String() != b.String() {
+ t.Errorf("a.String = %s WANT: %s", a.String(), b.String())
+ }
+}
+
+func TestClear(t *testing.T) {
+ a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+
+ a.Clear()
+
+ expected := "0, 0.000000, 0, 0, []"
+ if a.String() != expected {
+ t.Errorf("a.String = %s WANT %s", a.String(), expected)
+ }
+}
+
+func TestNew(t *testing.T) {
+ a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+ b := a.New()
+
+ expected := "0, 0.000000, 0, 0, []"
+ if b.(*histogram).String() != expected {
+ t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected)
+ }
+}
+
+func TestAdd(t *testing.T) {
+ // The tests here depend on the associativity of addMeasurement and Add.
+ // Add empty observation
+ a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+ b := a.New()
+
+ expected := a.String()
+ a.Add(b)
+ if a.String() != expected {
+ t.Errorf("a.String = %s WANT: %s", a.String(), expected)
+ }
+
+ // Add same bucketed value, no new buckets
+ c := new(histogram)
+ d := new(histogram)
+ e := new(histogram)
+ c.addMeasurement(12)
+ d.addMeasurement(11)
+ e.addMeasurement(12)
+ e.addMeasurement(11)
+ c.Add(d)
+ if c.String() != e.String() {
+ t.Errorf("c.String = %s WANT: %s", c.String(), e.String())
+ }
+
+ // Add bucketed values
+ f := new(histogram)
+ g := new(histogram)
+ h := new(histogram)
+ f.addMeasurement(4)
+ f.addMeasurement(12)
+ f.addMeasurement(100)
+ g.addMeasurement(18)
+ g.addMeasurement(36)
+ g.addMeasurement(255)
+ h.addMeasurement(4)
+ h.addMeasurement(12)
+ h.addMeasurement(100)
+ h.addMeasurement(18)
+ h.addMeasurement(36)
+ h.addMeasurement(255)
+ f.Add(g)
+ if f.String() != h.String() {
+ t.Errorf("f.String = %q WANT: %q", f.String(), h.String())
+ }
+
+ // add buckets to no buckets
+ i := new(histogram)
+ j := new(histogram)
+ k := new(histogram)
+ j.addMeasurement(18)
+ j.addMeasurement(36)
+ j.addMeasurement(255)
+ k.addMeasurement(18)
+ k.addMeasurement(36)
+ k.addMeasurement(255)
+ i.Add(j)
+ if i.String() != k.String() {
+ t.Errorf("i.String = %q WANT: %q", i.String(), k.String())
+ }
+
+ // add buckets to single value (no overlap)
+ l := new(histogram)
+ m := new(histogram)
+ n := new(histogram)
+ l.addMeasurement(0)
+ m.addMeasurement(18)
+ m.addMeasurement(36)
+ m.addMeasurement(255)
+ n.addMeasurement(0)
+ n.addMeasurement(18)
+ n.addMeasurement(36)
+ n.addMeasurement(255)
+ l.Add(m)
+ if l.String() != n.String() {
+ t.Errorf("l.String = %q WANT: %q", l.String(), n.String())
+ }
+
+ // mixed order
+ o := new(histogram)
+ p := new(histogram)
+ o.addMeasurement(0)
+ o.addMeasurement(2)
+ o.addMeasurement(0)
+ p.addMeasurement(0)
+ p.addMeasurement(0)
+ p.addMeasurement(2)
+ if o.String() != p.String() {
+ t.Errorf("o.String = %q WANT: %q", o.String(), p.String())
+ }
+}
+
+func add(h *histogram, times int, val int64) {
+ for i := 0; i < times; i++ {
+ h.addMeasurement(val)
+ }
+}
+
+func isApproximate(x, y float64) bool {
+ return math.Abs(x-y) < 1e-2
+}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 000000000..d860fccf9
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1063 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ // RemoteAddr is commonly in the form "IP" or "IP:port".
+ // If it is in the form "IP:port", split off the port.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+ switch host {
+ case "localhost", "127.0.0.1", "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
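
As the comment above notes, a program may replace AuthRequest. The sketch below is a hypothetical variant that also allows viewers from a private subnet; the 10.0.0.0/8 range and the :8080 address are illustrative and not part of this patch.

package main

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// Hypothetical allow-list: the default AuthRequest above only permits
	// localhost; this variant also lets a private subnet view the pages.
	_, privateNet, err := net.ParseCIDR("10.0.0.0/8")
	if err != nil {
		log.Fatal(err)
	}

	trace.AuthRequest = func(req *http.Request) (allowed, sensitive bool) {
		host, _, err := net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			host = req.RemoteAddr
		}
		switch {
		case host == "localhost", host == "127.0.0.1", host == "::1":
			return true, true // local callers may also see sensitive events
		case privateNet.Contains(net.ParseIP(host)):
			return true, false // allow viewing, but hide sensitive events
		}
		return false, false
	}

	// /debug/requests and /debug/events are registered on the default mux
	// by this package's init function (see below in this file).
	log.Fatal(http.ListenAndServe(":8080", nil))
}
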
+
+func init() {
+ http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ Render(w, req, sensitive)
+ })
+ http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ RenderEvents(w, req, sensitive)
+ })
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
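
A short sketch of how NewContext and FromContext are typically threaded through a request path; the handler name, family, and address below are illustrative and not part of this patch.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/trace"
)

// handleFoo is a hypothetical handler that binds a Trace to the context
// so helpers further down the call chain can log into the same trace.
func handleFoo(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("example.Foo", req.URL.Path)
	defer tr.Finish()

	ctx := trace.NewContext(context.Background(), tr)
	doStep(ctx)
	fmt.Fprintln(w, "ok")
}

func doStep(ctx context.Context) {
	// FromContext retrieves the Trace bound by NewContext, if any.
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf("doStep reached")
	}
}

func main() {
	http.HandleFunc("/foo", handleFoo)
	http.ListenAndServe("localhost:8080", nil)
}
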
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.events = make([]event, 0, maxEventsPerTrace)
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+ tr.Elapsed = time.Now().Sub(tr.Start)
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The traces will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ What interface{} // string or fmt.Stringer
+ Sensitive bool // whether this event contains sensitive information
+}
+
+// WhenString returns a string representation of the time of the event.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Timing information.
+ Start time.Time
+ Elapsed time.Duration // zero while active
+
+ // Trace information if non-zero.
+ traceID uint64
+ spanID uint64
+
+ // Whether this trace resulted in an error.
+ IsError bool
+
+ // Append-only sequence of events (modulo discards).
+ mu sync.RWMutex
+ events []event
+
+ refs int32 // how many buckets this is in
+ recycler func(interface{})
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.events = nil
+ tr.refs = 0
+ tr.recycler = nil
+ tr.disc = 0
+ tr.finishStack = nil
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < cap(tr.events) {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((cap(tr.events) - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to count for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[cap(tr.events)-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.events = make([]event, 0, m)
+ }
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ t := tr.Elapsed
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+ <head>
+ <title>/debug/requests</title>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#tr-status td.family {
+ padding-right: 2em;
+ }
+ table#tr-status td.active {
+ padding-right: 1em;
+ }
+ table#tr-status td.latency-first {
+ padding-left: 1em;
+ }
+ table#tr-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ </head>
+ <body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+ {{range $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{$n := index $.ActiveTraceCount $fam}}
+ <td class="active {{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} active]
+ {{if $n}}</a>{{end}}
+ </td>
+
+ {{$f := index $.CompletedTraces $fam}}
+ {{range $i, $b := $f.Buckets}}
+ {{$empty := $b.Empty}}
+ <td {{if $empty}}class="empty"{{end}}>
+ {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{.Cond}}]
+ {{if not $empty}}</a>{{end}}
+ </td>
+ {{end}}
+
+ {{$nb := len $f.Buckets}}
+ <td class="latency-first">
+ <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+ </td>
+
+ </tr>
+ {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+ <caption>
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+ </caption>
+ <tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+ {{range $tr := $.Traces}}
+ <tr class="first">
+ <td class="when">{{$tr.When}}</td>
+ <td class="elapsed">{{$tr.ElapsedTime}}</td>
+ <td>{{$tr.Title}}</td>
+ {{/* TODO: include traceID/spanID */}}
+ </tr>
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+ </body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
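
The file above is the vendored golang.org/x/net/trace implementation. As a reading aid (not part of the vendored file), here is a minimal sketch of how a server typically drives this API; the handler, family and title strings are illustrative assumptions, not taken from this change:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/trace"
    )

    // handler creates one Trace per request and finishes it when done.
    func handler(w http.ResponseWriter, r *http.Request) {
        tr := trace.New("example.Handler", r.URL.Path)
        defer tr.Finish() // a Trace must not be used after Finish; see the note above
        tr.LazyPrintf("serving %s for %s", r.URL.Path, r.RemoteAddr)
        w.WriteHeader(http.StatusOK)
    }

    func main() {
        http.HandleFunc("/", handler)
        // The trace package's init registers /debug/requests and /debug/events
        // on the default mux, so the page rendered by pageTmpl above is served here.
        log.Fatal(http.ListenAndServe("localhost:8080", nil))
    }

Finished traces feed the latency buckets linked from the /debug/requests status table defined in pageHTML.
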
diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go
new file mode 100644
index 000000000..14d7c237a
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace_test.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+type s struct{}
+
+func (s) String() string { return "lazy string" }
+
+// TestReset checks whether all the fields are zeroed after reset.
+func TestReset(t *testing.T) {
+ tr := New("foo", "bar")
+ tr.LazyLog(s{}, false)
+ tr.LazyPrintf("%d", 1)
+ tr.SetRecycler(func(_ interface{}) {})
+ tr.SetTraceInfo(3, 4)
+ tr.SetMaxEvents(100)
+ tr.SetError()
+ tr.Finish()
+
+ tr.(*trace).reset()
+
+ if !reflect.DeepEqual(tr, new(trace)) {
+ t.Errorf("reset didn't clear all fields: %+v", tr)
+ }
+}
+
+// TestResetLog checks whether all the fields are zeroed after reset.
+func TestResetLog(t *testing.T) {
+ el := NewEventLog("foo", "bar")
+ el.Printf("message")
+ el.Errorf("error")
+ el.Finish()
+
+ el.(*eventLog).reset()
+
+ if !reflect.DeepEqual(el, new(eventLog)) {
+ t.Errorf("reset didn't clear all fields: %+v", el)
+ }
+}
+
+func TestAuthRequest(t *testing.T) {
+ testCases := []struct {
+ host string
+ want bool
+ }{
+ {host: "192.168.23.1", want: false},
+ {host: "192.168.23.1:8080", want: false},
+ {host: "malformed remote addr", want: false},
+ {host: "localhost", want: true},
+ {host: "localhost:8080", want: true},
+ {host: "127.0.0.1", want: true},
+ {host: "127.0.0.1:8080", want: true},
+ {host: "::1", want: true},
+ {host: "[::1]:8080", want: true},
+ }
+ for _, tt := range testCases {
+ req := &http.Request{RemoteAddr: tt.host}
+ any, sensitive := AuthRequest(req)
+ if any != tt.want || sensitive != tt.want {
+ t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want)
+ }
+ }
+}
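
The TestAuthRequest cases above exercise the package's default authorization hook, which only admits loopback clients. A hedged sketch of relaxing that hook follows; allowing every client is an illustrative choice, not something this change configures:

    package main

    import (
        "net/http"

        "golang.org/x/net/trace"
    )

    func main() {
        // Let any client view /debug/requests, while events logged as sensitive
        // stay redacted because the second return value remains false.
        trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
            return true, false
        }
        http.ListenAndServe("localhost:8080", nil)
    }
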
diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go
new file mode 100644
index 000000000..3d95c6cba
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/file.go
@@ -0,0 +1,794 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// slashClean is equivalent to but slightly more efficient than
+// path.Clean("/" + name).
+func slashClean(name string) string {
+ if name == "" || name[0] != '/' {
+ name = "/" + name
+ }
+ return path.Clean(name)
+}
+
+// A FileSystem implements access to a collection of named files. The elements
+// in a file path are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+//
+// Each method has the same semantics as the os package's function of the same
+// name.
+//
+// Note that the os.Rename documentation says that "OS-specific restrictions
+// might apply". In particular, whether renaming a file or directory that
+// overwrites another existing file or directory is an error is OS-dependent.
+type FileSystem interface {
+ Mkdir(name string, perm os.FileMode) error
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+ RemoveAll(name string) error
+ Rename(oldName, newName string) error
+ Stat(name string) (os.FileInfo, error)
+}
+
+// A File is returned by a FileSystem's OpenFile method and can be served by a
+// Handler.
+//
+// A File may optionally implement the DeadPropsHolder interface, if it can
+// load and save dead properties.
+type File interface {
+ http.File
+ io.Writer
+}
+
+// A Dir implements FileSystem using the native file system restricted to a
+// specific directory tree.
+//
+// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
+// string value is a filename on the native file system, not a URL, so it is
+// separated by filepath.Separator, which isn't necessarily '/'.
+//
+// An empty Dir is treated as ".".
+type Dir string
+
+func (d Dir) resolve(name string) string {
+ // This implementation is based on Dir.Open's code in the standard net/http package.
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return ""
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+ return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
+}
+
+func (d Dir) Mkdir(name string, perm os.FileMode) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ return os.Mkdir(name, perm)
+}
+
+func (d Dir) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ f, err := os.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (d Dir) RemoveAll(name string) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ if name == filepath.Clean(string(d)) {
+ // Prohibit removing the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.RemoveAll(name)
+}
+
+func (d Dir) Rename(oldName, newName string) error {
+ if oldName = d.resolve(oldName); oldName == "" {
+ return os.ErrNotExist
+ }
+ if newName = d.resolve(newName); newName == "" {
+ return os.ErrNotExist
+ }
+ if root := filepath.Clean(string(d)); root == oldName || root == newName {
+ // Prohibit renaming from or to the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.Rename(oldName, newName)
+}
+
+func (d Dir) Stat(name string) (os.FileInfo, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ return os.Stat(name)
+}
+
+// NewMemFS returns a new in-memory FileSystem implementation.
+func NewMemFS() FileSystem {
+ return &memFS{
+ root: memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: 0660 | os.ModeDir,
+ modTime: time.Now(),
+ },
+ }
+}
+
+// A memFS implements FileSystem, storing all metadata and actual file data
+// in memory. No limits on filesystem size are enforced, so it is not
+// recommended for use where the clients are untrusted.
+//
+// Concurrent access is permitted. The tree structure is protected by a mutex,
+// and each node's contents and metadata are protected by a per-node mutex.
+//
+// TODO: Enforce file permissions.
+type memFS struct {
+ mu sync.Mutex
+ root memFSNode
+}
+
+// TODO: clean up and rationalize the walk/find code.
+
+// walk walks the directory tree for the fullname, calling f at each step. If f
+// returns an error, the walk is aborted and an *os.PathError wrapping that
+// error is returned.
+//
+// dir is the directory at that step, frag is the name fragment, and final is
+// whether it is the final step. For example, walking "/foo/bar/x" will result
+// in 3 calls to f:
+// - "/", "foo", false
+// - "/foo/", "bar", false
+// - "/foo/bar/", "x", true
+// The frag argument will be empty only if dir is the root node and the walk
+// ends at that root node.
+func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
+ original := fullname
+ fullname = slashClean(fullname)
+
+ // Strip any leading "/"s to make fullname a relative path, as the walk
+ // starts at fs.root.
+ if fullname[0] == '/' {
+ fullname = fullname[1:]
+ }
+ dir := &fs.root
+
+ for {
+ frag, remaining := fullname, ""
+ i := strings.IndexRune(fullname, '/')
+ final := i < 0
+ if !final {
+ frag, remaining = fullname[:i], fullname[i+1:]
+ }
+ if frag == "" && dir != &fs.root {
+ panic("webdav: empty path fragment for a clean path")
+ }
+ if err := f(dir, frag, final); err != nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: err,
+ }
+ }
+ if final {
+ break
+ }
+ child := dir.children[frag]
+ if child == nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrNotExist,
+ }
+ }
+ if !child.mode.IsDir() {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrInvalid,
+ }
+ }
+ dir, fullname = child, remaining
+ }
+ return nil
+}
+
+// find returns the parent of the named node and the relative name fragment
+// from the parent to the child. For example, if finding "/foo/bar/baz" then
+// parent will be the node for "/foo/bar" and frag will be "baz".
+//
+// If the fullname names the root node, then parent, frag and err will be zero.
+//
+// find returns an error if the parent does not already exist or the parent
+// isn't a directory, but it will not return an error per se if the child does
+// not already exist. The error returned is either nil or an *os.PathError
+// whose Op is op.
+func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
+ err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
+ if !final {
+ return nil
+ }
+ if frag0 != "" {
+ parent, frag = parent0, frag0
+ }
+ return nil
+ })
+ return parent, frag, err
+}
+
+func (fs *memFS) Mkdir(name string, perm os.FileMode) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("mkdir", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't create the root.
+ return os.ErrInvalid
+ }
+ if _, ok := dir.children[frag]; ok {
+ return os.ErrExist
+ }
+ dir.children[frag] = &memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: perm.Perm() | os.ModeDir,
+ modTime: time.Now(),
+ }
+ return nil
+}
+
+func (fs *memFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("open", name)
+ if err != nil {
+ return nil, err
+ }
+ var n *memFSNode
+ if dir == nil {
+ // We're opening the root.
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
+ return nil, os.ErrPermission
+ }
+ n, frag = &fs.root, "/"
+
+ } else {
+ n = dir.children[frag]
+ if flag&(os.O_SYNC|os.O_APPEND) != 0 {
+ // memFile doesn't support these flags yet.
+ return nil, os.ErrInvalid
+ }
+ if flag&os.O_CREATE != 0 {
+ if flag&os.O_EXCL != 0 && n != nil {
+ return nil, os.ErrExist
+ }
+ if n == nil {
+ n = &memFSNode{
+ mode: perm.Perm(),
+ }
+ dir.children[frag] = n
+ }
+ }
+ if n == nil {
+ return nil, os.ErrNotExist
+ }
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
+ n.mu.Lock()
+ n.data = nil
+ n.mu.Unlock()
+ }
+ }
+
+ children := make([]os.FileInfo, 0, len(n.children))
+ for cName, c := range n.children {
+ children = append(children, c.stat(cName))
+ }
+ return &memFile{
+ n: n,
+ nameSnapshot: frag,
+ childrenSnapshot: children,
+ }, nil
+}
+
+func (fs *memFS) RemoveAll(name string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("remove", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't remove the root.
+ return os.ErrInvalid
+ }
+ delete(dir.children, frag)
+ return nil
+}
+
+func (fs *memFS) Rename(oldName, newName string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ oldName = slashClean(oldName)
+ newName = slashClean(newName)
+ if oldName == newName {
+ return nil
+ }
+ if strings.HasPrefix(newName, oldName+"/") {
+ // We can't rename oldName to be a sub-directory of itself.
+ return os.ErrInvalid
+ }
+
+ oDir, oFrag, err := fs.find("rename", oldName)
+ if err != nil {
+ return err
+ }
+ if oDir == nil {
+ // We can't rename from the root.
+ return os.ErrInvalid
+ }
+
+ nDir, nFrag, err := fs.find("rename", newName)
+ if err != nil {
+ return err
+ }
+ if nDir == nil {
+ // We can't rename to the root.
+ return os.ErrInvalid
+ }
+
+ oNode, ok := oDir.children[oFrag]
+ if !ok {
+ return os.ErrNotExist
+ }
+ if oNode.children != nil {
+ if nNode, ok := nDir.children[nFrag]; ok {
+ if nNode.children == nil {
+ return errNotADirectory
+ }
+ if len(nNode.children) != 0 {
+ return errDirectoryNotEmpty
+ }
+ }
+ }
+ delete(oDir.children, oFrag)
+ nDir.children[nFrag] = oNode
+ return nil
+}
+
+func (fs *memFS) Stat(name string) (os.FileInfo, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("stat", name)
+ if err != nil {
+ return nil, err
+ }
+ if dir == nil {
+ // We're stat'ting the root.
+ return fs.root.stat("/"), nil
+ }
+ if n, ok := dir.children[frag]; ok {
+ return n.stat(path.Base(name)), nil
+ }
+ return nil, os.ErrNotExist
+}
+
+// A memFSNode represents a single entry in the in-memory filesystem and also
+// implements os.FileInfo.
+type memFSNode struct {
+ // children is protected by memFS.mu.
+ children map[string]*memFSNode
+
+ mu sync.Mutex
+ data []byte
+ mode os.FileMode
+ modTime time.Time
+ deadProps map[xml.Name]Property
+}
+
+func (n *memFSNode) stat(name string) *memFileInfo {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ return &memFileInfo{
+ name: name,
+ size: int64(len(n.data)),
+ mode: n.mode,
+ modTime: n.modTime,
+ }
+}
+
+func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ if len(n.deadProps) == 0 {
+ return nil, nil
+ }
+ ret := make(map[xml.Name]Property, len(n.deadProps))
+ for k, v := range n.deadProps {
+ ret[k] = v
+ }
+ return ret, nil
+}
+
+func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ pstat := Propstat{Status: http.StatusOK}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ if patch.Remove {
+ delete(n.deadProps, p.XMLName)
+ continue
+ }
+ if n.deadProps == nil {
+ n.deadProps = map[xml.Name]Property{}
+ }
+ n.deadProps[p.XMLName] = p
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+type memFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+func (f *memFileInfo) Name() string { return f.name }
+func (f *memFileInfo) Size() int64 { return f.size }
+func (f *memFileInfo) Mode() os.FileMode { return f.mode }
+func (f *memFileInfo) ModTime() time.Time { return f.modTime }
+func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
+func (f *memFileInfo) Sys() interface{} { return nil }
+
+// A memFile is a File implementation for a memFSNode. It holds a per-file (not
+// per-node) read/write position, and a snapshot of the memFS' tree structure
+// (a node's name and children) for that node.
+type memFile struct {
+ n *memFSNode
+ nameSnapshot string
+ childrenSnapshot []os.FileInfo
+ // pos is protected by n.mu.
+ pos int
+}
+
+// A *memFile implements the optional DeadPropsHolder interface.
+var _ DeadPropsHolder = (*memFile)(nil)
+
+func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
+func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
+
+func (f *memFile) Close() error {
+ return nil
+}
+
+func (f *memFile) Read(p []byte) (int, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos >= len(f.n.data) {
+ return 0, io.EOF
+ }
+ n := copy(p, f.n.data[f.pos:])
+ f.pos += n
+ return n, nil
+}
+
+func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if !f.n.mode.IsDir() {
+ return nil, os.ErrInvalid
+ }
+ old := f.pos
+ if old >= len(f.childrenSnapshot) {
+ // The os.File Readdir docs say that at the end of a directory,
+ // the error is io.EOF if count > 0 and nil if count <= 0.
+ if count > 0 {
+ return nil, io.EOF
+ }
+ return nil, nil
+ }
+ if count > 0 {
+ f.pos += count
+ if f.pos > len(f.childrenSnapshot) {
+ f.pos = len(f.childrenSnapshot)
+ }
+ } else {
+ f.pos = len(f.childrenSnapshot)
+ old = 0
+ }
+ return f.childrenSnapshot[old:f.pos], nil
+}
+
+func (f *memFile) Seek(offset int64, whence int) (int64, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ npos := f.pos
+ // TODO: How to handle offsets greater than the size of system int?
+ switch whence {
+ case os.SEEK_SET:
+ npos = int(offset)
+ case os.SEEK_CUR:
+ npos += int(offset)
+ case os.SEEK_END:
+ npos = len(f.n.data) + int(offset)
+ default:
+ npos = -1
+ }
+ if npos < 0 {
+ return 0, os.ErrInvalid
+ }
+ f.pos = npos
+ return int64(f.pos), nil
+}
+
+func (f *memFile) Stat() (os.FileInfo, error) {
+ return f.n.stat(f.nameSnapshot), nil
+}
+
+func (f *memFile) Write(p []byte) (int, error) {
+ lenp := len(p)
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos < len(f.n.data) {
+ n := copy(f.n.data[f.pos:], p)
+ f.pos += n
+ p = p[n:]
+ } else if f.pos > len(f.n.data) {
+ // Write permits the creation of holes, if we've seek'ed past the
+ // existing end of file.
+ if f.pos <= cap(f.n.data) {
+ oldLen := len(f.n.data)
+ f.n.data = f.n.data[:f.pos]
+ hole := f.n.data[oldLen:]
+ for i := range hole {
+ hole[i] = 0
+ }
+ } else {
+ d := make([]byte, f.pos, f.pos+len(p))
+ copy(d, f.n.data)
+ f.n.data = d
+ }
+ }
+
+ if len(p) > 0 {
+ // We should only get here if f.pos == len(f.n.data).
+ f.n.data = append(f.n.data, p...)
+ f.pos = len(f.n.data)
+ }
+ f.n.modTime = time.Now()
+ return lenp, nil
+}
+
+// moveFiles moves files and/or directories from src to dst.
+//
+// See section 9.9.4 for when various HTTP status codes apply.
+func moveFiles(fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
+ created := false
+ if _, err := fs.Stat(dst); err != nil {
+ if !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ created = true
+ } else if overwrite {
+ // Section 9.9.3 says that "If a resource exists at the destination
+ // and the Overwrite header is "T", then prior to performing the move,
+ // the server must perform a DELETE with "Depth: infinity" on the
+ // destination resource."
+ if err := fs.RemoveAll(dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ } else {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.Rename(src, dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+func copyProps(dst, src File) error {
+ d, ok := dst.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ s, ok := src.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ m, err := s.DeadProps()
+ if err != nil {
+ return err
+ }
+ props := make([]Property, 0, len(m))
+ for _, prop := range m {
+ props = append(props, prop)
+ }
+ _, err = d.Patch([]Proppatch{{Props: props}})
+ return err
+}
+
+// copyFiles copies files and/or directories from src to dst.
+//
+// See section 9.8.5 for when various HTTP status codes apply.
+func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
+ if recursion == 1000 {
+ return http.StatusInternalServerError, errRecursionTooDeep
+ }
+ recursion++
+
+ // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
+ // into /A/B/ could lead to infinite recursion if not handled correctly."
+
+ srcFile, err := fs.OpenFile(src, os.O_RDONLY, 0)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer srcFile.Close()
+ srcStat, err := srcFile.Stat()
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ srcPerm := srcStat.Mode() & os.ModePerm
+
+ created := false
+ if _, err := fs.Stat(dst); err != nil {
+ if os.IsNotExist(err) {
+ created = true
+ } else {
+ return http.StatusForbidden, err
+ }
+ } else {
+ if !overwrite {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.RemoveAll(dst); err != nil && !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ }
+
+ if srcStat.IsDir() {
+ if err := fs.Mkdir(dst, srcPerm); err != nil {
+ return http.StatusForbidden, err
+ }
+ if depth == infiniteDepth {
+ children, err := srcFile.Readdir(-1)
+ if err != nil {
+ return http.StatusForbidden, err
+ }
+ for _, c := range children {
+ name := c.Name()
+ s := path.Join(src, name)
+ d := path.Join(dst, name)
+ cStatus, cErr := copyFiles(fs, s, d, overwrite, depth, recursion)
+ if cErr != nil {
+ // TODO: MultiStatus.
+ return cStatus, cErr
+ }
+ }
+ }
+
+ } else {
+ dstFile, err := fs.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusForbidden, err
+
+ }
+ _, copyErr := io.Copy(dstFile, srcFile)
+ propsErr := copyProps(dstFile, srcFile)
+ closeErr := dstFile.Close()
+ if copyErr != nil {
+ return http.StatusInternalServerError, copyErr
+ }
+ if propsErr != nil {
+ return http.StatusInternalServerError, propsErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ }
+
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+// walkFS traverses filesystem fs starting at name up to depth levels.
+//
+// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
+// walkFS calls walkFn. If a visited file system node is a directory and
+// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
+func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ // This implementation is based on Walk's code in the standard path/filepath package.
+ err := walkFn(name, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+ if !info.IsDir() || depth == 0 {
+ return nil
+ }
+ if depth == 1 {
+ depth = 0
+ }
+
+ // Read directory names.
+ f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+ fileInfos, err := f.Readdir(0)
+ f.Close()
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+
+ for _, fileInfo := range fileInfos {
+ filename := path.Join(name, fileInfo.Name())
+ fileInfo, err := fs.Stat(filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walkFS(fs, depth, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
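
file.go above defines the FileSystem interface along with the OS-backed Dir and the in-memory NewMemFS implementations. A minimal sketch of driving that interface directly, assuming the in-memory backend (the paths and contents are illustrative):

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"
        "os"

        "golang.org/x/net/webdav"
    )

    func main() {
        fs := webdav.NewMemFS() // webdav.Dir("/srv/data") would use the OS file system instead

        if err := fs.Mkdir("/docs", 0777); err != nil {
            log.Fatal(err)
        }
        f, err := fs.OpenFile("/docs/hello.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := f.Write([]byte("hello")); err != nil {
            log.Fatal(err)
        }
        if err := f.Close(); err != nil {
            log.Fatal(err)
        }

        g, err := fs.OpenFile("/docs/hello.txt", os.O_RDONLY, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer g.Close()
        b, err := ioutil.ReadAll(g)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s\n", b) // prints "hello"
    }
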
diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go
new file mode 100644
index 000000000..cbd0240ab
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/file_test.go
@@ -0,0 +1,1169 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestSlashClean(t *testing.T) {
+ testCases := []string{
+ "",
+ ".",
+ "/",
+ "/./",
+ "//",
+ "//.",
+ "//a",
+ "/a",
+ "/a/b/c",
+ "/a//b/./../c/d/",
+ "a",
+ "a/b/c",
+ }
+ for _, tc := range testCases {
+ got := slashClean(tc)
+ want := path.Clean("/" + tc)
+ if got != want {
+ t.Errorf("tc=%q: got %q, want %q", tc, got, want)
+ }
+ }
+}
+
+func TestDirResolve(t *testing.T) {
+ testCases := []struct {
+ dir, name, want string
+ }{
+ {"/", "", "/"},
+ {"/", "/", "/"},
+ {"/", ".", "/"},
+ {"/", "./a", "/a"},
+ {"/", "..", "/"},
+ {"/", "..", "/"},
+ {"/", "../", "/"},
+ {"/", "../.", "/"},
+ {"/", "../a", "/a"},
+ {"/", "../..", "/"},
+ {"/", "../bar/a", "/bar/a"},
+ {"/", "../baz/a", "/baz/a"},
+ {"/", "...", "/..."},
+ {"/", ".../a", "/.../a"},
+ {"/", ".../..", "/"},
+ {"/", "a", "/a"},
+ {"/", "a/./b", "/a/b"},
+ {"/", "a/../../b", "/b"},
+ {"/", "a/../b", "/b"},
+ {"/", "a/b", "/a/b"},
+ {"/", "a/b/c/../../d", "/a/d"},
+ {"/", "a/b/c/../../../d", "/d"},
+ {"/", "a/b/c/../../../../d", "/d"},
+ {"/", "a/b/c/d", "/a/b/c/d"},
+
+ {"/foo/bar", "", "/foo/bar"},
+ {"/foo/bar", "/", "/foo/bar"},
+ {"/foo/bar", ".", "/foo/bar"},
+ {"/foo/bar", "./a", "/foo/bar/a"},
+ {"/foo/bar", "..", "/foo/bar"},
+ {"/foo/bar", "../", "/foo/bar"},
+ {"/foo/bar", "../.", "/foo/bar"},
+ {"/foo/bar", "../a", "/foo/bar/a"},
+ {"/foo/bar", "../..", "/foo/bar"},
+ {"/foo/bar", "../bar/a", "/foo/bar/bar/a"},
+ {"/foo/bar", "../baz/a", "/foo/bar/baz/a"},
+ {"/foo/bar", "...", "/foo/bar/..."},
+ {"/foo/bar", ".../a", "/foo/bar/.../a"},
+ {"/foo/bar", ".../..", "/foo/bar"},
+ {"/foo/bar", "a", "/foo/bar/a"},
+ {"/foo/bar", "a/./b", "/foo/bar/a/b"},
+ {"/foo/bar", "a/../../b", "/foo/bar/b"},
+ {"/foo/bar", "a/../b", "/foo/bar/b"},
+ {"/foo/bar", "a/b", "/foo/bar/a/b"},
+ {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"},
+ {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"},
+ {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"},
+ {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"},
+
+ {"/foo/bar/", "", "/foo/bar"},
+ {"/foo/bar/", "/", "/foo/bar"},
+ {"/foo/bar/", ".", "/foo/bar"},
+ {"/foo/bar/", "./a", "/foo/bar/a"},
+ {"/foo/bar/", "..", "/foo/bar"},
+
+ {"/foo//bar///", "", "/foo/bar"},
+ {"/foo//bar///", "/", "/foo/bar"},
+ {"/foo//bar///", ".", "/foo/bar"},
+ {"/foo//bar///", "./a", "/foo/bar/a"},
+ {"/foo//bar///", "..", "/foo/bar"},
+
+ {"/x/y/z", "ab/c\x00d/ef", ""},
+
+ {".", "", "."},
+ {".", "/", "."},
+ {".", ".", "."},
+ {".", "./a", "a"},
+ {".", "..", "."},
+ {".", "..", "."},
+ {".", "../", "."},
+ {".", "../.", "."},
+ {".", "../a", "a"},
+ {".", "../..", "."},
+ {".", "../bar/a", "bar/a"},
+ {".", "../baz/a", "baz/a"},
+ {".", "...", "..."},
+ {".", ".../a", ".../a"},
+ {".", ".../..", "."},
+ {".", "a", "a"},
+ {".", "a/./b", "a/b"},
+ {".", "a/../../b", "b"},
+ {".", "a/../b", "b"},
+ {".", "a/b", "a/b"},
+ {".", "a/b/c/../../d", "a/d"},
+ {".", "a/b/c/../../../d", "d"},
+ {".", "a/b/c/../../../../d", "d"},
+ {".", "a/b/c/d", "a/b/c/d"},
+
+ {"", "", "."},
+ {"", "/", "."},
+ {"", ".", "."},
+ {"", "./a", "a"},
+ {"", "..", "."},
+ }
+
+ for _, tc := range testCases {
+ d := Dir(filepath.FromSlash(tc.dir))
+ if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want {
+ t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want)
+ }
+ }
+}
+
+func TestWalk(t *testing.T) {
+ type walkStep struct {
+ name, frag string
+ final bool
+ }
+
+ testCases := []struct {
+ dir string
+ want []walkStep
+ }{
+ {"", []walkStep{
+ {"", "", true},
+ }},
+ {"/", []walkStep{
+ {"", "", true},
+ }},
+ {"/a", []walkStep{
+ {"", "a", true},
+ }},
+ {"/a/", []walkStep{
+ {"", "a", true},
+ }},
+ {"/a/b", []walkStep{
+ {"", "a", false},
+ {"a", "b", true},
+ }},
+ {"/a/b/", []walkStep{
+ {"", "a", false},
+ {"a", "b", true},
+ }},
+ {"/a/b/c", []walkStep{
+ {"", "a", false},
+ {"a", "b", false},
+ {"b", "c", true},
+ }},
+ // The following test case is the one mentioned explicitly
+ // in the method description.
+ {"/foo/bar/x", []walkStep{
+ {"", "foo", false},
+ {"foo", "bar", false},
+ {"bar", "x", true},
+ }},
+ }
+
+ for _, tc := range testCases {
+ fs := NewMemFS().(*memFS)
+
+ parts := strings.Split(tc.dir, "/")
+ for p := 2; p < len(parts); p++ {
+ d := strings.Join(parts[:p], "/")
+ if err := fs.Mkdir(d, 0666); err != nil {
+ t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err)
+ }
+ }
+
+ i, prevFrag := 0, ""
+ err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error {
+ got := walkStep{
+ name: prevFrag,
+ frag: frag,
+ final: final,
+ }
+ want := tc.want[i]
+
+ if got != want {
+ return fmt.Errorf("got %+v, want %+v", got, want)
+ }
+ i, prevFrag = i+1, frag
+ return nil
+ })
+ if err != nil {
+ t.Errorf("tc.dir=%q: %v", tc.dir, err)
+ }
+ }
+}
+
+// find appends to ss the names of the named file and its children. It is
+// analogous to the Unix find command.
+//
+// The returned strings are not guaranteed to be in any particular order.
+func find(ss []string, fs FileSystem, name string) ([]string, error) {
+ stat, err := fs.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ ss = append(ss, name)
+ if stat.IsDir() {
+ f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ children, err := f.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range children {
+ ss, err = find(ss, fs, path.Join(name, c.Name()))
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return ss, nil
+}
+
+func testFS(t *testing.T, fs FileSystem) {
+ errStr := func(err error) string {
+ switch {
+ case os.IsExist(err):
+ return "errExist"
+ case os.IsNotExist(err):
+ return "errNotExist"
+ case err != nil:
+ return "err"
+ }
+ return "ok"
+ }
+
+ // The non-"find" non-"stat" test cases should change the file system state. The
+ // indentation of the "find"s and "stat"s helps distinguish such test cases.
+ testCases := []string{
+ " stat / want dir",
+ " stat /a want errNotExist",
+ " stat /d want errNotExist",
+ " stat /d/e want errNotExist",
+ "create /a A want ok",
+ " stat /a want 1",
+ "create /d/e EEE want errNotExist",
+ "mk-dir /a want errExist",
+ "mk-dir /d/m want errNotExist",
+ "mk-dir /d want ok",
+ " stat /d want dir",
+ "create /d/e EEE want ok",
+ " stat /d/e want 3",
+ " find / /a /d /d/e",
+ "create /d/f FFFF want ok",
+ "create /d/g GGGGGGG want ok",
+ "mk-dir /d/m want ok",
+ "mk-dir /d/m want errExist",
+ "create /d/m/p PPPPP want ok",
+ " stat /d/e want 3",
+ " stat /d/f want 4",
+ " stat /d/g want 7",
+ " stat /d/h want errNotExist",
+ " stat /d/m want dir",
+ " stat /d/m/p want 5",
+ " find / /a /d /d/e /d/f /d/g /d/m /d/m/p",
+ "rm-all /d want ok",
+ " stat /a want 1",
+ " stat /d want errNotExist",
+ " stat /d/e want errNotExist",
+ " stat /d/f want errNotExist",
+ " stat /d/g want errNotExist",
+ " stat /d/m want errNotExist",
+ " stat /d/m/p want errNotExist",
+ " find / /a",
+ "mk-dir /d/m want errNotExist",
+ "mk-dir /d want ok",
+ "create /d/f FFFF want ok",
+ "rm-all /d/f want ok",
+ "mk-dir /d/m want ok",
+ "rm-all /z want ok",
+ "rm-all / want err",
+ "create /b BB want ok",
+ " stat / want dir",
+ " stat /a want 1",
+ " stat /b want 2",
+ " stat /c want errNotExist",
+ " stat /d want dir",
+ " stat /d/m want dir",
+ " find / /a /b /d /d/m",
+ "move__ o=F /b /c want ok",
+ " stat /b want errNotExist",
+ " stat /c want 2",
+ " stat /d/m want dir",
+ " stat /d/n want errNotExist",
+ " find / /a /c /d /d/m",
+ "move__ o=F /d/m /d/n want ok",
+ "create /d/n/q QQQQ want ok",
+ " stat /d/m want errNotExist",
+ " stat /d/n want dir",
+ " stat /d/n/q want 4",
+ "move__ o=F /d /d/n/z want err",
+ "move__ o=T /c /d/n/q want ok",
+ " stat /c want errNotExist",
+ " stat /d/n/q want 2",
+ " find / /a /d /d/n /d/n/q",
+ "create /d/n/r RRRRR want ok",
+ "mk-dir /u want ok",
+ "mk-dir /u/v want ok",
+ "move__ o=F /d/n /u want errExist",
+ "create /t TTTTTT want ok",
+ "move__ o=F /d/n /t want errExist",
+ "rm-all /t want ok",
+ "move__ o=F /d/n /t want ok",
+ " stat /d want dir",
+ " stat /d/n want errNotExist",
+ " stat /d/n/r want errNotExist",
+ " stat /t want dir",
+ " stat /t/q want 2",
+ " stat /t/r want 5",
+ " find / /a /d /t /t/q /t/r /u /u/v",
+ "move__ o=F /t / want errExist",
+ "move__ o=T /t /u/v want ok",
+ " stat /u/v/r want 5",
+ "move__ o=F / /z want err",
+ " find / /a /d /u /u/v /u/v/q /u/v/r",
+ " stat /a want 1",
+ " stat /b want errNotExist",
+ " stat /c want errNotExist",
+ " stat /u/v/r want 5",
+ "copy__ o=F d=0 /a /b want ok",
+ "copy__ o=T d=0 /a /c want ok",
+ " stat /a want 1",
+ " stat /b want 1",
+ " stat /c want 1",
+ " stat /u/v/r want 5",
+ "copy__ o=F d=0 /u/v/r /b want errExist",
+ " stat /b want 1",
+ "copy__ o=T d=0 /u/v/r /b want ok",
+ " stat /a want 1",
+ " stat /b want 5",
+ " stat /u/v/r want 5",
+ "rm-all /a want ok",
+ "rm-all /b want ok",
+ "mk-dir /u/v/w want ok",
+ "create /u/v/w/s SSSSSSSS want ok",
+ " stat /d want dir",
+ " stat /d/x want errNotExist",
+ " stat /d/y want errNotExist",
+ " stat /u/v/r want 5",
+ " stat /u/v/w/s want 8",
+ " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s",
+ "copy__ o=T d=0 /u/v /d/x want ok",
+ "copy__ o=T d=∞ /u/v /d/y want ok",
+ "rm-all /u want ok",
+ " stat /d/x want dir",
+ " stat /d/x/q want errNotExist",
+ " stat /d/x/r want errNotExist",
+ " stat /d/x/w want errNotExist",
+ " stat /d/x/w/s want errNotExist",
+ " stat /d/y want dir",
+ " stat /d/y/q want 2",
+ " stat /d/y/r want 5",
+ " stat /d/y/w want dir",
+ " stat /d/y/w/s want 8",
+ " stat /u want errNotExist",
+ " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s",
+ "copy__ o=F d=∞ /d/y /d/x want errExist",
+ }
+
+ for i, tc := range testCases {
+ tc = strings.TrimSpace(tc)
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "create":
+ parts := strings.Split(arg, " ")
+ if len(parts) != 4 || parts[2] != "want" {
+ t.Fatalf("test case #%d %q: invalid write", i, tc)
+ }
+ f, opErr := fs.OpenFile(parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if got := errStr(opErr); got != parts[3] {
+ t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3])
+ }
+ if f != nil {
+ if _, err := f.Write([]byte(parts[1])); err != nil {
+ t.Fatalf("test case #%d %q: Write: %v", i, tc, err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
+ }
+ }
+
+ case "find":
+ got, err := find(nil, fs, "/")
+ if err != nil {
+ t.Fatalf("test case #%d %q: find: %v", i, tc, err)
+ }
+ sort.Strings(got)
+ want := strings.Split(arg, " ")
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want)
+ }
+
+ case "copy__", "mk-dir", "move__", "rm-all", "stat":
+ nParts := 3
+ switch op {
+ case "copy__":
+ nParts = 6
+ case "move__":
+ nParts = 5
+ }
+ parts := strings.Split(arg, " ")
+ if len(parts) != nParts {
+ t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
+ }
+
+ got, opErr := "", error(nil)
+ switch op {
+ case "copy__":
+ depth := 0
+ if parts[1] == "d=∞" {
+ depth = infiniteDepth
+ }
+ _, opErr = copyFiles(fs, parts[2], parts[3], parts[0] == "o=T", depth, 0)
+ case "mk-dir":
+ opErr = fs.Mkdir(parts[0], 0777)
+ case "move__":
+ _, opErr = moveFiles(fs, parts[1], parts[2], parts[0] == "o=T")
+ case "rm-all":
+ opErr = fs.RemoveAll(parts[0])
+ case "stat":
+ var stat os.FileInfo
+ fileName := parts[0]
+ if stat, opErr = fs.Stat(fileName); opErr == nil {
+ if stat.IsDir() {
+ got = "dir"
+ } else {
+ got = strconv.Itoa(int(stat.Size()))
+ }
+
+ if fileName == "/" {
+ // For a Dir FileSystem, the virtual file system root maps to a
+ // real file system name like "/tmp/webdav-test012345", which does
+ // not end with "/". We skip such cases.
+ } else if statName := stat.Name(); path.Base(fileName) != statName {
+ t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q",
+ i, tc, fileName, statName)
+ }
+ }
+ }
+ if got == "" {
+ got = errStr(opErr)
+ }
+
+ if parts[len(parts)-2] != "want" {
+ t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
+ }
+ if want := parts[len(parts)-1]; got != want {
+ t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want)
+ }
+ }
+ }
+}
+
+func TestDir(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl":
+ t.Skip("see golang.org/issue/12004")
+ case "plan9":
+ t.Skip("see golang.org/issue/11453")
+ }
+
+ td, err := ioutil.TempDir("", "webdav-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(td)
+ testFS(t, Dir(td))
+}
+
+func TestMemFS(t *testing.T) {
+ testFS(t, NewMemFS())
+}
+
+func TestMemFSRoot(t *testing.T) {
+ fs := NewMemFS()
+ for i := 0; i < 5; i++ {
+ stat, err := fs.Stat("/")
+ if err != nil {
+ t.Fatalf("i=%d: Stat: %v", i, err)
+ }
+ if !stat.IsDir() {
+ t.Fatalf("i=%d: Stat.IsDir is false, want true", i)
+ }
+
+ f, err := fs.OpenFile("/", os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("i=%d: OpenFile: %v", i, err)
+ }
+ defer f.Close()
+ children, err := f.Readdir(-1)
+ if err != nil {
+ t.Fatalf("i=%d: Readdir: %v", i, err)
+ }
+ if len(children) != i {
+ t.Fatalf("i=%d: got %d children, want %d", i, len(children), i)
+ }
+
+ if _, err := f.Write(make([]byte, 1)); err == nil {
+ t.Fatalf("i=%d: Write: got nil error, want non-nil", i)
+ }
+
+ if err := fs.Mkdir(fmt.Sprintf("/dir%d", i), 0777); err != nil {
+ t.Fatalf("i=%d: Mkdir: %v", i, err)
+ }
+ }
+}
+
+func TestMemFileReaddir(t *testing.T) {
+ fs := NewMemFS()
+ if err := fs.Mkdir("/foo", 0777); err != nil {
+ t.Fatalf("Mkdir: %v", err)
+ }
+ readdir := func(count int) ([]os.FileInfo, error) {
+ f, err := fs.OpenFile("/foo", os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+ return f.Readdir(count)
+ }
+ if got, err := readdir(-1); len(got) != 0 || err != nil {
+ t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, <nil>", len(got), err)
+ }
+ if got, err := readdir(+1); len(got) != 0 || err != io.EOF {
+ t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err)
+ }
+}
+
+func TestMemFile(t *testing.T) {
+ testCases := []string{
+ "wantData ",
+ "wantSize 0",
+ "write abc",
+ "wantData abc",
+ "write de",
+ "wantData abcde",
+ "wantSize 5",
+ "write 5*x",
+ "write 4*y+2*z",
+ "write 3*st",
+ "wantData abcdexxxxxyyyyzzststst",
+ "wantSize 22",
+ "seek set 4 want 4",
+ "write EFG",
+ "wantData abcdEFGxxxyyyyzzststst",
+ "wantSize 22",
+ "seek set 2 want 2",
+ "read cdEF",
+ "read Gx",
+ "seek cur 0 want 8",
+ "seek cur 2 want 10",
+ "seek cur -1 want 9",
+ "write J",
+ "wantData abcdEFGxxJyyyyzzststst",
+ "wantSize 22",
+ "seek cur -4 want 6",
+ "write ghijk",
+ "wantData abcdEFghijkyyyzzststst",
+ "wantSize 22",
+ "read yyyz",
+ "seek cur 0 want 15",
+ "write ",
+ "seek cur 0 want 15",
+ "read ",
+ "seek cur 0 want 15",
+ "seek end -3 want 19",
+ "write ZZ",
+ "wantData abcdEFghijkyyyzzstsZZt",
+ "wantSize 22",
+ "write 4*A",
+ "wantData abcdEFghijkyyyzzstsZZAAAA",
+ "wantSize 25",
+ "seek end 0 want 25",
+ "seek end -5 want 20",
+ "read Z+4*A",
+ "write 5*B",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB",
+ "wantSize 30",
+ "seek end 10 want 40",
+ "write C",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C",
+ "wantSize 41",
+ "write D",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD",
+ "wantSize 42",
+ "seek set 43 want 43",
+ "write E",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E",
+ "wantSize 44",
+ "seek set 0 want 0",
+ "write 5*123456789_",
+ "wantData 123456789_123456789_123456789_123456789_123456789_",
+ "wantSize 50",
+ "seek cur 0 want 50",
+ "seek cur -99 want err",
+ }
+
+ const filename = "/foo"
+ fs := NewMemFS()
+ f, err := fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+
+ for i, tc := range testCases {
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+
+ // Expand an arg like "3*a+2*b" to "aaabb".
+ parts := strings.Split(arg, "+")
+ for j, part := range parts {
+ if k := strings.IndexByte(part, '*'); k >= 0 {
+ repeatCount, repeatStr := part[:k], part[k+1:]
+ n, err := strconv.Atoi(repeatCount)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount)
+ }
+ parts[j] = strings.Repeat(repeatStr, n)
+ }
+ }
+ arg = strings.Join(parts, "")
+
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "read":
+ buf := make([]byte, len(arg))
+ if _, err := io.ReadFull(f, buf); err != nil {
+ t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err)
+ }
+ if got := string(buf); got != arg {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
+ }
+
+ case "seek":
+ parts := strings.Split(arg, " ")
+ if len(parts) != 4 {
+ t.Fatalf("test case #%d %q: invalid seek", i, tc)
+ }
+
+ whence := 0
+ switch parts[0] {
+ default:
+ t.Fatalf("test case #%d %q: invalid seek whence", i, tc)
+ case "set":
+ whence = os.SEEK_SET
+ case "cur":
+ whence = os.SEEK_CUR
+ case "end":
+ whence = os.SEEK_END
+ }
+ offset, err := strconv.Atoi(parts[1])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1])
+ }
+
+ if parts[2] != "want" {
+ t.Fatalf("test case #%d %q: invalid seek", i, tc)
+ }
+ if parts[3] == "err" {
+ _, err := f.Seek(int64(offset), whence)
+ if err == nil {
+ t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc)
+ }
+ } else {
+ got, err := f.Seek(int64(offset), whence)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Seek: %v", i, tc, err)
+ }
+ want, err := strconv.Atoi(parts[3])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3])
+ }
+ if got != int64(want) {
+ t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
+ }
+ }
+
+ case "write":
+ n, err := f.Write([]byte(arg))
+ if err != nil {
+ t.Fatalf("test case #%d %q: write: %v", i, tc, err)
+ }
+ if n != len(arg) {
+ t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg))
+ }
+
+ case "wantData":
+ g, err := fs.OpenFile(filename, os.O_RDONLY, 0666)
+ if err != nil {
+ t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err)
+ }
+ gotBytes, err := ioutil.ReadAll(g)
+ if err != nil {
+ t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err)
+ }
+ for i, c := range gotBytes {
+ if c == '\x00' {
+ gotBytes[i] = '.'
+ }
+ }
+ got := string(gotBytes)
+ if got != arg {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
+ }
+ if err := g.Close(); err != nil {
+ t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
+ }
+
+ case "wantSize":
+ n, err := strconv.Atoi(arg)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg)
+ }
+ fi, err := fs.Stat(filename)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Stat: %v", i, tc, err)
+ }
+ if got, want := fi.Size(), int64(n); got != want {
+ t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
+ }
+ }
+ }
+}
+
+// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a
+// memFile doesn't allocate a new buffer for each of those N times. Otherwise,
+// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
+func TestMemFileWriteAllocs(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo allocates here")
+ }
+ fs := NewMemFS()
+ f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+
+ xxx := make([]byte, 1024)
+ for i := range xxx {
+ xxx[i] = 'x'
+ }
+
+ a := testing.AllocsPerRun(100, func() {
+ f.Write(xxx)
+ })
+ // AllocsPerRun returns an integral value, so we compare the rounded-down
+ // number to zero.
+ if a > 0 {
+ t.Fatalf("%v allocs per run, want 0", a)
+ }
+}
+
+func BenchmarkMemFileWrite(b *testing.B) {
+ fs := NewMemFS()
+ xxx := make([]byte, 1024)
+ for i := range xxx {
+ xxx[i] = 'x'
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ b.Fatalf("OpenFile: %v", err)
+ }
+ for j := 0; j < 100; j++ {
+ f.Write(xxx)
+ }
+ if err := f.Close(); err != nil {
+ b.Fatalf("Close: %v", err)
+ }
+ if err := fs.RemoveAll("/xxx"); err != nil {
+ b.Fatalf("RemoveAll: %v", err)
+ }
+ }
+}
+
+func TestCopyMoveProps(t *testing.T) {
+ fs := NewMemFS()
+ create := func(name string) error {
+ f, err := fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ _, wErr := f.Write([]byte("contents"))
+ cErr := f.Close()
+ if wErr != nil {
+ return wErr
+ }
+ return cErr
+ }
+ patch := func(name string, patches ...Proppatch) error {
+ f, err := fs.OpenFile(name, os.O_RDWR, 0666)
+ if err != nil {
+ return err
+ }
+ _, pErr := f.(DeadPropsHolder).Patch(patches)
+ cErr := f.Close()
+ if pErr != nil {
+ return pErr
+ }
+ return cErr
+ }
+ props := func(name string) (map[xml.Name]Property, error) {
+ f, err := fs.OpenFile(name, os.O_RDWR, 0666)
+ if err != nil {
+ return nil, err
+ }
+ m, pErr := f.(DeadPropsHolder).DeadProps()
+ cErr := f.Close()
+ if pErr != nil {
+ return nil, pErr
+ }
+ if cErr != nil {
+ return nil, cErr
+ }
+ return m, nil
+ }
+
+ p0 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "boat"},
+ InnerXML: []byte("pea-green"),
+ }
+ p1 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "ring"},
+ InnerXML: []byte("1 shilling"),
+ }
+ p2 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "spoon"},
+ InnerXML: []byte("runcible"),
+ }
+ p3 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "moon"},
+ InnerXML: []byte("light"),
+ }
+
+ if err := create("/src"); err != nil {
+ t.Fatalf("create /src: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil {
+ t.Fatalf("patch /src +p0 +p1: %v", err)
+ }
+ if _, err := copyFiles(fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil {
+ t.Fatalf("copyFiles /src /tmp: %v", err)
+ }
+ if _, err := moveFiles(fs, "/tmp", "/dst", true); err != nil {
+ t.Fatalf("moveFiles /tmp /dst: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil {
+ t.Fatalf("patch /src -p0: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil {
+ t.Fatalf("patch /src +p2: %v", err)
+ }
+ if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil {
+ t.Fatalf("patch /dst -p1: %v", err)
+ }
+ if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil {
+ t.Fatalf("patch /dst +p3: %v", err)
+ }
+
+ gotSrc, err := props("/src")
+ if err != nil {
+ t.Fatalf("props /src: %v", err)
+ }
+ wantSrc := map[xml.Name]Property{
+ p1.XMLName: p1,
+ p2.XMLName: p2,
+ }
+ if !reflect.DeepEqual(gotSrc, wantSrc) {
+ t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc)
+ }
+
+ gotDst, err := props("/dst")
+ if err != nil {
+ t.Fatalf("props /dst: %v", err)
+ }
+ wantDst := map[xml.Name]Property{
+ p0.XMLName: p0,
+ p3.XMLName: p3,
+ }
+ if !reflect.DeepEqual(gotDst, wantDst) {
+ t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst)
+ }
+}
+
+func TestWalkFS(t *testing.T) {
+ testCases := []struct {
+ desc string
+ buildfs []string
+ startAt string
+ depth int
+ walkFn filepath.WalkFunc
+ want []string
+ }{{
+ "just root",
+ []string{},
+ "/",
+ infiniteDepth,
+ nil,
+ []string{
+ "/",
+ },
+ }, {
+ "infinite walk from root",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/",
+ infiniteDepth,
+ nil,
+ []string{
+ "/",
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/d",
+ "/e",
+ "/f",
+ },
+ }, {
+ "infinite walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/a",
+ infiniteDepth,
+ nil,
+ []string{
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/d",
+ },
+ }, {
+ "depth 1 walk from root",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/",
+ 1,
+ nil,
+ []string{
+ "/",
+ "/a",
+ "/e",
+ "/f",
+ },
+ }, {
+ "depth 1 walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ },
+ "/a/b",
+ 1,
+ nil,
+ []string{
+ "/a/b",
+ "/a/b/c",
+ "/a/b/g",
+ },
+ }, {
+ "depth 0 walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ },
+ "/a/b",
+ 0,
+ nil,
+ []string{
+ "/a/b",
+ },
+ }, {
+ "infinite walk from file",
+ []string{
+ "mkdir /a",
+ "touch /a/b",
+ "touch /a/c",
+ },
+ "/a/b",
+ 0,
+ nil,
+ []string{
+ "/a/b",
+ },
+ }, {
+ "infinite walk with skipped subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ "touch /a/b/z",
+ },
+ "/",
+ infiniteDepth,
+ func(path string, info os.FileInfo, err error) error {
+ if path == "/a/b/g" {
+ return filepath.SkipDir
+ }
+ return nil
+ },
+ []string{
+ "/",
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/b/z",
+ },
+ }}
+ for _, tc := range testCases {
+ fs, err := buildTestFS(tc.buildfs)
+ if err != nil {
+ t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
+ }
+ var got []string
+ traceFn := func(path string, info os.FileInfo, err error) error {
+ if tc.walkFn != nil {
+ err = tc.walkFn(path, info, err)
+ if err != nil {
+ return err
+ }
+ }
+ got = append(got, path)
+ return nil
+ }
+ fi, err := fs.Stat(tc.startAt)
+ if err != nil {
+ t.Fatalf("%s: cannot stat: %v", tc.desc, err)
+ }
+ err = walkFS(fs, tc.depth, tc.startAt, fi, traceFn)
+ if err != nil {
+ t.Errorf("%s:\ngot error %v, want nil", tc.desc, err)
+ continue
+ }
+ sort.Strings(got)
+ sort.Strings(tc.want)
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+func buildTestFS(buildfs []string) (FileSystem, error) {
+ // TODO: Could this be merged with the build logic in testFS?
+
+ fs := NewMemFS()
+ for _, b := range buildfs {
+ op := strings.Split(b, " ")
+ switch op[0] {
+ case "mkdir":
+ err := fs.Mkdir(op[1], os.ModeDir|0777)
+ if err != nil {
+ return nil, err
+ }
+ case "touch":
+ f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE, 0666)
+ if err != nil {
+ return nil, err
+ }
+ f.Close()
+ case "write":
+ f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return nil, err
+ }
+ _, err = f.Write([]byte(op[2]))
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unknown file operation %q", op[0])
+ }
+ }
+ return fs, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go
new file mode 100644
index 000000000..416e81cdf
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/if.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The If header is covered by Section 10.4.
+// http://www.webdav.org/specs/rfc4918.html#HEADER_If
+
+import (
+ "strings"
+)
+
+// ifHeader is a disjunction (OR) of ifLists.
+type ifHeader struct {
+ lists []ifList
+}
+
+// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
+type ifList struct {
+ resourceTag string
+ conditions []Condition
+}
+
+// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
+// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
+// returned by req.Header.Get("If") for a http.Request req.
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
+ s := strings.TrimSpace(httpHeader)
+ switch tokenType, _, _ := lex(s); tokenType {
+ case '(':
+ return parseNoTagLists(s)
+ case angleTokenType:
+ return parseTaggedLists(s)
+ default:
+ return ifHeader{}, false
+ }
+}
+
+func parseNoTagLists(s string) (h ifHeader, ok bool) {
+ for {
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ }
+}
+
+func parseTaggedLists(s string) (h ifHeader, ok bool) {
+ resourceTag, n := "", 0
+ for first := true; ; first = false {
+ tokenType, tokenStr, remaining := lex(s)
+ switch tokenType {
+ case angleTokenType:
+ if !first && n == 0 {
+ return ifHeader{}, false
+ }
+ resourceTag, n = tokenStr, 0
+ s = remaining
+ case '(':
+ n++
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ l.resourceTag = resourceTag
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ default:
+ return ifHeader{}, false
+ }
+ }
+}
+
+func parseList(s string) (l ifList, remaining string, ok bool) {
+ tokenType, _, s := lex(s)
+ if tokenType != '(' {
+ return ifList{}, "", false
+ }
+ for {
+ tokenType, _, remaining = lex(s)
+ if tokenType == ')' {
+ if len(l.conditions) == 0 {
+ return ifList{}, "", false
+ }
+ return l, remaining, true
+ }
+ c, remaining, ok := parseCondition(s)
+ if !ok {
+ return ifList{}, "", false
+ }
+ l.conditions = append(l.conditions, c)
+ s = remaining
+ }
+}
+
+func parseCondition(s string) (c Condition, remaining string, ok bool) {
+ tokenType, tokenStr, s := lex(s)
+ if tokenType == notTokenType {
+ c.Not = true
+ tokenType, tokenStr, s = lex(s)
+ }
+ switch tokenType {
+ case strTokenType, angleTokenType:
+ c.Token = tokenStr
+ case squareTokenType:
+ c.ETag = tokenStr
+ default:
+ return Condition{}, "", false
+ }
+ return c, s, true
+}
+
+// Single-rune tokens like '(' or ')' have a token type equal to their rune.
+// All other tokens have a negative token type.
+const (
+ errTokenType = rune(-1)
+ eofTokenType = rune(-2)
+ strTokenType = rune(-3)
+ notTokenType = rune(-4)
+ angleTokenType = rune(-5)
+ squareTokenType = rune(-6)
+)
+
+func lex(s string) (tokenType rune, tokenStr string, remaining string) {
+ // The net/textproto Reader that parses the HTTP header will collapse
+ // Linear White Space that spans multiple "\r\n" lines to a single " ",
+ // so we don't need to look for '\r' or '\n'.
+ for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return eofTokenType, "", ""
+ }
+ i := 0
+loop:
+ for ; i < len(s); i++ {
+ switch s[i] {
+ case '\t', ' ', '(', ')', '<', '>', '[', ']':
+ break loop
+ }
+ }
+
+ if i != 0 {
+ tokenStr, remaining = s[:i], s[i:]
+ if tokenStr == "Not" {
+ return notTokenType, "", remaining
+ }
+ return strTokenType, tokenStr, remaining
+ }
+
+ j := 0
+ switch s[0] {
+ case '<':
+ j, tokenType = strings.IndexByte(s, '>'), angleTokenType
+ case '[':
+ j, tokenType = strings.IndexByte(s, ']'), squareTokenType
+ default:
+ return rune(s[0]), "", s[1:]
+ }
+ if j < 0 {
+ return errTokenType, "", ""
+ }
+ return tokenType, s[1:j], s[j+1:]
+}
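As a rough sketch of what the parser above produces (using the package's unexported types; the values mirror the "section 10.4.11 #2" case in the tests that follow), the RFC 4918 example header `</specs/rfc2518.doc> (Not ["4217"])` yields a single tagged list with one negated ETag condition:

    // Illustrative only, inside package webdav.
    h, ok := parseIfHeader(`</specs/rfc2518.doc> (Not ["4217"])`)
    // ok == true
    // h.lists[0].resourceTag == "/specs/rfc2518.doc"
    // h.lists[0].conditions[0] == Condition{Not: true, ETag: `"4217"`}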
diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go
new file mode 100644
index 000000000..aad61a401
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/if_test.go
@@ -0,0 +1,322 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseIfHeader(t *testing.T) {
+ // The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ input string
+ want ifHeader
+ }{{
+ "bad: empty",
+ ``,
+ ifHeader{},
+ }, {
+ "bad: no parens",
+ `foobar`,
+ ifHeader{},
+ }, {
+ "bad: empty list #1",
+ `()`,
+ ifHeader{},
+ }, {
+ "bad: empty list #2",
+ `(a) (b c) () (d)`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #1",
+ `<foo>`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #2",
+ `<foo> <bar> (a)`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #3",
+ `<foo> (a) (b) <bar>`,
+ ifHeader{},
+ }, {
+ "bad: no-tag-list followed by tagged-list",
+ `(a) (b) <foo> (c)`,
+ ifHeader{},
+ }, {
+ "bad: unfinished list",
+ `(a`,
+ ifHeader{},
+ }, {
+ "bad: unfinished ETag",
+ `([b`,
+ ifHeader{},
+ }, {
+ "bad: unfinished Notted list",
+ `(Not a`,
+ ifHeader{},
+ }, {
+ "bad: double Not",
+ `(Not Not a)`,
+ ifHeader{},
+ }, {
+ "good: one list with a Token",
+ `(a)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `a`,
+ }},
+ }},
+ },
+ }, {
+ "good: one list with an ETag",
+ `([a])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ ETag: `a`,
+ }},
+ }},
+ },
+ }, {
+ "good: one list with three Nots",
+ `(Not a Not b Not [d])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `a`,
+ }, {
+ Not: true,
+ Token: `b`,
+ }, {
+ Not: true,
+ ETag: `d`,
+ }},
+ }},
+ },
+ }, {
+ "good: two lists",
+ `(a) (b)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `a`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Token: `b`,
+ }},
+ }},
+ },
+ }, {
+ "good: two Notted lists",
+ `(Not a) (Not b)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `a`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Not: true,
+ Token: `b`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.1",
+ `<http://www.example.com/users/f/fielding/index.html>
+ (<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://www.example.com/users/f/fielding/index.html`,
+ conditions: []Condition{{
+ Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #1",
+ `(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #2",
+ `<http://example.com/locked/>
+ (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://example.com/locked/`,
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #3",
+ `<http://example.com/locked/member>
+ (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://example.com/locked/member`,
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 9.9.6",
+ `(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>)
+ (<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
+ }},
+ }},
+ },
+ }, {
+ "section 9.10.8",
+ `(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.6",
+ `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ ["I am an ETag"])
+ (["I am another ETag"])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ ETag: `"I am an ETag"`,
+ }},
+ }, {
+ conditions: []Condition{{
+ ETag: `"I am another ETag"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.7",
+ `(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ <urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.8",
+ `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)
+ (Not <DAV:no-lock>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Not: true,
+ Token: `DAV:no-lock`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.9",
+ `</resource1>
+ (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ [W/"A weak ETag"]) (["strong ETag"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/resource1`,
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ ETag: `W/"A weak ETag"`,
+ }},
+ }, {
+ resourceTag: `/resource1`,
+ conditions: []Condition{{
+ ETag: `"strong ETag"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.10",
+ `<http://www.example.com/specs/>
+ (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://www.example.com/specs/`,
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.11 #1",
+ `</specs/rfc2518.doc> (["4217"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/specs/rfc2518.doc`,
+ conditions: []Condition{{
+ ETag: `"4217"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.11 #2",
+ `</specs/rfc2518.doc> (Not ["4217"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/specs/rfc2518.doc`,
+ conditions: []Condition{{
+ Not: true,
+ ETag: `"4217"`,
+ }},
+ }},
+ },
+ }}
+
+ for _, tc := range testCases {
+ got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
+ if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
+ t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README
new file mode 100644
index 000000000..89656f489
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/README
@@ -0,0 +1,11 @@
+This is a fork of the encoding/xml package at ca1d6c4, the last commit before
+https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
+space behavior" made late in the lead-up to the Go 1.5 release.
+
+The list of encoding/xml changes is at
+https://go.googlesource.com/go/+log/master/src/encoding/xml
+
+This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
+released.
+
+See http://golang.org/issue/11841
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
new file mode 100644
index 000000000..a71284312
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import "time"
+
+var atomValue = &Feed{
+ XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+ Title: "Example Feed",
+ Link: []Link{{Href: "http://example.org/"}},
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Author: Person{Name: "John Doe"},
+ Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
+
+ Entry: []Entry{
+ {
+ Title: "Atom-Powered Robots Run Amok",
+ Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
+ Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Summary: NewText("Some text."),
+ },
+ },
+}
+
+var atomXml = `` +
+ `<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
+ `<title>Example Feed</title>` +
+ `<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
+ `<link href="http://example.org/"></link>` +
+ `<author><name>John Doe</name><uri></uri><email></email></author>` +
+ `<entry>` +
+ `<title>Atom-Powered Robots Run Amok</title>` +
+ `<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
+ `<link href="http://example.org/2003/12/13/atom03"></link>` +
+ `<updated>2003-12-13T18:30:02Z</updated>` +
+ `<author><name></name><uri></uri><email></email></author>` +
+ `<summary>Some text.</summary>` +
+ `</entry>` +
+ `</feed>`
+
+func ParseTime(str string) time.Time {
+ t, err := time.Parse(time.RFC3339, str)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+func NewText(text string) Text {
+ return Text{
+ Body: text,
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go
new file mode 100644
index 000000000..becedd583
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go
@@ -0,0 +1,151 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml_test
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+)
+
+func ExampleMarshalIndent() {
+ type Address struct {
+ City, State string
+ }
+ type Person struct {
+ XMLName xml.Name `xml:"person"`
+ Id int `xml:"id,attr"`
+ FirstName string `xml:"name>first"`
+ LastName string `xml:"name>last"`
+ Age int `xml:"age"`
+ Height float32 `xml:"height,omitempty"`
+ Married bool
+ Address
+ Comment string `xml:",comment"`
+ }
+
+ v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+ v.Comment = " Need more details. "
+ v.Address = Address{"Hanga Roa", "Easter Island"}
+
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+
+ os.Stdout.Write(output)
+ // Output:
+ // <person id="13">
+ // <name>
+ // <first>John</first>
+ // <last>Doe</last>
+ // </name>
+ // <age>42</age>
+ // <Married>false</Married>
+ // <City>Hanga Roa</City>
+ // <State>Easter Island</State>
+ // <!-- Need more details. -->
+ // </person>
+}
+
+func ExampleEncoder() {
+ type Address struct {
+ City, State string
+ }
+ type Person struct {
+ XMLName xml.Name `xml:"person"`
+ Id int `xml:"id,attr"`
+ FirstName string `xml:"name>first"`
+ LastName string `xml:"name>last"`
+ Age int `xml:"age"`
+ Height float32 `xml:"height,omitempty"`
+ Married bool
+ Address
+ Comment string `xml:",comment"`
+ }
+
+ v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+ v.Comment = " Need more details. "
+ v.Address = Address{"Hanga Roa", "Easter Island"}
+
+ enc := xml.NewEncoder(os.Stdout)
+ enc.Indent(" ", " ")
+ if err := enc.Encode(v); err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+
+ // Output:
+ // <person id="13">
+ // <name>
+ // <first>John</first>
+ // <last>Doe</last>
+ // </name>
+ // <age>42</age>
+ // <Married>false</Married>
+ // <City>Hanga Roa</City>
+ // <State>Easter Island</State>
+ // <!-- Need more details. -->
+ // </person>
+}
+
+// This example demonstrates unmarshaling an XML excerpt into a value with
+// some preset fields. Note that the Phone field isn't modified and that
+// the XML <Company> element is ignored. Also, the Groups field is assigned
+// considering the element path provided in its tag.
+func ExampleUnmarshal() {
+ type Email struct {
+ Where string `xml:"where,attr"`
+ Addr string
+ }
+ type Address struct {
+ City, State string
+ }
+ type Result struct {
+ XMLName xml.Name `xml:"Person"`
+ Name string `xml:"FullName"`
+ Phone string
+ Email []Email
+ Groups []string `xml:"Group>Value"`
+ Address
+ }
+ v := Result{Name: "none", Phone: "none"}
+
+ data := `
+ <Person>
+ <FullName>Grace R. Emlin</FullName>
+ <Company>Example Inc.</Company>
+ <Email where="home">
+ <Addr>gre@example.com</Addr>
+ </Email>
+ <Email where='work'>
+ <Addr>gre@work.com</Addr>
+ </Email>
+ <Group>
+ <Value>Friends</Value>
+ <Value>Squash</Value>
+ </Group>
+ <City>Hanga Roa</City>
+ <State>Easter Island</State>
+ </Person>
+ `
+ err := xml.Unmarshal([]byte(data), &v)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ return
+ }
+ fmt.Printf("XMLName: %#v\n", v.XMLName)
+ fmt.Printf("Name: %q\n", v.Name)
+ fmt.Printf("Phone: %q\n", v.Phone)
+ fmt.Printf("Email: %v\n", v.Email)
+ fmt.Printf("Groups: %v\n", v.Groups)
+ fmt.Printf("Address: %v\n", v.Address)
+ // Output:
+ // XMLName: xml.Name{Space:"", Local:"Person"}
+ // Name: "Grace R. Emlin"
+ // Phone: "none"
+ // Email: [{home gre@example.com} {work gre@work.com}]
+ // Groups: [Friends Squash]
+ // Address: {Hanga Roa Easter Island}
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 000000000..3c3b6aca5
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ // A generic XML header suitable for use with the output of Marshal.
+ // This is not automatically added to any output of this package,
+ // it is provided as a convenience.
+ Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+// - the tag on the XMLName field, if the data is a struct
+// - the value of the XMLName field of type xml.Name
+// - the tag of the struct field used to obtain the data
+// - the name of the struct field used to obtain the data
+// - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+// - the XMLName field, described above, is omitted.
+// - a field with tag "-" is omitted.
+// - a field with tag "name,attr" becomes an attribute with
+// the given name in the XML element.
+// - a field with tag ",attr" becomes an attribute with the
+// field name in the XML element.
+// - a field with tag ",chardata" is written as character data,
+// not as an XML element.
+// - a field with tag ",innerxml" is written verbatim, not subject
+// to the usual marshalling procedure.
+// - a field with tag ",comment" is written as an XML comment, not
+// subject to the usual marshalling procedure. It must not contain
+// the "--" string within it.
+// - a field with a tag including the "omitempty" option is omitted
+// if the field value is empty. The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or
+// string of length zero.
+// - an anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// If a field uses a tag "a>b>c", then the element c will be nested inside
+// parent elements a and b. Fields that appear next to each other that name
+// the same parent will be enclosed in one XML element.
+//
+// See MarshalIndent for an example.
+//
+// Marshal will return an error if asked to marshal a channel, function, or map.
+func Marshal(v interface{}) ([]byte, error) {
+ var b bytes.Buffer
+ if err := NewEncoder(&b).Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// Marshaler is the interface implemented by objects that can marshal
+// themselves into valid XML elements.
+//
+// MarshalXML encodes the receiver as zero or more XML elements.
+// By convention, arrays or slices are typically encoded as a sequence
+// of elements, one per entry.
+// Using start as the element tag is not required, but doing so
+// will enable Unmarshal to match the XML elements to the correct
+// struct field.
+// One common implementation strategy is to construct a separate
+// value with a layout corresponding to the desired XML and then
+// to encode it using e.EncodeElement.
+// Another common strategy is to use repeated calls to e.EncodeToken
+// to generate the XML output one token at a time.
+// The sequence of encoded tokens must make up zero or more valid
+// XML elements.
+type Marshaler interface {
+ MarshalXML(e *Encoder, start StartElement) error
+}
+
+// MarshalerAttr is the interface implemented by objects that can marshal
+// themselves into valid XML attributes.
+//
+// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.
+// Using name as the attribute name is not required, but doing so
+// will enable Unmarshal to match the attribute to the correct
+// struct field.
+// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute
+// will be generated in the output.
+// MarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type MarshalerAttr interface {
+ MarshalXMLAttr(name Name) (Attr, error)
+}
+
+// MarshalIndent works like Marshal, but each XML element begins on a new
+// indented line that starts with prefix and is followed by one or more
+// copies of indent according to the nesting depth.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ var b bytes.Buffer
+ enc := NewEncoder(&b)
+ enc.Indent(prefix, indent)
+ if err := enc.Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// An Encoder writes XML data to an output stream.
+type Encoder struct {
+ p printer
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{printer{Writer: bufio.NewWriter(w)}}
+ e.p.encoder = e
+ return e
+}
+
+// Indent sets the encoder to generate XML in which each element
+// begins on a new indented line that starts with prefix and is followed by
+// one or more copies of indent according to the nesting depth.
+func (enc *Encoder) Indent(prefix, indent string) {
+ enc.p.prefix = prefix
+ enc.p.indent = indent
+}
+
+// Encode writes the XML encoding of v to the stream.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// Encode calls Flush before returning.
+func (enc *Encoder) Encode(v interface{}) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+var (
+ begComment = []byte("<!--")
+ endComment = []byte("-->")
+ endProcInst = []byte("?>")
+ endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+
+ p := &enc.p
+ switch t := t.(type) {
+ case StartElement:
+ if err := p.writeStart(&t); err != nil {
+ return err
+ }
+ case EndElement:
+ if err := p.writeEnd(t.Name); err != nil {
+ return err
+ }
+ case CharData:
+ escapeText(p, t, false)
+ case Comment:
+ if bytes.Contains(t, endComment) {
+ return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+ }
+ p.WriteString("<!--")
+ p.Write(t)
+ p.WriteString("-->")
+ return p.cachedWriteError()
+ case ProcInst:
+ // A ProcInst whose target is "xml" is the XML declaration, and it is only
+ // allowed as the first token to be encoded in the stream.
+ if t.Target == "xml" && p.Buffered() != 0 {
+ return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+ }
+ if !isNameString(t.Target) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+ }
+ if bytes.Contains(t.Inst, endProcInst) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+ }
+ p.WriteString("<?")
+ p.WriteString(t.Target)
+ if len(t.Inst) > 0 {
+ p.WriteByte(' ')
+ p.Write(t.Inst)
+ }
+ p.WriteString("?>")
+ case Directive:
+ if !isValidDirective(t) {
+ return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+ }
+ p.WriteString("<!")
+ p.Write(t)
+ p.WriteString(">")
+ default:
+ return fmt.Errorf("xml: EncodeToken of invalid token type")
+
+ }
+ return p.cachedWriteError()
+}
+
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
+func isValidDirective(dir Directive) bool {
+ var (
+ depth int
+ inquote uint8
+ incomment bool
+ )
+ for i, c := range dir {
+ switch {
+ case incomment:
+ if c == '>' {
+ if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
+ incomment = false
+ }
+ }
+ // Just ignore anything in comment
+ case inquote != 0:
+ if c == inquote {
+ inquote = 0
+ }
+ // Just ignore anything within quotes
+ case c == '\'' || c == '"':
+ inquote = c
+ case c == '<':
+ if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
+ incomment = true
+ } else {
+ depth++
+ }
+ case c == '>':
+ if depth == 0 {
+ return false
+ }
+ depth--
+ }
+ }
+ return depth == 0 && inquote == 0 && !incomment
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+ return enc.p.Flush()
+}
+
+type printer struct {
+ *bufio.Writer
+ encoder *Encoder
+ seq int
+ indent string
+ prefix string
+ depth int
+ indentedIn bool
+ putNewline bool
+ defaultNS string
+ attrNS map[string]string // map prefix -> name space
+ attrPrefix map[string]string // map name space -> prefix
+ prefixes []printerPrefix
+ tags []Name
+}
+
+// printerPrefix holds a namespace undo record.
+// When an element is popped, the prefix record
+// is set back to the recorded URL. The empty
+// prefix records the URL for the default name space.
+//
+// The start of an element is recorded with an element
+// that has mark=true.
+type printerPrefix struct {
+ prefix string
+ url string
+ mark bool
+}
+
+func (p *printer) prefixForNS(url string, isAttr bool) string {
+ // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+ // and must be referred to that way.
+ // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+ // but users should not be trying to use that one directly - that's our job.)
+ if url == xmlURL {
+ return "xml"
+ }
+ if !isAttr && url == p.defaultNS {
+ // We can use the default name space.
+ return ""
+ }
+ return p.attrPrefix[url]
+}
+
+// defineNS pushes any namespace definition found in the given attribute.
+// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
+// attribute will be ignored.
+func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {
+ var prefix string
+ if attr.Name.Local == "xmlns" {
+ if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL {
+ return fmt.Errorf("xml: cannot redefine xmlns attribute prefix")
+ }
+ } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" {
+ prefix = attr.Name.Local
+ if attr.Value == "" {
+ // Technically, an empty XML namespace is allowed for an attribute.
+ // From http://www.w3.org/TR/xml-names11/#scoping-defaulting:
+ //
+ // The attribute value in a namespace declaration for a prefix may be
+ // empty. This has the effect, within the scope of the declaration, of removing
+ // any association of the prefix with a namespace name.
+ //
+ // However our namespace prefixes here are used only as hints. There's
+ // no need to respect the removal of a namespace prefix, so we ignore it.
+ return nil
+ }
+ } else {
+ // Ignore: it's not a namespace definition
+ return nil
+ }
+ if prefix == "" {
+ if attr.Value == p.defaultNS {
+ // No need for redefinition.
+ return nil
+ }
+ if attr.Value != "" && ignoreNonEmptyDefault {
+ // We have an xmlns="..." value but
+ // it can't define a name space in this context,
+ // probably because the element has an empty
+ // name space. In this case, we just ignore
+ // the name space declaration.
+ return nil
+ }
+ } else if _, ok := p.attrPrefix[attr.Value]; ok {
+ // There's already a prefix for the given name space,
+ // so use that. This prevents us from
+ // having two prefixes for the same name space
+ // so attrNS and attrPrefix can remain bijective.
+ return nil
+ }
+ p.pushPrefix(prefix, attr.Value)
+ return nil
+}
+
+// createNSPrefix creates a name space prefix attribute
+// to use for the given name space, defining a new prefix
+// if necessary.
+// If isAttr is true, the prefix is to be created for an attribute
+// prefix, which means that the default name space cannot
+// be used.
+func (p *printer) createNSPrefix(url string, isAttr bool) {
+ if _, ok := p.attrPrefix[url]; ok {
+ // We already have a prefix for the given URL.
+ return
+ }
+ switch {
+ case !isAttr && url == p.defaultNS:
+ // We can use the default name space.
+ return
+ case url == "":
+ // The only way we can encode names in the empty
+ // name space is by using the default name space,
+ // so we must use that.
+ if p.defaultNS != "" {
+ // The default namespace is non-empty, so we
+ // need to set it to empty.
+ p.pushPrefix("", "")
+ }
+ return
+ case url == xmlURL:
+ return
+ }
+ // TODO: if the URL is an existing prefix, we could use it as is. That
+ // would enable the marshaling of elements that had been unmarshaled
+ // with a name space prefix that was not found, although technically
+ // it would be incorrect.
+
+ // Pick a name. We try to use the final element of the path
+ // but fall back to _.
+ prefix := strings.TrimRight(url, "/")
+ if i := strings.LastIndex(prefix, "/"); i >= 0 {
+ prefix = prefix[i+1:]
+ }
+ if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") {
+ prefix = "_"
+ }
+ if strings.HasPrefix(prefix, "xml") {
+ // xmlanything is reserved.
+ prefix = "_" + prefix
+ }
+ if p.attrNS[prefix] != "" {
+ // Name is taken. Find a better one.
+ for p.seq++; ; p.seq++ {
+ if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" {
+ prefix = id
+ break
+ }
+ }
+ }
+
+ p.pushPrefix(prefix, url)
+}
+
+// writeNamespaces writes xmlns attributes for all the
+// namespace prefixes that have been defined in
+// the current element.
+func (p *printer) writeNamespaces() {
+ for i := len(p.prefixes) - 1; i >= 0; i-- {
+ prefix := p.prefixes[i]
+ if prefix.mark {
+ return
+ }
+ p.WriteString(" ")
+ if prefix.prefix == "" {
+ // Default name space.
+ p.WriteString(`xmlns="`)
+ } else {
+ p.WriteString("xmlns:")
+ p.WriteString(prefix.prefix)
+ p.WriteString(`="`)
+ }
+ EscapeText(p, []byte(p.nsForPrefix(prefix.prefix)))
+ p.WriteString(`"`)
+ }
+}
+
+// pushPrefix pushes a new prefix on the prefix stack
+// without checking to see if it is already defined.
+func (p *printer) pushPrefix(prefix, url string) {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ prefix: prefix,
+ url: p.nsForPrefix(prefix),
+ })
+ p.setAttrPrefix(prefix, url)
+}
+
+// nsForPrefix returns the name space for the given
+// prefix. Note that this is not valid for the
+// empty attribute prefix, which always has an empty
+// name space.
+func (p *printer) nsForPrefix(prefix string) string {
+ if prefix == "" {
+ return p.defaultNS
+ }
+ return p.attrNS[prefix]
+}
+
+// markPrefix marks the start of an element on the prefix
+// stack.
+func (p *printer) markPrefix() {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ mark: true,
+ })
+}
+
+// popPrefix pops all defined prefixes for the current
+// element.
+func (p *printer) popPrefix() {
+ for len(p.prefixes) > 0 {
+ prefix := p.prefixes[len(p.prefixes)-1]
+ p.prefixes = p.prefixes[:len(p.prefixes)-1]
+ if prefix.mark {
+ break
+ }
+ p.setAttrPrefix(prefix.prefix, prefix.url)
+ }
+}
+
+// setAttrPrefix sets an attribute name space prefix.
+// If url is empty, the attribute is removed.
+// If prefix is empty, the default name space is set.
+func (p *printer) setAttrPrefix(prefix, url string) {
+ if prefix == "" {
+ p.defaultNS = url
+ return
+ }
+ if url == "" {
+ delete(p.attrPrefix, p.attrNS[prefix])
+ delete(p.attrNS, prefix)
+ return
+ }
+ if p.attrPrefix == nil {
+ // Need to define a new name space.
+ p.attrPrefix = make(map[string]string)
+ p.attrNS = make(map[string]string)
+ }
+ // Remove any old prefix value. This is OK because we maintain a
+ // strict one-to-one mapping between prefix and URL (see
+ // defineNS)
+ delete(p.attrPrefix, p.attrNS[prefix])
+ p.attrPrefix[url] = prefix
+ p.attrNS[prefix] = url
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+)
+
+// marshalValue writes one or more XML elements representing val.
+// If val was obtained from a struct field, finfo must have its details.
+func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {
+ if startTemplate != nil && startTemplate.Name.Local == "" {
+ return fmt.Errorf("xml: EncodeElement of StartElement with missing name")
+ }
+
+ if !val.IsValid() {
+ return nil
+ }
+ if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {
+ return nil
+ }
+
+ // Drill into interfaces and pointers.
+ // This can turn into an infinite loop given a cyclic chain,
+ // but it matches the Go 1 behavior.
+ for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return nil
+ }
+ val = val.Elem()
+ }
+
+ kind := val.Kind()
+ typ := val.Type()
+
+ // Check for marshaler.
+ if val.CanInterface() && typ.Implements(marshalerType) {
+ return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerType) {
+ return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Check for text marshaler.
+ if val.CanInterface() && typ.Implements(textMarshalerType) {
+ return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Slices and arrays iterate over the elements. They do not have an enclosing tag.
+ if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
+ for i, n := 0, val.Len(); i < n; i++ {
+ if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ tinfo, err := getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Create start element.
+ // Precedence for the XML element name is:
+ // 0. startTemplate
+ // 1. XMLName field in underlying struct;
+ // 2. field name/tag in the struct field; and
+ // 3. type name
+ var start StartElement
+
+ // explicitNS records whether the element's name space has been
+ // explicitly set (for example an XMLName field).
+ explicitNS := false
+
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ explicitNS = true
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if tinfo.xmlname != nil {
+ xmlname := tinfo.xmlname
+ if xmlname.name != "" {
+ start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name
+ } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" {
+ start.Name = v
+ }
+ explicitNS = true
+ }
+ if start.Name.Local == "" && finfo != nil {
+ start.Name.Local = finfo.name
+ if finfo.xmlns != "" {
+ start.Name.Space = finfo.xmlns
+ explicitNS = true
+ }
+ }
+ if start.Name.Local == "" {
+ name := typ.Name()
+ if name == "" {
+ return &UnsupportedTypeError{typ}
+ }
+ start.Name.Local = name
+ }
+
+ // defaultNS records the default name space as set by a xmlns="..."
+ // attribute. We don't set p.defaultNS because we want to let
+ // the attribute writing code (in p.defineNS) be solely responsible
+ // for maintaining that.
+ defaultNS := p.defaultNS
+
+ // Attributes
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr == 0 {
+ continue
+ }
+ attr, err := p.fieldAttr(finfo, val)
+ if err != nil {
+ return err
+ }
+ if attr.Name.Local == "" {
+ continue
+ }
+ start.Attr = append(start.Attr, attr)
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ defaultNS = attr.Value
+ }
+ }
+ if !explicitNS {
+ // Historic behavior: elements use the default name space
+ // they are contained in by default.
+ start.Name.Space = defaultNS
+ }
+ // Historic behaviour: an element that's in a namespace sets
+ // the default namespace for all elements contained within it.
+ start.setDefaultNamespace()
+
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+
+ if val.Kind() == reflect.Struct {
+ err = p.marshalStruct(tinfo, val)
+ } else {
+ s, b, err1 := p.marshalSimple(typ, val)
+ if err1 != nil {
+ err = err1
+ } else if b != nil {
+ EscapeText(p, b)
+ } else {
+ p.EscapeString(s)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := p.writeEnd(start.Name); err != nil {
+ return err
+ }
+
+ return p.cachedWriteError()
+}
+
+// fieldAttr returns the attribute of the given field.
+// If the returned attribute has an empty Name.Local,
+// it should not be used.
+// The given value holds the value containing the field.
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) {
+ fv := finfo.value(val)
+ name := Name{Space: finfo.xmlns, Local: finfo.name}
+ if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {
+ return Attr{}, nil
+ }
+ if fv.Kind() == reflect.Interface && fv.IsNil() {
+ return Attr{}, nil
+ }
+ if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) {
+ attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {
+ attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ }
+ if fv.CanInterface() && fv.Type().Implements(textMarshalerType) {
+ text, err := fv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ text, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ }
+ // Dereference or skip nil pointer, interface values.
+ switch fv.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if fv.IsNil() {
+ return Attr{}, nil
+ }
+ fv = fv.Elem()
+ }
+ s, b, err := p.marshalSimple(fv.Type(), fv)
+ if err != nil {
+ return Attr{}, err
+ }
+ if b != nil {
+ s = string(b)
+ }
+ return Attr{name, s}, nil
+}
+
+// defaultStart returns the default start element to use,
+// given the reflect type, field info, and start template.
+func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {
+ var start StartElement
+ // Precedence for the XML element name is as above,
+ // except that we do not look inside structs for the first field.
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if finfo != nil && finfo.name != "" {
+ start.Name.Local = finfo.name
+ start.Name.Space = finfo.xmlns
+ } else if typ.Name() != "" {
+ start.Name.Local = typ.Name()
+ } else {
+ // Must be a pointer to a named type,
+ // since it has the Marshaler methods.
+ start.Name.Local = typ.Elem().Name()
+ }
+ // Historic behaviour: elements use the name space of
+ // the element they are contained in by default.
+ if start.Name.Space == "" {
+ start.Name.Space = p.defaultNS
+ }
+ start.setDefaultNamespace()
+ return start
+}
+
+// marshalInterface marshals a Marshaler interface value.
+func (p *printer) marshalInterface(val Marshaler, start StartElement) error {
+ // Push a marker onto the tag stack so that MarshalXML
+ // cannot close the XML tags that it did not open.
+ p.tags = append(p.tags, Name{})
+ n := len(p.tags)
+
+ err := val.MarshalXML(p.encoder, start)
+ if err != nil {
+ return err
+ }
+
+ // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark.
+ if len(p.tags) > n {
+ return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local)
+ }
+ p.tags = p.tags[:n-1]
+ return nil
+}
+
+// marshalTextInterface marshals a TextMarshaler interface value.
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+ text, err := val.MarshalText()
+ if err != nil {
+ return err
+ }
+ EscapeText(p, text)
+ return p.writeEnd(start.Name)
+}
+
+// writeStart writes the given start element.
+func (p *printer) writeStart(start *StartElement) error {
+ if start.Name.Local == "" {
+ return fmt.Errorf("xml: start tag with no name")
+ }
+
+ p.tags = append(p.tags, start.Name)
+ p.markPrefix()
+ // Define any name spaces explicitly declared in the attributes.
+ // We do this as a separate pass so that explicitly declared prefixes
+ // will take precedence over implicitly declared prefixes
+ // regardless of the order of the attributes.
+ ignoreNonEmptyDefault := start.Name.Space == ""
+ for _, attr := range start.Attr {
+ if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil {
+ return err
+ }
+ }
+ // Define any new name spaces implied by the attributes.
+ for _, attr := range start.Attr {
+ name := attr.Name
+ // From http://www.w3.org/TR/xml-names11/#defaulting
+ // "Default namespace declarations do not apply directly
+ // to attribute names; the interpretation of unprefixed
+ // attributes is determined by the element on which they
+ // appear."
+ // This means we don't need to create a new namespace
+ // when an attribute name space is empty.
+ if name.Space != "" && !name.isNamespace() {
+ p.createNSPrefix(name.Space, true)
+ }
+ }
+ p.createNSPrefix(start.Name.Space, false)
+
+ p.writeIndent(1)
+ p.WriteByte('<')
+ p.writeName(start.Name, false)
+ p.writeNamespaces()
+ for _, attr := range start.Attr {
+ name := attr.Name
+ if name.Local == "" || name.isNamespace() {
+ // Namespaces have already been written by writeNamespaces above.
+ continue
+ }
+ p.WriteByte(' ')
+ p.writeName(name, true)
+ p.WriteString(`="`)
+ p.EscapeString(attr.Value)
+ p.WriteByte('"')
+ }
+ p.WriteByte('>')
+ return nil
+}
+
+// writeName writes the given name. It assumes
+// that p.createNSPrefix(name) has already been called.
+func (p *printer) writeName(name Name, isAttr bool) {
+ if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+ p.WriteString(prefix)
+ p.WriteByte(':')
+ }
+ p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+ if name.Local == "" {
+ return fmt.Errorf("xml: end tag with no name")
+ }
+ if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+ return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+ }
+ if top := p.tags[len(p.tags)-1]; top != name {
+ if top.Local != name.Local {
+ return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+ }
+ return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+ }
+ p.tags = p.tags[:len(p.tags)-1]
+
+ p.writeIndent(-1)
+ p.WriteByte('<')
+ p.WriteByte('/')
+ p.writeName(name, false)
+ p.WriteByte('>')
+ p.popPrefix()
+ return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10), nil, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10), nil, nil
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+ case reflect.String:
+ return val.String(), nil, nil
+ case reflect.Bool:
+ return strconv.FormatBool(val.Bool()), nil, nil
+ case reflect.Array:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // [...]byte
+ var bytes []byte
+ if val.CanAddr() {
+ bytes = val.Slice(0, val.Len()).Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return "", bytes, nil
+ case reflect.Slice:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // []byte
+ return "", val.Bytes(), nil
+ }
+ return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+ s := parentStack{p: p}
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr != 0 {
+ continue
+ }
+ vf := finfo.value(val)
+
+ // Dereference or skip nil pointer, interface values.
+ switch vf.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !vf.IsNil() {
+ vf = vf.Elem()
+ }
+ }
+
+ switch finfo.flags & fMode {
+ case fCharData:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+ data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ if vf.CanAddr() {
+ pv := vf.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ }
+ var scratch [64]byte
+ switch vf.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+ case reflect.Float32, reflect.Float64:
+ Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+ case reflect.Bool:
+ Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+ case reflect.String:
+ if err := EscapeText(p, []byte(vf.String())); err != nil {
+ return err
+ }
+ case reflect.Slice:
+ if elem, ok := vf.Interface().([]byte); ok {
+ if err := EscapeText(p, elem); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+
+ case fComment:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ k := vf.Kind()
+ if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+ return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+ }
+ if vf.Len() == 0 {
+ continue
+ }
+ p.writeIndent(0)
+ p.WriteString("<!--")
+ dashDash := false
+ dashLast := false
+ switch k {
+ case reflect.String:
+ s := vf.String()
+ dashDash = strings.Index(s, "--") >= 0
+ dashLast = s[len(s)-1] == '-'
+ if !dashDash {
+ p.WriteString(s)
+ }
+ case reflect.Slice:
+ b := vf.Bytes()
+ dashDash = bytes.Index(b, ddBytes) >= 0
+ dashLast = b[len(b)-1] == '-'
+ if !dashDash {
+ p.Write(b)
+ }
+ default:
+ panic("can't happen")
+ }
+ if dashDash {
+ return fmt.Errorf(`xml: comments must not contain "--"`)
+ }
+ if dashLast {
+ // "--->" is invalid grammar. Make it "- -->"
+ p.WriteByte(' ')
+ }
+ p.WriteString("-->")
+ continue
+
+ case fInnerXml:
+ iface := vf.Interface()
+ switch raw := iface.(type) {
+ case []byte:
+ p.Write(raw)
+ continue
+ case string:
+ p.WriteString(raw)
+ continue
+ }
+
+ case fElement, fElement | fAny:
+ if err := s.setParents(finfo, vf); err != nil {
+ return err
+ }
+ }
+ if err := p.marshalValue(vf, finfo, nil); err != nil {
+ return err
+ }
+ }
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+ _, err := p.Write(nil)
+ return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+ if len(p.prefix) == 0 && len(p.indent) == 0 {
+ return
+ }
+ if depthDelta < 0 {
+ p.depth--
+ if p.indentedIn {
+ p.indentedIn = false
+ return
+ }
+ p.indentedIn = false
+ }
+ if p.putNewline {
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ if len(p.prefix) > 0 {
+ p.WriteString(p.prefix)
+ }
+ if len(p.indent) > 0 {
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ }
+ if depthDelta > 0 {
+ p.depth++
+ p.indentedIn = true
+ }
+}
+
+type parentStack struct {
+ p *printer
+ xmlns string
+ parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+ xmlns := s.p.defaultNS
+ if finfo.xmlns != "" {
+ xmlns = finfo.xmlns
+ }
+ commonParents := 0
+ if xmlns == s.xmlns {
+ for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+ if finfo.parents[commonParents] != s.parents[commonParents] {
+ break
+ }
+ }
+ }
+ // Pop off any parents that aren't in common with the previous field.
+ for i := len(s.parents) - 1; i >= commonParents; i-- {
+ if err := s.p.writeEnd(Name{
+ Space: s.xmlns,
+ Local: s.parents[i],
+ }); err != nil {
+ return err
+ }
+ }
+ s.parents = finfo.parents
+ s.xmlns = xmlns
+ if commonParents >= len(s.parents) {
+ // No new elements to push.
+ return nil
+ }
+ if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+ // The element is nil, so no need for the start elements.
+ s.parents = s.parents[:commonParents]
+ return nil
+ }
+ // Push any new parents required.
+ for _, name := range s.parents[commonParents:] {
+ start := &StartElement{
+ Name: Name{
+ Space: s.xmlns,
+ Local: name,
+ },
+ }
+ // Set the default name space for parent elements
+ // to match what we do with other elements.
+ if s.xmlns != s.p.defaultNS {
+ start.setDefaultNamespace()
+ }
+ if err := s.p.writeStart(start); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
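A minimal sketch of the "a>b>c" field-tag nesting described in the Marshal documentation above (the Nested type here is hypothetical, not part of this package; the nesting follows the same pattern as the NestedOrder type in the tests that follow):

    // Hypothetical type, for illustration of nested element tags.
    type Nested struct {
        XMLName struct{} `xml:"result"`
        Value   string   `xml:"parent>child"`
    }
    // Marshal(&Nested{Value: "x"}) would produce, roughly:
    //   <result><parent><child>x</child></parent></result>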
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
new file mode 100644
index 000000000..5dc78e748
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
@@ -0,0 +1,1939 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+type DriveType int
+
+const (
+ HyperDrive DriveType = iota
+ ImprobabilityDrive
+)
+
+type Passenger struct {
+ Name []string `xml:"name"`
+ Weight float32 `xml:"weight"`
+}
+
+type Ship struct {
+ XMLName struct{} `xml:"spaceship"`
+
+ Name string `xml:"name,attr"`
+ Pilot string `xml:"pilot,attr"`
+ Drive DriveType `xml:"drive"`
+ Age uint `xml:"age"`
+ Passenger []*Passenger `xml:"passenger"`
+ secret string
+}
+
+type NamedType string
+
+type Port struct {
+ XMLName struct{} `xml:"port"`
+ Type string `xml:"type,attr,omitempty"`
+ Comment string `xml:",comment"`
+ Number string `xml:",chardata"`
+}
+
+type Domain struct {
+ XMLName struct{} `xml:"domain"`
+ Country string `xml:",attr,omitempty"`
+ Name []byte `xml:",chardata"`
+ Comment []byte `xml:",comment"`
+}
+
+type Book struct {
+ XMLName struct{} `xml:"book"`
+ Title string `xml:",chardata"`
+}
+
+type Event struct {
+ XMLName struct{} `xml:"event"`
+ Year int `xml:",chardata"`
+}
+
+type Movie struct {
+ XMLName struct{} `xml:"movie"`
+ Length uint `xml:",chardata"`
+}
+
+type Pi struct {
+ XMLName struct{} `xml:"pi"`
+ Approximation float32 `xml:",chardata"`
+}
+
+type Universe struct {
+ XMLName struct{} `xml:"universe"`
+ Visible float64 `xml:",chardata"`
+}
+
+type Particle struct {
+ XMLName struct{} `xml:"particle"`
+ HasMass bool `xml:",chardata"`
+}
+
+type Departure struct {
+ XMLName struct{} `xml:"departure"`
+ When time.Time `xml:",chardata"`
+}
+
+type SecretAgent struct {
+ XMLName struct{} `xml:"agent"`
+ Handle string `xml:"handle,attr"`
+ Identity string
+ Obfuscate string `xml:",innerxml"`
+}
+
+type NestedItems struct {
+ XMLName struct{} `xml:"result"`
+ Items []string `xml:">item"`
+ Item1 []string `xml:"Items>item1"`
+}
+
+type NestedOrder struct {
+ XMLName struct{} `xml:"result"`
+ Field1 string `xml:"parent>c"`
+ Field2 string `xml:"parent>b"`
+ Field3 string `xml:"parent>a"`
+}
+
+type MixedNested struct {
+ XMLName struct{} `xml:"result"`
+ A string `xml:"parent1>a"`
+ B string `xml:"b"`
+ C string `xml:"parent1>parent2>c"`
+ D string `xml:"parent1>d"`
+}
+
+type NilTest struct {
+ A interface{} `xml:"parent1>parent2>a"`
+ B interface{} `xml:"parent1>b"`
+ C interface{} `xml:"parent1>parent2>c"`
+}
+
+type Service struct {
+ XMLName struct{} `xml:"service"`
+ Domain *Domain `xml:"host>domain"`
+ Port *Port `xml:"host>port"`
+ Extra1 interface{}
+ Extra2 interface{} `xml:"host>extra2"`
+}
+
+var nilStruct *Ship
+
+type EmbedA struct {
+ EmbedC
+ EmbedB EmbedB
+ FieldA string
+}
+
+type EmbedB struct {
+ FieldB string
+ *EmbedC
+}
+
+type EmbedC struct {
+ FieldA1 string `xml:"FieldA>A1"`
+ FieldA2 string `xml:"FieldA>A2"`
+ FieldB string
+ FieldC string
+}
+
+type NameCasing struct {
+ XMLName struct{} `xml:"casing"`
+ Xy string
+ XY string
+ XyA string `xml:"Xy,attr"`
+ XYA string `xml:"XY,attr"`
+}
+
+type NamePrecedence struct {
+ XMLName Name `xml:"Parent"`
+ FromTag XMLNameWithoutTag `xml:"InTag"`
+ FromNameVal XMLNameWithoutTag
+ FromNameTag XMLNameWithTag
+ InFieldName string
+}
+
+type XMLNameWithTag struct {
+ XMLName Name `xml:"InXMLNameTag"`
+ Value string `xml:",chardata"`
+}
+
+type XMLNameWithNSTag struct {
+ XMLName Name `xml:"ns InXMLNameWithNSTag"`
+ Value string `xml:",chardata"`
+}
+
+type XMLNameWithoutTag struct {
+ XMLName Name
+ Value string `xml:",chardata"`
+}
+
+type NameInField struct {
+ Foo Name `xml:"ns foo"`
+}
+
+type AttrTest struct {
+ Int int `xml:",attr"`
+ Named int `xml:"int,attr"`
+ Float float64 `xml:",attr"`
+ Uint8 uint8 `xml:",attr"`
+ Bool bool `xml:",attr"`
+ Str string `xml:",attr"`
+ Bytes []byte `xml:",attr"`
+}
+
+type OmitAttrTest struct {
+ Int int `xml:",attr,omitempty"`
+ Named int `xml:"int,attr,omitempty"`
+ Float float64 `xml:",attr,omitempty"`
+ Uint8 uint8 `xml:",attr,omitempty"`
+ Bool bool `xml:",attr,omitempty"`
+ Str string `xml:",attr,omitempty"`
+ Bytes []byte `xml:",attr,omitempty"`
+}
+
+type OmitFieldTest struct {
+ Int int `xml:",omitempty"`
+ Named int `xml:"int,omitempty"`
+ Float float64 `xml:",omitempty"`
+ Uint8 uint8 `xml:",omitempty"`
+ Bool bool `xml:",omitempty"`
+ Str string `xml:",omitempty"`
+ Bytes []byte `xml:",omitempty"`
+ Ptr *PresenceTest `xml:",omitempty"`
+}
+
+type AnyTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField AnyHolder `xml:",any"`
+}
+
+type AnyOmitTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField *AnyHolder `xml:",any,omitempty"`
+}
+
+type AnySliceTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField []AnyHolder `xml:",any"`
+}
+
+type AnyHolder struct {
+ XMLName Name
+ XML string `xml:",innerxml"`
+}
+
+type RecurseA struct {
+ A string
+ B *RecurseB
+}
+
+type RecurseB struct {
+ A *RecurseA
+ B string
+}
+
+type PresenceTest struct {
+ Exists *struct{}
+}
+
+type IgnoreTest struct {
+ PublicSecret string `xml:"-"`
+}
+
+type MyBytes []byte
+
+type Data struct {
+ Bytes []byte
+ Attr []byte `xml:",attr"`
+ Custom MyBytes
+}
+
+type Plain struct {
+ V interface{}
+}
+
+type MyInt int
+
+type EmbedInt struct {
+ MyInt
+}
+
+type Strings struct {
+ X []string `xml:"A>B,omitempty"`
+}
+
+type PointerFieldsTest struct {
+ XMLName Name `xml:"dummy"`
+ Name *string `xml:"name,attr"`
+ Age *uint `xml:"age,attr"`
+ Empty *string `xml:"empty,attr"`
+ Contents *string `xml:",chardata"`
+}
+
+type ChardataEmptyTest struct {
+ XMLName Name `xml:"test"`
+ Contents *string `xml:",chardata"`
+}
+
+type MyMarshalerTest struct {
+}
+
+var _ Marshaler = (*MyMarshalerTest)(nil)
+
+func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error {
+ e.EncodeToken(start)
+ e.EncodeToken(CharData([]byte("hello world")))
+ e.EncodeToken(EndElement{start.Name})
+ return nil
+}
+
+type MyMarshalerAttrTest struct{}
+
+var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil)
+
+func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) {
+ return Attr{name, "hello world"}, nil
+}
+
+type MyMarshalerValueAttrTest struct{}
+
+var _ MarshalerAttr = MyMarshalerValueAttrTest{}
+
+func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) {
+ return Attr{name, "hello world"}, nil
+}
+
+type MarshalerStruct struct {
+ Foo MyMarshalerAttrTest `xml:",attr"`
+}
+
+type MarshalerValueStruct struct {
+ Foo MyMarshalerValueAttrTest `xml:",attr"`
+}
+
+type InnerStruct struct {
+ XMLName Name `xml:"testns outer"`
+}
+
+type OuterStruct struct {
+ InnerStruct
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterNamedStruct struct {
+ InnerStruct
+ XMLName Name `xml:"outerns test"`
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterNamedOrderedStruct struct {
+ XMLName Name `xml:"outerns test"`
+ InnerStruct
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterOuterStruct struct {
+ OuterStruct
+}
+
+type NestedAndChardata struct {
+ AB []string `xml:"A>B"`
+ Chardata string `xml:",chardata"`
+}
+
+type NestedAndComment struct {
+ AB []string `xml:"A>B"`
+ Comment string `xml:",comment"`
+}
+
+type XMLNSFieldStruct struct {
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type NamedXMLNSFieldStruct struct {
+ XMLName struct{} `xml:"testns test"`
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type XMLNSFieldStructWithOmitEmpty struct {
+ Ns string `xml:"xmlns,attr,omitempty"`
+ Body string
+}
+
+type NamedXMLNSFieldStructWithEmptyNamespace struct {
+ XMLName struct{} `xml:"test"`
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type RecursiveXMLNSFieldStruct struct {
+ Ns string `xml:"xmlns,attr"`
+ Body *RecursiveXMLNSFieldStruct `xml:",omitempty"`
+ Text string `xml:",omitempty"`
+}
+
+func ifaceptr(x interface{}) interface{} {
+ return &x
+}
+
+var (
+ nameAttr = "Sarah"
+ ageAttr = uint(12)
+ contentsAttr = "lorem ipsum"
+)
+
+// Unless explicitly stated as such (or *Plain), all of the
+// tests below are two-way tests. When introducing new tests,
+// please try to make them two-way as well to ensure that
+// marshalling and unmarshalling are as symmetrical as feasible.
+var marshalTests = []struct {
+ Value interface{}
+ ExpectXML string
+ MarshalOnly bool
+ UnmarshalOnly bool
+}{
+ // Test nil marshals to nothing
+ {Value: nil, ExpectXML: ``, MarshalOnly: true},
+ {Value: nilStruct, ExpectXML: ``, MarshalOnly: true},
+
+ // Test value types
+ {Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`},
+ {Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`},
+ {Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+ {Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+ {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`},
+ {Value: &Plain{"gopher"}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+ {Value: &Plain{[]byte("gopher")}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+ {Value: &Plain{"</>"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{[]byte("</>")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{NamedType("potato")}, ExpectXML: `<Plain><V>potato</V></Plain>`},
+ {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+ {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+ {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`},
+
+ // Test time.
+ {
+ Value: &Plain{time.Unix(1e9, 123456789).UTC()},
+ ExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`,
+ },
+
+ // A pointer to struct{} may be used to test for an element's presence.
+ {
+ Value: &PresenceTest{new(struct{})},
+ ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,
+ },
+ {
+ Value: &PresenceTest{},
+ ExpectXML: `<PresenceTest></PresenceTest>`,
+ },
+
+ // A pointer to struct{} may be used to test for an element's presence.
+ {
+ Value: &PresenceTest{new(struct{})},
+ ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,
+ },
+ {
+ Value: &PresenceTest{},
+ ExpectXML: `<PresenceTest></PresenceTest>`,
+ },
+
+ // A []byte field is only nil if the element was not found.
+ {
+ Value: &Data{},
+ ExpectXML: `<Data></Data>`,
+ UnmarshalOnly: true,
+ },
+ {
+ Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}},
+ ExpectXML: `<Data Attr=""><Bytes></Bytes><Custom></Custom></Data>`,
+ UnmarshalOnly: true,
+ },
+
+ // Check that []byte works, including named []byte types.
+ {
+ Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}},
+ ExpectXML: `<Data Attr="v"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`,
+ },
+
+ // Test innerxml
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<redacted/>",
+ },
+ ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<Identity>James Bond</Identity><redacted/>",
+ },
+ ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+ UnmarshalOnly: true,
+ },
+
+ // Test structs
+ {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`},
+ {Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`},
+ {Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="&lt;unix&gt;"></port>`},
+ {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `<port><!--https-->443</port>`},
+ {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true},
+ {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&amp;friends</domain>`},
+ {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`},
+ {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`},
+ {Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`},
+ {Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`},
+ {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`},
+ {Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`},
+ {Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`},
+ {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`},
+ {Value: atomValue, ExpectXML: atomXml},
+ {
+ Value: &Ship{
+ Name: "Heart of Gold",
+ Pilot: "Computer",
+ Age: 1,
+ Drive: ImprobabilityDrive,
+ Passenger: []*Passenger{
+ {
+ Name: []string{"Zaphod", "Beeblebrox"},
+ Weight: 7.25,
+ },
+ {
+ Name: []string{"Trisha", "McMillen"},
+ Weight: 5.5,
+ },
+ {
+ Name: []string{"Ford", "Prefect"},
+ Weight: 7,
+ },
+ {
+ Name: []string{"Arthur", "Dent"},
+ Weight: 6.75,
+ },
+ },
+ },
+ ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` +
+ `<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +
+ `<age>1</age>` +
+ `<passenger>` +
+ `<name>Zaphod</name>` +
+ `<name>Beeblebrox</name>` +
+ `<weight>7.25</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Trisha</name>` +
+ `<name>McMillen</name>` +
+ `<weight>5.5</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Ford</name>` +
+ `<name>Prefect</name>` +
+ `<weight>7</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Arthur</name>` +
+ `<name>Dent</name>` +
+ `<weight>6.75</weight>` +
+ `</passenger>` +
+ `</spaceship>`,
+ },
+
+ // Test a>b
+ {
+ Value: &NestedItems{Items: nil, Item1: nil},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{}, Item1: []string{}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `</Items>` +
+ `</result>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &NestedItems{Items: nil, Item1: []string{"A"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item1>A</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `<item1>C</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"},
+ ExpectXML: `<result>` +
+ `<parent>` +
+ `<c>C</c>` +
+ `<b>B</b>` +
+ `<a>A</a>` +
+ `</parent>` +
+ `</result>`,
+ },
+ {
+ Value: &NilTest{A: "A", B: nil, C: "C"},
+ ExpectXML: `<NilTest>` +
+ `<parent1>` +
+ `<parent2><a>A</a></parent2>` +
+ `<parent2><c>C</c></parent2>` +
+ `</parent1>` +
+ `</NilTest>`,
+ MarshalOnly: true, // Uses interface{}
+ },
+ {
+ Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"},
+ ExpectXML: `<result>` +
+ `<parent1><a>A</a></parent1>` +
+ `<b>B</b>` +
+ `<parent1>` +
+ `<parent2><c>C</c></parent2>` +
+ `<d>D</d>` +
+ `</parent1>` +
+ `</result>`,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}},
+ ExpectXML: `<service><host><port>80</port></host></service>`,
+ },
+ {
+ Value: &Service{},
+ ExpectXML: `<service></service>`,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<Extra1>A</Extra1>` +
+ `<host><extra2>B</extra2></host>` +
+ `</service>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<host><extra2>example</extra2></host>` +
+ `</service>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &struct {
+ XMLName struct{} `xml:"space top"`
+ A string `xml:"x>a"`
+ B string `xml:"x>b"`
+ C string `xml:"space x>c"`
+ C1 string `xml:"space1 x>c"`
+ D1 string `xml:"space1 x>d"`
+ E1 string `xml:"x>e"`
+ }{
+ A: "a",
+ B: "b",
+ C: "c",
+ C1: "c1",
+ D1: "d1",
+ E1: "e1",
+ },
+ ExpectXML: `<top xmlns="space">` +
+ `<x><a>a</a><b>b</b><c>c</c></x>` +
+ `<x xmlns="space1">` +
+ `<c>c1</c>` +
+ `<d>d1</d>` +
+ `</x>` +
+ `<x>` +
+ `<e>e1</e>` +
+ `</x>` +
+ `</top>`,
+ },
+ {
+ Value: &struct {
+ XMLName Name
+ A string `xml:"x>a"`
+ B string `xml:"x>b"`
+ C string `xml:"space x>c"`
+ C1 string `xml:"space1 x>c"`
+ D1 string `xml:"space1 x>d"`
+ }{
+ XMLName: Name{
+ Space: "space0",
+ Local: "top",
+ },
+ A: "a",
+ B: "b",
+ C: "c",
+ C1: "c1",
+ D1: "d1",
+ },
+ ExpectXML: `<top xmlns="space0">` +
+ `<x><a>a</a><b>b</b></x>` +
+ `<x xmlns="space"><c>c</c></x>` +
+ `<x xmlns="space1">` +
+ `<c>c1</c>` +
+ `<d>d1</d>` +
+ `</x>` +
+ `</top>`,
+ },
+ {
+ Value: &struct {
+ XMLName struct{} `xml:"top"`
+ B string `xml:"space x>b"`
+ B1 string `xml:"space1 x>b"`
+ }{
+ B: "b",
+ B1: "b1",
+ },
+ ExpectXML: `<top>` +
+ `<x xmlns="space"><b>b</b></x>` +
+ `<x xmlns="space1"><b>b1</b></x>` +
+ `</top>`,
+ },
+
+ // Test struct embedding
+ {
+ Value: &EmbedA{
+ EmbedC: EmbedC{
+ FieldA1: "", // Shadowed by A.A
+ FieldA2: "", // Shadowed by A.A
+ FieldB: "A.C.B",
+ FieldC: "A.C.C",
+ },
+ EmbedB: EmbedB{
+ FieldB: "A.B.B",
+ EmbedC: &EmbedC{
+ FieldA1: "A.B.C.A1",
+ FieldA2: "A.B.C.A2",
+ FieldB: "", // Shadowed by A.B.B
+ FieldC: "A.B.C.C",
+ },
+ },
+ FieldA: "A.A",
+ },
+ ExpectXML: `<EmbedA>` +
+ `<FieldB>A.C.B</FieldB>` +
+ `<FieldC>A.C.C</FieldC>` +
+ `<EmbedB>` +
+ `<FieldB>A.B.B</FieldB>` +
+ `<FieldA>` +
+ `<A1>A.B.C.A1</A1>` +
+ `<A2>A.B.C.A2</A2>` +
+ `</FieldA>` +
+ `<FieldC>A.B.C.C</FieldC>` +
+ `</EmbedB>` +
+ `<FieldA>A.A</FieldA>` +
+ `</EmbedA>`,
+ },
+
+ // Test that name casing matters
+ {
+ Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"},
+ ExpectXML: `<casing Xy="mixedA" XY="upperA"><Xy>mixed</Xy><XY>upper</XY></casing>`,
+ },
+
+ // Test the order in which the XML element name is chosen
+ {
+ Value: &NamePrecedence{
+ FromTag: XMLNameWithoutTag{Value: "A"},
+ FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"},
+ FromNameTag: XMLNameWithTag{Value: "C"},
+ InFieldName: "D",
+ },
+ ExpectXML: `<Parent>` +
+ `<InTag>A</InTag>` +
+ `<InXMLName>B</InXMLName>` +
+ `<InXMLNameTag>C</InXMLNameTag>` +
+ `<InFieldName>D</InFieldName>` +
+ `</Parent>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &NamePrecedence{
+ XMLName: Name{Local: "Parent"},
+ FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"},
+ FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"},
+ FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"},
+ InFieldName: "D",
+ },
+ ExpectXML: `<Parent>` +
+ `<InTag>A</InTag>` +
+ `<FromNameVal>B</FromNameVal>` +
+ `<InXMLNameTag>C</InXMLNameTag>` +
+ `<InFieldName>D</InFieldName>` +
+ `</Parent>`,
+ UnmarshalOnly: true,
+ },
+
+ // xml.Name works in a plain field as well.
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ },
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`,
+ UnmarshalOnly: true,
+ },
+
+ // Marshaling zero xml.Name uses the tag or field name.
+ {
+ Value: &NameInField{},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ MarshalOnly: true,
+ },
+
+ // Test attributes
+ {
+ Value: &AttrTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ },
+ ExpectXML: `<AttrTest Int="8" int="9" Float="23.5" Uint8="255"` +
+ ` Bool="true" Str="str" Bytes="byt"></AttrTest>`,
+ },
+ {
+ Value: &AttrTest{Bytes: []byte{}},
+ ExpectXML: `<AttrTest Int="0" int="0" Float="0" Uint8="0"` +
+ ` Bool="false" Str="" Bytes=""></AttrTest>`,
+ },
+ {
+ Value: &OmitAttrTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ },
+ ExpectXML: `<OmitAttrTest Int="8" int="9" Float="23.5" Uint8="255"` +
+ ` Bool="true" Str="str" Bytes="byt"></OmitAttrTest>`,
+ },
+ {
+ Value: &OmitAttrTest{},
+ ExpectXML: `<OmitAttrTest></OmitAttrTest>`,
+ },
+
+ // pointer fields
+ {
+ Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr},
+ ExpectXML: `<dummy name="Sarah" age="12">lorem ipsum</dummy>`,
+ MarshalOnly: true,
+ },
+
+ // empty chardata pointer field
+ {
+ Value: &ChardataEmptyTest{},
+ ExpectXML: `<test></test>`,
+ MarshalOnly: true,
+ },
+
+ // omitempty on fields
+ {
+ Value: &OmitFieldTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ Ptr: &PresenceTest{},
+ },
+ ExpectXML: `<OmitFieldTest>` +
+ `<Int>8</Int>` +
+ `<int>9</int>` +
+ `<Float>23.5</Float>` +
+ `<Uint8>255</Uint8>` +
+ `<Bool>true</Bool>` +
+ `<Str>str</Str>` +
+ `<Bytes>byt</Bytes>` +
+ `<Ptr></Ptr>` +
+ `</OmitFieldTest>`,
+ },
+ {
+ Value: &OmitFieldTest{},
+ ExpectXML: `<OmitFieldTest></OmitFieldTest>`,
+ },
+
+ // Test ",any"
+ {
+ ExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`,
+ Value: &AnyTest{
+ Nested: "known",
+ AnyField: AnyHolder{
+ XMLName: Name{Local: "other"},
+ XML: "<sub>unknown</sub>",
+ },
+ },
+ },
+ {
+ Value: &AnyTest{Nested: "known",
+ AnyField: AnyHolder{
+ XML: "<unknown/>",
+ XMLName: Name{Local: "AnyField"},
+ },
+ },
+ ExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`,
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested></a>`,
+ Value: &AnyOmitTest{
+ Nested: "b",
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns="f"><h>i</h></g></a>`,
+ Value: &AnySliceTest{
+ Nested: "b",
+ AnyField: []AnyHolder{
+ {
+ XMLName: Name{Local: "c"},
+ XML: "<d>e</d>",
+ },
+ {
+ XMLName: Name{Space: "f", Local: "g"},
+ XML: "<h>i</h>",
+ },
+ },
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested></a>`,
+ Value: &AnySliceTest{
+ Nested: "b",
+ },
+ },
+
+ // Test recursive types.
+ {
+ Value: &RecurseA{
+ A: "a1",
+ B: &RecurseB{
+ A: &RecurseA{"a2", nil},
+ B: "b1",
+ },
+ },
+ ExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`,
+ },
+
+ // Test ignoring fields via "-" tag
+ {
+ ExpectXML: `<IgnoreTest></IgnoreTest>`,
+ Value: &IgnoreTest{},
+ },
+ {
+ ExpectXML: `<IgnoreTest></IgnoreTest>`,
+ Value: &IgnoreTest{PublicSecret: "can't tell"},
+ MarshalOnly: true,
+ },
+ {
+ ExpectXML: `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`,
+ Value: &IgnoreTest{},
+ UnmarshalOnly: true,
+ },
+
+ // Test escaping.
+ {
+ ExpectXML: `<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`,
+ Value: &AnyTest{
+ Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`,
+ AnyField: AnyHolder{XMLName: Name{Local: "empty"}},
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`,
+ Value: &AnyTest{
+ Nested: "newline: \n; cr: \r; tab: \t;",
+ AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}},
+ },
+ },
+ {
+ ExpectXML: "<a><nested><value>1\r2\r\n3\n\r4\n5</value></nested></a>",
+ Value: &AnyTest{
+ Nested: "1\n2\n3\n\n4\n5",
+ },
+ UnmarshalOnly: true,
+ },
+ {
+ ExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`,
+ Value: &EmbedInt{
+ MyInt: 42,
+ },
+ },
+ // Test omitempty with parent chain; see golang.org/issue/4168.
+ {
+ ExpectXML: `<Strings><A></A></Strings>`,
+ Value: &Strings{},
+ },
+ // Custom marshalers.
+ {
+ ExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`,
+ Value: &MyMarshalerTest{},
+ },
+ {
+ ExpectXML: `<MarshalerStruct Foo="hello world"></MarshalerStruct>`,
+ Value: &MarshalerStruct{},
+ },
+ {
+ ExpectXML: `<MarshalerValueStruct Foo="hello world"></MarshalerValueStruct>`,
+ Value: &MarshalerValueStruct{},
+ },
+ {
+ ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+ Value: &OuterStruct{IntAttr: 10},
+ },
+ {
+ ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+ Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+ },
+ {
+ ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+ Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+ },
+ {
+ ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+ Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}},
+ },
+ {
+ ExpectXML: `<NestedAndChardata><A><B></B><B></B></A>test</NestedAndChardata>`,
+ Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"},
+ },
+ {
+ ExpectXML: `<NestedAndComment><A><B></B><B></B></A><!--test--></NestedAndComment>`,
+ Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"},
+ },
+ {
+ ExpectXML: `<XMLNSFieldStruct xmlns="http://example.com/ns"><Body>hello world</Body></XMLNSFieldStruct>`,
+ Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<testns:test xmlns:testns="testns" xmlns="http://example.com/ns"><Body>hello world</Body></testns:test>`,
+ Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<testns:test xmlns:testns="testns"><Body>hello world</Body></testns:test>`,
+ Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<XMLNSFieldStructWithOmitEmpty><Body>hello world</Body></XMLNSFieldStructWithOmitEmpty>`,
+ Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"},
+ },
+ {
+ // The xmlns attribute must be ignored because the <test>
+ // element is in the empty namespace, so it's not possible
+ // to set the default namespace to something non-empty.
+ ExpectXML: `<test><Body>hello world</Body></test>`,
+ Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"},
+ MarshalOnly: true,
+ },
+ {
+ ExpectXML: `<RecursiveXMLNSFieldStruct xmlns="foo"><Body xmlns=""><Text>hello world</Text></Body></RecursiveXMLNSFieldStruct>`,
+ Value: &RecursiveXMLNSFieldStruct{
+ Ns: "foo",
+ Body: &RecursiveXMLNSFieldStruct{
+ Text: "hello world",
+ },
+ },
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ for idx, test := range marshalTests {
+ if test.UnmarshalOnly {
+ continue
+ }
+ data, err := Marshal(test.Value)
+ if err != nil {
+ t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err)
+ continue
+ }
+ if got, want := string(data), test.ExpectXML; got != want {
+ if strings.Contains(want, "\n") {
+ t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want)
+ } else {
+ t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want)
+ }
+ }
+ }
+}
+
+type AttrParent struct {
+ X string `xml:"X>Y,attr"`
+}
+
+type BadAttr struct {
+ Name []string `xml:"name,attr"`
+}
+
+var marshalErrorTests = []struct {
+ Value interface{}
+ Err string
+ Kind reflect.Kind
+}{
+ {
+ Value: make(chan bool),
+ Err: "xml: unsupported type: chan bool",
+ Kind: reflect.Chan,
+ },
+ {
+ Value: map[string]string{
+ "question": "What do you get when you multiply six by nine?",
+ "answer": "42",
+ },
+ Err: "xml: unsupported type: map[string]string",
+ Kind: reflect.Map,
+ },
+ {
+ Value: map[*Ship]bool{nil: false},
+ Err: "xml: unsupported type: map[*xml.Ship]bool",
+ Kind: reflect.Map,
+ },
+ {
+ Value: &Domain{Comment: []byte("f--bar")},
+ Err: `xml: comments must not contain "--"`,
+ },
+ // Reject parent chain with attr, never worked; see golang.org/issue/5033.
+ {
+ Value: &AttrParent{},
+ Err: `xml: X>Y chain not valid with attr flag`,
+ },
+ {
+ Value: BadAttr{[]string{"X", "Y"}},
+ Err: `xml: unsupported type: []string`,
+ },
+}
+
+var marshalIndentTests = []struct {
+ Value interface{}
+ Prefix string
+ Indent string
+ ExpectXML string
+}{
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<redacted/>",
+ },
+ Prefix: "",
+ Indent: "\t",
+ ExpectXML: fmt.Sprintf("<agent handle=\"007\">\n\t<Identity>James Bond</Identity><redacted/>\n</agent>"),
+ },
+}
+
+func TestMarshalErrors(t *testing.T) {
+ for idx, test := range marshalErrorTests {
+ data, err := Marshal(test.Value)
+ if err == nil {
+ t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err)
+ continue
+ }
+ if err.Error() != test.Err {
+ t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err)
+ }
+ if test.Kind != reflect.Invalid {
+ if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {
+ t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind)
+ }
+ }
+ }
+}
+
+// Do invertibility testing on the various structures that we test
+func TestUnmarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ if test.MarshalOnly {
+ continue
+ }
+ if _, ok := test.Value.(*Plain); ok {
+ continue
+ }
+ vt := reflect.TypeOf(test.Value)
+ dest := reflect.New(vt.Elem()).Interface()
+ err := Unmarshal([]byte(test.ExpectXML), dest)
+
+ switch fix := dest.(type) {
+ case *Feed:
+ fix.Author.InnerXML = ""
+ for i := range fix.Entry {
+ fix.Entry[i].Author.InnerXML = ""
+ }
+ }
+
+ if err != nil {
+ t.Errorf("#%d: unexpected error: %#v", i, err)
+ } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
+ t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want)
+ }
+ }
+}
+
+func TestMarshalIndent(t *testing.T) {
+ for i, test := range marshalIndentTests {
+ data, err := MarshalIndent(test.Value, test.Prefix, test.Indent)
+ if err != nil {
+ t.Errorf("#%d: Error: %s", i, err)
+ continue
+ }
+ if got, want := string(data), test.ExpectXML; got != want {
+ t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want)
+ }
+ }
+}
+
+type limitedBytesWriter struct {
+ w io.Writer
+ remain int // until writes fail
+}
+
+func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) {
+ if lw.remain <= 0 {
+ println("error")
+ return 0, errors.New("write limit hit")
+ }
+ if len(p) > lw.remain {
+ p = p[:lw.remain]
+ n, _ = lw.w.Write(p)
+ lw.remain = 0
+ return n, errors.New("write limit hit")
+ }
+ n, err = lw.w.Write(p)
+ lw.remain -= n
+ return n, err
+}
+
+func TestMarshalWriteErrors(t *testing.T) {
+ var buf bytes.Buffer
+ const writeCap = 1024
+ w := &limitedBytesWriter{&buf, writeCap}
+ enc := NewEncoder(w)
+ var err error
+ var i int
+ const n = 4000
+ for i = 1; i <= n; i++ {
+ err = enc.Encode(&Passenger{
+ Name: []string{"Alice", "Bob"},
+ Weight: 5,
+ })
+ if err != nil {
+ break
+ }
+ }
+ if err == nil {
+ t.Error("expected an error")
+ }
+ if i == n {
+ t.Errorf("expected to fail before the end")
+ }
+ if buf.Len() != writeCap {
+ t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap)
+ }
+}
+
+func TestMarshalWriteIOErrors(t *testing.T) {
+ enc := NewEncoder(errWriter{})
+
+ expectErr := "unwritable"
+ err := enc.Encode(&Passenger{})
+ if err == nil || err.Error() != expectErr {
+ t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr)
+ }
+}
+
+func TestMarshalFlush(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ if err := enc.EncodeToken(CharData("hello world")); err != nil {
+ t.Fatalf("enc.EncodeToken: %v", err)
+ }
+ if buf.Len() > 0 {
+ t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes())
+ }
+ if err := enc.Flush(); err != nil {
+ t.Fatalf("enc.Flush: %v", err)
+ }
+ if buf.String() != "hello world" {
+ t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world")
+ }
+}
+
+var encodeElementTests = []struct {
+ desc string
+ value interface{}
+ start StartElement
+ expectXML string
+}{{
+ desc: "simple string",
+ value: "hello",
+ start: StartElement{
+ Name: Name{Local: "a"},
+ },
+ expectXML: `<a>hello</a>`,
+}, {
+ desc: "string with added attributes",
+ value: "hello",
+ start: StartElement{
+ Name: Name{Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "x"},
+ Value: "y",
+ }, {
+ Name: Name{Local: "foo"},
+ Value: "bar",
+ }},
+ },
+ expectXML: `<a x="y" foo="bar">hello</a>`,
+}, {
+ desc: "start element with default name space",
+ value: struct {
+ Foo XMLNameWithNSTag
+ }{
+ Foo: XMLNameWithNSTag{
+ Value: "hello",
+ },
+ },
+ start: StartElement{
+ Name: Name{Space: "ns", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<a xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></a>`,
+}, {
+ desc: "start element in name space with different default name space",
+ value: struct {
+ Foo XMLNameWithNSTag
+ }{
+ Foo: XMLNameWithNSTag{
+ Value: "hello",
+ },
+ },
+ start: StartElement{
+ Name: Name{Space: "ns2", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></ns2:a>`,
+}, {
+ desc: "XMLMarshaler with start element with default name space",
+ value: &MyMarshalerTest{},
+ start: StartElement{
+ Name: Name{Space: "ns2", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns">hello world</ns2:a>`,
+}}
+
+func TestEncodeElement(t *testing.T) {
+ for idx, test := range encodeElementTests {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ err := enc.EncodeElement(test.value, test.start)
+ if err != nil {
+ t.Fatalf("enc.EncodeElement: %v", err)
+ }
+ err = enc.Flush()
+ if err != nil {
+ t.Fatalf("enc.Flush: %v", err)
+ }
+ if got, want := buf.String(), test.expectXML; got != want {
+ t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want)
+ }
+ }
+}
+
+func BenchmarkMarshal(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ Marshal(atomValue)
+ }
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ b.ReportAllocs()
+ xml := []byte(atomXml)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(xml, &Feed{})
+ }
+}
+
+// golang.org/issue/6556
+func TestStructPointerMarshal(t *testing.T) {
+ type A struct {
+ XMLName string `xml:"a"`
+ B []interface{}
+ }
+ type C struct {
+ XMLName Name
+ Value string `xml:"value"`
+ }
+
+ a := new(A)
+ a.B = append(a.B, &C{
+ XMLName: Name{Local: "c"},
+ Value: "x",
+ })
+
+ b, err := Marshal(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if x := string(b); x != "<a><c><value>x</value></c></a>" {
+ t.Fatal(x)
+ }
+ var v A
+ err = Unmarshal(b, &v)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+var encodeTokenTests = []struct {
+ desc string
+ toks []Token
+ want string
+ err string
+}{{
+ desc: "start element with name space",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, nil},
+ },
+ want: `<space:local xmlns:space="space">`,
+}, {
+ desc: "start element with no name",
+ toks: []Token{
+ StartElement{Name{"space", ""}, nil},
+ },
+ err: "xml: start tag with no name",
+}, {
+ desc: "end element with no name",
+ toks: []Token{
+ EndElement{Name{"space", ""}},
+ },
+ err: "xml: end tag with no name",
+}, {
+ desc: "char data",
+ toks: []Token{
+ CharData("foo"),
+ },
+ want: `foo`,
+}, {
+ desc: "char data with escaped chars",
+ toks: []Token{
+ CharData(" \t\n"),
+ },
+ want: " &#x9;\n",
+}, {
+ desc: "comment",
+ toks: []Token{
+ Comment("foo"),
+ },
+ want: `<!--foo-->`,
+}, {
+ desc: "comment with invalid content",
+ toks: []Token{
+ Comment("foo-->"),
+ },
+ err: "xml: EncodeToken of Comment containing --> marker",
+}, {
+ desc: "proc instruction",
+ toks: []Token{
+ ProcInst{"Target", []byte("Instruction")},
+ },
+ want: `<?Target Instruction?>`,
+}, {
+ desc: "proc instruction with empty target",
+ toks: []Token{
+ ProcInst{"", []byte("Instruction")},
+ },
+ err: "xml: EncodeToken of ProcInst with invalid Target",
+}, {
+ desc: "proc instruction with bad content",
+ toks: []Token{
+ ProcInst{"", []byte("Instruction?>")},
+ },
+ err: "xml: EncodeToken of ProcInst with invalid Target",
+}, {
+ desc: "directive",
+ toks: []Token{
+ Directive("foo"),
+ },
+ want: `<!foo>`,
+}, {
+ desc: "more complex directive",
+ toks: []Token{
+ Directive("DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]"),
+ },
+ want: `<!DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]>`,
+}, {
+ desc: "directive instruction with bad name",
+ toks: []Token{
+ Directive("foo>"),
+ },
+ err: "xml: EncodeToken of Directive containing wrong < or > markers",
+}, {
+ desc: "end tag without start tag",
+ toks: []Token{
+ EndElement{Name{"foo", "bar"}},
+ },
+ err: "xml: end tag </bar> without start tag",
+}, {
+ desc: "mismatching end tag local name",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, nil},
+ EndElement{Name{"", "bar"}},
+ },
+ err: "xml: end tag </bar> does not match start tag <foo>",
+ want: `<foo>`,
+}, {
+ desc: "mismatching end tag namespace",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, nil},
+ EndElement{Name{"another", "foo"}},
+ },
+ err: "xml: end tag </foo> in namespace another does not match start tag <foo> in namespace space",
+ want: `<space:foo xmlns:space="space">`,
+}, {
+ desc: "start element with explicit namespace",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ {Name{"space", "foo"}, "value"},
+ }},
+ },
+ want: `<x:local xmlns:x="space" x:foo="value">`,
+}, {
+ desc: "start element with explicit namespace and colliding prefix",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ {Name{"space", "foo"}, "value"},
+ {Name{"x", "bar"}, "other"},
+ }},
+ },
+ want: `<x:local xmlns:x_1="x" xmlns:x="space" x:foo="value" x_1:bar="other">`,
+}, {
+ desc: "start element using previously defined namespace",
+ toks: []Token{
+ StartElement{Name{"", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"space", "x"}, "y"},
+ }},
+ },
+ want: `<local xmlns:x="space"><x:foo x:x="y">`,
+}, {
+ desc: "nested name space with same prefix",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space1"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space2"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"space1", "a"}, "space1 value"},
+ {Name{"space2", "b"}, "space2 value"},
+ }},
+ EndElement{Name{"", "foo"}},
+ EndElement{Name{"", "foo"}},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"space1", "a"}, "space1 value"},
+ {Name{"space2", "b"}, "space2 value"},
+ }},
+ },
+ want: `<foo xmlns:x="space1"><foo xmlns:x="space2"><foo xmlns:space1="space1" space1:a="space1 value" x:b="space2 value"></foo></foo><foo xmlns:space2="space2" x:a="space1 value" space2:b="space2 value">`,
+}, {
+ desc: "start element defining several prefixes for the same name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "a"}, "space"},
+ {Name{"xmlns", "b"}, "space"},
+ {Name{"space", "x"}, "value"},
+ }},
+ },
+ want: `<a:foo xmlns:a="space" a:x="value">`,
+}, {
+ desc: "nested element redefines name space",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "y"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns:x="space"><x:foo x:a="value">`,
+}, {
+ desc: "nested element creates alias for default name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "y"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo xmlns:y="space" y:a="value">`,
+}, {
+ desc: "nested element defines default name space with existing prefix",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns:x="space"><foo xmlns="space" x:a="value">`,
+}, {
+ desc: "nested element uses empty attribute name space when default ns defined",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "attr"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo attr="value">`,
+}, {
+ desc: "redefine xmlns",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"foo", "xmlns"}, "space"},
+ }},
+ },
+ err: `xml: cannot redefine xmlns attribute prefix`,
+}, {
+ desc: "xmlns with explicit name space #1",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xml", "xmlns"}, "space"},
+ }},
+ },
+ want: `<foo xmlns="space">`,
+}, {
+ desc: "xmlns with explicit name space #2",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{xmlURL, "xmlns"}, "space"},
+ }},
+ },
+ want: `<foo xmlns="space">`,
+}, {
+ desc: "empty name space declaration is ignored",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "foo"}, ""},
+ }},
+ },
+ want: `<foo>`,
+}, {
+ desc: "attribute with no name is ignored",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", ""}, "value"},
+ }},
+ },
+ want: `<foo>`,
+}, {
+ desc: "namespace URL with non-valid name",
+ toks: []Token{
+ StartElement{Name{"/34", "foo"}, []Attr{
+ {Name{"/34", "x"}, "value"},
+ }},
+ },
+ want: `<_:foo xmlns:_="/34" _:x="value">`,
+}, {
+ desc: "nested element resets default namespace to empty",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", "xmlns"}, ""},
+ {Name{"", "x"}, "value"},
+ {Name{"space", "x"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo xmlns:space="space" xmlns="" x="value" space:x="value">`,
+}, {
+ desc: "nested element requires empty default name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"", "foo"}, nil},
+ },
+ want: `<foo xmlns="space"><foo xmlns="">`,
+}, {
+ desc: "attribute uses name space from xmlns",
+ toks: []Token{
+ StartElement{Name{"some/space", "foo"}, []Attr{
+ {Name{"", "attr"}, "value"},
+ {Name{"some/space", "other"}, "other value"},
+ }},
+ },
+ want: `<space:foo xmlns:space="some/space" attr="value" space:other="other value">`,
+}, {
+ desc: "default name space should not be used by attributes",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"xmlns", "bar"}, "space"},
+ {Name{"space", "baz"}, "foo"},
+ }},
+ StartElement{Name{"space", "baz"}, nil},
+ EndElement{Name{"space", "baz"}},
+ EndElement{Name{"space", "foo"}},
+ },
+ want: `<foo xmlns:bar="space" xmlns="space" bar:baz="foo"><baz></baz></foo>`,
+}, {
+ desc: "default name space not used by attributes, not explicitly defined",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"space", "baz"}, "foo"},
+ }},
+ StartElement{Name{"space", "baz"}, nil},
+ EndElement{Name{"space", "baz"}},
+ EndElement{Name{"space", "foo"}},
+ },
+ want: `<foo xmlns:space="space" xmlns="space" space:baz="foo"><baz></baz></foo>`,
+}, {
+ desc: "impossible xmlns declaration",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "bar"}, []Attr{
+ {Name{"space", "attr"}, "value"},
+ }},
+ },
+ want: `<foo><space:bar xmlns:space="space" space:attr="value">`,
+}}
+
+func TestEncodeToken(t *testing.T) {
+loop:
+ for i, tt := range encodeTokenTests {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ var err error
+ for j, tok := range tt.toks {
+ err = enc.EncodeToken(tok)
+ if err != nil && j < len(tt.toks)-1 {
+ t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err)
+ continue loop
+ }
+ }
+ errorf := func(f string, a ...interface{}) {
+ t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...))
+ }
+ switch {
+ case tt.err != "" && err == nil:
+ errorf(" expected error; got none")
+ continue
+ case tt.err == "" && err != nil:
+ errorf(" got error: %v", err)
+ continue
+ case tt.err != "" && err != nil && tt.err != err.Error():
+ errorf(" error mismatch; got %v, want %v", err, tt.err)
+ continue
+ }
+ if err := enc.Flush(); err != nil {
+ errorf(" %v", err)
+ continue
+ }
+ if got := buf.String(); got != tt.want {
+ errorf("\ngot %v\nwant %v", got, tt.want)
+ continue
+ }
+ }
+}
+
+func TestProcInstEncodeToken(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+
+ if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil {
+ t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err)
+ }
+
+ if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil {
+ t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst")
+ }
+
+ if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil {
+ t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token")
+ }
+}
+
+func TestDecodeEncode(t *testing.T) {
+ var in, out bytes.Buffer
+ in.WriteString(`<?xml version="1.0" encoding="UTF-8"?>
+<?Target Instruction?>
+<root>
+</root>
+`)
+ dec := NewDecoder(&in)
+ enc := NewEncoder(&out)
+ for tok, err := dec.Token(); err == nil; tok, err = dec.Token() {
+ err = enc.EncodeToken(tok)
+ if err != nil {
+ t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err)
+ }
+ }
+}
+
+// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race.
+func TestRace9796(t *testing.T) {
+ type A struct{}
+ type B struct {
+ C []A `xml:"X>Y"`
+ }
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ go func() {
+ Marshal(B{[]A{A{}}})
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestIsValidDirective(t *testing.T) {
+ testOK := []string{
+ "<>",
+ "< < > >",
+ "<!DOCTYPE '<' '>' '>' <!--nothing-->>",
+ "<!DOCTYPE doc [ <!ELEMENT doc ANY> <!ELEMENT doc ANY> ]>",
+ "<!DOCTYPE doc [ <!ELEMENT doc \"ANY> '<' <!E\" LEMENT '>' doc ANY> ]>",
+ "<!DOCTYPE doc <!-- just>>>> a < comment --> [ <!ITEM anything> ] >",
+ }
+ testKO := []string{
+ "<",
+ ">",
+ "<!--",
+ "-->",
+ "< > > < < >",
+ "<!dummy <!-- > -->",
+ "<!DOCTYPE doc '>",
+ "<!DOCTYPE doc '>'",
+ "<!DOCTYPE doc <!--comment>",
+ }
+ for _, s := range testOK {
+ if !isValidDirective(Directive(s)) {
+ t.Errorf("Directive %q is expected to be valid", s)
+ }
+ }
+ for _, s := range testKO {
+ if isValidDirective(Directive(s)) {
+ t.Errorf("Directive %q is expected to be invalid", s)
+ }
+ }
+}
+
+// Issue 11719. EncodeToken used to silently eat tokens with an invalid type.
+func TestSimpleUseOfEncodeToken(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil {
+ t.Errorf("enc.EncodeToken: pointer type should be rejected")
+ }
+ if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil {
+ t.Errorf("enc.EncodeToken: pointer type should be rejected")
+ }
+ if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil {
+ t.Errorf("enc.EncodeToken: StartElement %s", err)
+ }
+ if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil {
+ t.Errorf("enc.EncodeToken: EndElement %s", err)
+ }
+ if err := enc.EncodeToken(Universe{}); err == nil {
+ t.Errorf("enc.EncodeToken: invalid type not caught")
+ }
+ if err := enc.Flush(); err != nil {
+ t.Errorf("enc.Flush: %s", err)
+ }
+ if buf.Len() == 0 {
+ t.Errorf("enc.EncodeToken: empty buffer")
+ }
+ want := "<object2></object2>"
+ if buf.String() != want {
+ t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String())
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go
new file mode 100644
index 000000000..75b9f2ba1
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go
@@ -0,0 +1,692 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
+// an XML element is an order-dependent collection of anonymous
+// values, while a data structure is an order-independent collection
+// of named values.
+// See package json for a textual representation more suitable
+// to data structures.
+
+// Unmarshal parses the XML-encoded data and stores the result in
+// the value pointed to by v, which must be an arbitrary struct,
+// slice, or string. Well-formed data that does not fit into v is
+// discarded.
+//
+// Because Unmarshal uses the reflect package, it can only assign
+// to exported (upper case) fields. Unmarshal uses a case-sensitive
+// comparison to match XML element names to tag values and struct
+// field names.
+//
+// Unmarshal maps an XML element to a struct using the following rules.
+// In the rules, the tag of a field refers to the value associated with the
+// key 'xml' in the struct field's tag (see the example above).
+//
+// * If the struct has a field of type []byte or string with tag
+// ",innerxml", Unmarshal accumulates the raw XML nested inside the
+// element in that field. The rest of the rules still apply.
+//
+// * If the struct has a field named XMLName of type xml.Name,
+// Unmarshal records the element name in that field.
+//
+// * If the XMLName field has an associated tag of the form
+// "name" or "namespace-URL name", the XML element must have
+// the given name (and, optionally, name space) or else Unmarshal
+// returns an error.
+//
+// * If the XML element has an attribute whose name matches a
+// struct field name with an associated tag containing ",attr" or
+// the explicit name in a struct field tag of the form "name,attr",
+// Unmarshal records the attribute value in that field.
+//
+// * If the XML element contains character data, that data is
+// accumulated in the first struct field that has tag ",chardata".
+// The struct field may have type []byte or string.
+// If there is no such field, the character data is discarded.
+//
+// * If the XML element contains comments, they are accumulated in
+// the first struct field that has tag ",comment". The struct
+// field may have type []byte or string. If there is no such
+// field, the comments are discarded.
+//
+// * If the XML element contains a sub-element whose name matches
+// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
+// will descend into the XML structure looking for elements with the
+// given names, and will map the innermost elements to that struct
+// field. A tag starting with ">" is equivalent to one starting
+// with the field name followed by ">".
+//
+// * If the XML element contains a sub-element whose name matches
+// a struct field's XMLName tag and the struct field has no
+// explicit name tag as per the previous rule, unmarshal maps
+// the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element whose name matches a
+// field without any mode flags (",attr", ",chardata", etc), Unmarshal
+// maps the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element that hasn't matched any
+// of the above rules and the struct has a field with tag ",any",
+// unmarshal maps the sub-element to that struct field.
+//
+// * An anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// * A struct field with tag "-" is never unmarshalled into.
+//
+// Unmarshal maps an XML element to a string or []byte by saving the
+// concatenation of that element's character data in the string or
+// []byte. The saved []byte is never nil.
+//
+// Unmarshal maps an attribute value to a string or []byte by saving
+// the value in the string or slice.
+//
+// Unmarshal maps an XML element to a slice by extending the length of
+// the slice and mapping the element to the newly created value.
+//
+// Unmarshal maps an XML element or attribute value to a bool by
+// setting it to the boolean value represented by the string.
+//
+// Unmarshal maps an XML element or attribute value to an integer or
+// floating-point field by setting the field to the result of
+// interpreting the string value in decimal. There is no check for
+// overflow.
+//
+// Unmarshal maps an XML element to an xml.Name by recording the
+// element name.
+//
+// Unmarshal maps an XML element to a pointer by setting the pointer
+// to a freshly allocated value and then mapping the element to that value.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ return NewDecoder(bytes.NewReader(data)).Decode(v)
+}
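+
+// Illustrative sketch: unmarshalling a small document into tagged struct
+// fields per the rules above. The Person/Email layout and the sample input
+// are hypothetical, chosen only to exercise element, attribute and nested
+// matching.
+func unmarshalSketch() error {
+ type Email struct {
+ Where string `xml:"where,attr"`
+ Addr string `xml:"addr"`
+ }
+ type Person struct {
+ XMLName Name `xml:"person"`
+ Name string `xml:"name"`
+ Emails []Email `xml:"email"`
+ }
+ const doc = `<person><name>Grace</name>` +
+ `<email where="home"><addr>grace@example.org</addr></email>` +
+ `<email where="work"><addr>grace@example.com</addr></email></person>`
+ var p Person
+ if err := Unmarshal([]byte(doc), &p); err != nil {
+ return err
+ }
+ fmt.Printf("%s has %d addresses; the first is %s (%s)\n",
+ p.Name, len(p.Emails), p.Emails[0].Addr, p.Emails[0].Where)
+ return nil
+}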
+
+// Decode works like xml.Unmarshal, except it reads the decoder
+// stream to find the start element.
+func (d *Decoder) Decode(v interface{}) error {
+ return d.DecodeElement(v, nil)
+}
+
+// DecodeElement works like xml.Unmarshal except that it takes
+// a pointer to the start XML element to decode into v.
+// It is useful when a client reads some raw XML tokens itself
+// but also wants to defer to Unmarshal for some elements.
+func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+ return d.unmarshal(val.Elem(), start)
+}
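+
+// Illustrative sketch: DecodeElement combined with a manual token loop, as
+// described above. The caller scans for <item> start elements itself and
+// defers each one to DecodeElement; the element name and item layout are
+// hypothetical.
+func decodeItemsSketch(d *Decoder) ([]string, error) {
+ type item struct {
+ Name string `xml:"name"`
+ }
+ var names []string
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ // At the natural end of the input err is io.EOF; callers will
+ // usually want to treat that case as success.
+ return names, err
+ }
+ if se, ok := tok.(StartElement); ok && se.Name.Local == "item" {
+ var it item
+ if err := d.DecodeElement(&it, &se); err != nil {
+ return nil, err
+ }
+ names = append(names, it.Name)
+ }
+ }
+}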
+
+// An UnmarshalError represents an error in the unmarshalling process.
+type UnmarshalError string
+
+func (e UnmarshalError) Error() string { return string(e) }
+
+// Unmarshaler is the interface implemented by objects that can unmarshal
+// an XML element description of themselves.
+//
+// UnmarshalXML decodes a single XML element
+// beginning with the given start element.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXML must consume exactly one XML element.
+// One common implementation strategy is to unmarshal into
+// a separate value with a layout matching the expected XML
+// using d.DecodeElement, and then to copy the data from
+// that value into the receiver.
+// Another common strategy is to use d.Token to process the
+// XML object one token at a time.
+// UnmarshalXML may not use d.RawToken.
+type Unmarshaler interface {
+ UnmarshalXML(d *Decoder, start StartElement) error
+}
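+
+// Illustrative sketch of the first strategy described above: an Unmarshaler
+// that decodes into an auxiliary value with DecodeElement and then copies
+// the data into the receiver. The rgbSketch type and its hex chardata layout
+// are hypothetical.
+type rgbSketch struct{ R, G, B uint8 }
+
+var _ Unmarshaler = (*rgbSketch)(nil)
+
+func (c *rgbSketch) UnmarshalXML(d *Decoder, start StartElement) error {
+ var aux struct {
+ Hex string `xml:",chardata"`
+ }
+ if err := d.DecodeElement(&aux, &start); err != nil {
+ return err
+ }
+ n, err := strconv.ParseUint(strings.TrimSpace(aux.Hex), 16, 32)
+ if err != nil {
+ return err
+ }
+ c.R, c.G, c.B = uint8(n>>16), uint8(n>>8), uint8(n)
+ return nil
+}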
+
+// UnmarshalerAttr is the interface implemented by objects that can unmarshal
+// an XML attribute description of themselves.
+//
+// UnmarshalXMLAttr decodes a single XML attribute.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type UnmarshalerAttr interface {
+ UnmarshalXMLAttr(attr Attr) error
+}
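+
+// Illustrative sketch: an UnmarshalerAttr that splits a comma-separated
+// attribute value such as tags="a,b,c". The csvAttrSketch name is
+// hypothetical.
+type csvAttrSketch []string
+
+var _ UnmarshalerAttr = (*csvAttrSketch)(nil)
+
+func (v *csvAttrSketch) UnmarshalXMLAttr(attr Attr) error {
+ if attr.Value == "" {
+ *v = nil
+ return nil
+ }
+ *v = strings.Split(attr.Value, ",")
+ return nil
+}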
+
+// receiverType returns the receiver type to use in an expression like "%s.MethodName".
+func receiverType(val interface{}) string {
+ t := reflect.TypeOf(val)
+ if t.Name() != "" {
+ return t.String()
+ }
+ return "(" + t.String() + ")"
+}
+
+// unmarshalInterface unmarshals a single XML element into val.
+// start is the opening tag of the element.
+func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
+ // Record that decoder must stop at end tag corresponding to start.
+ p.pushEOF()
+
+ p.unmarshalDepth++
+ err := val.UnmarshalXML(p, *start)
+ p.unmarshalDepth--
+ if err != nil {
+ p.popEOF()
+ return err
+ }
+
+ if !p.popEOF() {
+ return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
+ }
+
+ return nil
+}
+
+// unmarshalTextInterface unmarshals a single XML element into val.
+// The chardata contained in the element (but not its children)
+// is passed to the text unmarshaler.
+func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
+ var buf []byte
+ depth := 1
+ for depth > 0 {
+ t, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := t.(type) {
+ case CharData:
+ if depth == 1 {
+ buf = append(buf, t...)
+ }
+ case StartElement:
+ depth++
+ case EndElement:
+ depth--
+ }
+ }
+ return val.UnmarshalText(buf)
+}
+
+// unmarshalAttr unmarshals a single XML attribute into val.
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
+ return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ }
+
+ // Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ }
+
+ copyValue(val, []byte(attr.Value))
+ return nil
+}
+
+var (
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// Unmarshal a single XML element into val.
+func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+ // Find start element if we need it.
+ if start == nil {
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ if t, ok := tok.(StartElement); ok {
+ start = &t
+ break
+ }
+ }
+ }
+
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ e := val.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() {
+ val = e
+ }
+ }
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
+ return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
+ }
+ }
+
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
+ }
+ }
+
+ var (
+ data []byte
+ saveData reflect.Value
+ comment []byte
+ saveComment reflect.Value
+ saveXML reflect.Value
+ saveXMLIndex int
+ saveXMLData []byte
+ saveAny reflect.Value
+ sv reflect.Value
+ tinfo *typeInfo
+ err error
+ )
+
+ switch v := val; v.Kind() {
+ default:
+ return errors.New("unknown type " + v.Type().String())
+
+ case reflect.Interface:
+ // TODO: For now, simply ignore the field. In the near
+ // future we may choose to unmarshal the start
+ // element on it, if not nil.
+ return p.Skip()
+
+ case reflect.Slice:
+ typ := v.Type()
+ if typ.Elem().Kind() == reflect.Uint8 {
+ // []byte
+ saveData = v
+ break
+ }
+
+ // Slice of element values.
+ // Grow slice.
+ n := v.Len()
+ if n >= v.Cap() {
+ ncap := 2 * n
+ if ncap < 4 {
+ ncap = 4
+ }
+ new := reflect.MakeSlice(typ, n, ncap)
+ reflect.Copy(new, v)
+ v.Set(new)
+ }
+ v.SetLen(n + 1)
+
+ // Recur to read element into slice.
+ if err := p.unmarshal(v.Index(n), start); err != nil {
+ v.SetLen(n)
+ return err
+ }
+ return nil
+
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+ saveData = v
+
+ case reflect.Struct:
+ typ := v.Type()
+ if typ == nameType {
+ v.Set(reflect.ValueOf(start.Name))
+ break
+ }
+
+ sv = v
+ tinfo, err = getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Validate and assign element name.
+ if tinfo.xmlname != nil {
+ finfo := tinfo.xmlname
+ if finfo.name != "" && finfo.name != start.Name.Local {
+ return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
+ }
+ if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
+ if start.Name.Space == "" {
+ e += "no name space"
+ } else {
+ e += start.Name.Space
+ }
+ return UnmarshalError(e)
+ }
+ fv := finfo.value(sv)
+ if _, ok := fv.Interface().(Name); ok {
+ fv.Set(reflect.ValueOf(start.Name))
+ }
+ }
+
+ // Assign attributes.
+ // Also, determine whether we need to save character data or comments.
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ switch finfo.flags & fMode {
+ case fAttr:
+ strv := finfo.value(sv)
+ // Look for attribute.
+ for _, a := range start.Attr {
+ if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
+ if err := p.unmarshalAttr(strv, a); err != nil {
+ return err
+ }
+ break
+ }
+ }
+
+ case fCharData:
+ if !saveData.IsValid() {
+ saveData = finfo.value(sv)
+ }
+
+ case fComment:
+ if !saveComment.IsValid() {
+ saveComment = finfo.value(sv)
+ }
+
+ case fAny, fAny | fElement:
+ if !saveAny.IsValid() {
+ saveAny = finfo.value(sv)
+ }
+
+ case fInnerXml:
+ if !saveXML.IsValid() {
+ saveXML = finfo.value(sv)
+ if p.saved == nil {
+ saveXMLIndex = 0
+ p.saved = new(bytes.Buffer)
+ } else {
+ saveXMLIndex = p.savedOffset()
+ }
+ }
+ }
+ }
+ }
+
+ // Find end element.
+ // Process sub-elements along the way.
+Loop:
+ for {
+ var savedOffset int
+ if saveXML.IsValid() {
+ savedOffset = p.savedOffset()
+ }
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed := false
+ if sv.IsValid() {
+ consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
+ if err != nil {
+ return err
+ }
+ if !consumed && saveAny.IsValid() {
+ consumed = true
+ if err := p.unmarshal(saveAny, &t); err != nil {
+ return err
+ }
+ }
+ }
+ if !consumed {
+ if err := p.Skip(); err != nil {
+ return err
+ }
+ }
+
+ case EndElement:
+ if saveXML.IsValid() {
+ saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
+ if saveXMLIndex == 0 {
+ p.saved = nil
+ }
+ }
+ break Loop
+
+ case CharData:
+ if saveData.IsValid() {
+ data = append(data, t...)
+ }
+
+ case Comment:
+ if saveComment.IsValid() {
+ comment = append(comment, t...)
+ }
+ }
+ }
+
+ if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
+ if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+
+ if saveData.IsValid() && saveData.CanAddr() {
+ pv := saveData.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+ }
+
+ if err := copyValue(saveData, data); err != nil {
+ return err
+ }
+
+ switch t := saveComment; t.Kind() {
+ case reflect.String:
+ t.SetString(string(comment))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(comment))
+ }
+
+ switch t := saveXML; t.Kind() {
+ case reflect.String:
+ t.SetString(string(saveXMLData))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(saveXMLData))
+ }
+
+ return nil
+}
+
+func copyValue(dst reflect.Value, src []byte) (err error) {
+ dst0 := dst
+
+ if dst.Kind() == reflect.Ptr {
+ if dst.IsNil() {
+ dst.Set(reflect.New(dst.Type().Elem()))
+ }
+ dst = dst.Elem()
+ }
+
+ // Save accumulated data.
+ switch dst.Kind() {
+ case reflect.Invalid:
+ // Probably a comment.
+ default:
+ return errors.New("cannot unmarshal into " + dst0.Type().String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetInt(itmp)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetUint(utmp)
+ case reflect.Float32, reflect.Float64:
+ ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetFloat(ftmp)
+ case reflect.Bool:
+ value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
+ if err != nil {
+ return err
+ }
+ dst.SetBool(value)
+ case reflect.String:
+ dst.SetString(string(src))
+ case reflect.Slice:
+ if len(src) == 0 {
+ // non-nil to flag presence
+ src = []byte{}
+ }
+ dst.SetBytes(src)
+ }
+ return nil
+}
+
+// unmarshalPath walks down an XML structure looking for wanted
+// paths, and calls unmarshal on them.
+// The consumed result tells whether XML elements have been consumed
+// from the Decoder until start's matching end element, or if it's
+// still untouched because start is uninteresting for sv's fields.
+func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+ recurse := false
+Loop:
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ continue
+ }
+ for j := range parents {
+ if parents[j] != finfo.parents[j] {
+ continue Loop
+ }
+ }
+ if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
+ // It's a perfect match, unmarshal the field.
+ return true, p.unmarshal(finfo.value(sv), start)
+ }
+ if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
+ // It's a prefix for the field. Break and recurse
+ // since it's not ok for one field path to be itself
+ // the prefix for another field path.
+ recurse = true
+
+ // We can reuse the same slice as long as we
+ // don't try to append to it.
+ parents = finfo.parents[:len(parents)+1]
+ break
+ }
+ }
+ if !recurse {
+ // We have no business with this element.
+ return false, nil
+ }
+ // The element is not a perfect match for any field, but one
+ // or more fields have the path to this element as a parent
+ // prefix. Recurse and attempt to match these.
+ for {
+ var tok Token
+ tok, err = p.Token()
+ if err != nil {
+ return true, err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
+ if err != nil {
+ return true, err
+ }
+ if !consumed2 {
+ if err := p.Skip(); err != nil {
+ return true, err
+ }
+ }
+ case EndElement:
+ return true, nil
+ }
+ }
+}
+
+// Skip reads tokens until it has consumed the end element
+// matching the most recent start element already consumed.
+// It recurs if it encounters a start element, so it can be used to
+// skip nested structures.
+// It returns nil if it finds an end element matching the start
+// element; otherwise it returns an error describing the problem.
+func (d *Decoder) Skip() error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return err
+ }
+ switch tok.(type) {
+ case StartElement:
+ if err := d.Skip(); err != nil {
+ return err
+ }
+ case EndElement:
+ return nil
+ }
+ }
+}
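
For orientation only: the file above is a vendored copy of the unmarshalling logic from the standard library's encoding/xml, so the nested-path handling in unmarshalPath can be exercised through that public package. A minimal, hypothetical sketch (the Result type and document are invented for illustration):

package main

import (
	"encoding/xml"
	"fmt"
)

// Result uses the "a>b>c" tag form that unmarshalPath resolves:
// the decoder descends through <Items> and <Item1> and collects
// every <Value> element found along that path.
type Result struct {
	Before string   `xml:"Before"`
	Values []string `xml:"Items>Item1>Value"`
	After  string   `xml:"After"`
}

func main() {
	const doc = `<Result>
  <Before>1</Before>
  <Items><Item1><Value>A</Value><Value>B</Value></Item1></Items>
  <After>2</After>
</Result>`
	var r Result
	if err := xml.Unmarshal([]byte(doc), &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // prints {Before:1 Values:[A B] After:2}
}
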
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go
new file mode 100644
index 000000000..02f1e10c3
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go
@@ -0,0 +1,744 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Stripped down Atom feed data structures.
+
+func TestUnmarshalFeed(t *testing.T) {
+ var f Feed
+ if err := Unmarshal([]byte(atomFeedString), &f); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(f, atomFeed) {
+ t.Fatalf("have %#v\nwant %#v", f, atomFeed)
+ }
+}
+
+// hget http://codereview.appspot.com/rss/mine/rsc
+const atomFeedString = `
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> `
+
+type Feed struct {
+ XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
+ Title string `xml:"title"`
+ Id string `xml:"id"`
+ Link []Link `xml:"link"`
+ Updated time.Time `xml:"updated,attr"`
+ Author Person `xml:"author"`
+ Entry []Entry `xml:"entry"`
+}
+
+type Entry struct {
+ Title string `xml:"title"`
+ Id string `xml:"id"`
+ Link []Link `xml:"link"`
+ Updated time.Time `xml:"updated"`
+ Author Person `xml:"author"`
+ Summary Text `xml:"summary"`
+}
+
+type Link struct {
+ Rel string `xml:"rel,attr,omitempty"`
+ Href string `xml:"href,attr"`
+}
+
+type Person struct {
+ Name string `xml:"name"`
+ URI string `xml:"uri"`
+ Email string `xml:"email"`
+ InnerXML string `xml:",innerxml"`
+}
+
+type Text struct {
+ Type string `xml:"type,attr,omitempty"`
+ Body string `xml:",chardata"`
+}
+
+var atomFeed = Feed{
+ XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+ Title: "Code Review - My issues",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/"},
+ {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
+ },
+ Id: "http://codereview.appspot.com/",
+ Updated: ParseTime("2009-10-04T01:35:58+00:00"),
+ Author: Person{
+ Name: "rietveld<>",
+ InnerXML: "<name>rietveld&lt;&gt;</name>",
+ },
+ Entry: []Entry{
+ {
+ Title: "rietveld: an attempt at pubsubhubbub\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
+ },
+ Updated: ParseTime("2009-10-04T01:35:58+00:00"),
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&#39;s actual URL in
+the link rel=&quot;self&quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+`,
+ },
+ },
+ {
+ Title: "rietveld: correct tab handling\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
+ },
+ Updated: ParseTime("2009-10-03T23:02:17+00:00"),
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&#39;t know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+`,
+ },
+ },
+ },
+}
+
+const pathTestString = `
+<Result>
+ <Before>1</Before>
+ <Items>
+ <Item1>
+ <Value>A</Value>
+ </Item1>
+ <Item2>
+ <Value>B</Value>
+ </Item2>
+ <Item1>
+ <Value>C</Value>
+ <Value>D</Value>
+ </Item1>
+ <_>
+ <Value>E</Value>
+ </_>
+ </Items>
+ <After>2</After>
+</Result>
+`
+
+type PathTestItem struct {
+ Value string
+}
+
+type PathTestA struct {
+ Items []PathTestItem `xml:">Item1"`
+ Before, After string
+}
+
+type PathTestB struct {
+ Other []PathTestItem `xml:"Items>Item1"`
+ Before, After string
+}
+
+type PathTestC struct {
+ Values1 []string `xml:"Items>Item1>Value"`
+ Values2 []string `xml:"Items>Item2>Value"`
+ Before, After string
+}
+
+type PathTestSet struct {
+ Item1 []PathTestItem
+}
+
+type PathTestD struct {
+ Other PathTestSet `xml:"Items"`
+ Before, After string
+}
+
+type PathTestE struct {
+ Underline string `xml:"Items>_>Value"`
+ Before, After string
+}
+
+var pathTests = []interface{}{
+ &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
+ &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
+ &PathTestE{Underline: "E", Before: "1", After: "2"},
+}
+
+func TestUnmarshalPaths(t *testing.T) {
+ for _, pt := range pathTests {
+ v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
+ if err := Unmarshal([]byte(pathTestString), v); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(v, pt) {
+ t.Fatalf("have %#v\nwant %#v", v, pt)
+ }
+ }
+}
+
+type BadPathTestA struct {
+ First string `xml:"items>item1"`
+ Other string `xml:"items>item2"`
+ Second string `xml:"items"`
+}
+
+type BadPathTestB struct {
+ Other string `xml:"items>item2>value"`
+ First string `xml:"items>item1"`
+ Second string `xml:"items>item1>value"`
+}
+
+type BadPathTestC struct {
+ First string
+ Second string `xml:"First"`
+}
+
+type BadPathTestD struct {
+ BadPathEmbeddedA
+ BadPathEmbeddedB
+}
+
+type BadPathEmbeddedA struct {
+ First string
+}
+
+type BadPathEmbeddedB struct {
+ Second string `xml:"First"`
+}
+
+var badPathTests = []struct {
+ v, e interface{}
+}{
+ {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
+ {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
+ {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
+ {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
+}
+
+func TestUnmarshalBadPaths(t *testing.T) {
+ for _, tt := range badPathTests {
+ err := Unmarshal([]byte(pathTestString), tt.v)
+ if !reflect.DeepEqual(err, tt.e) {
+ t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
+ }
+ }
+}
+
+const OK = "OK"
+const withoutNameTypeData = `
+<?xml version="1.0" charset="utf-8"?>
+<Test3 Attr="OK" />`
+
+type TestThree struct {
+ XMLName Name `xml:"Test3"`
+ Attr string `xml:",attr"`
+}
+
+func TestUnmarshalWithoutNameType(t *testing.T) {
+ var x TestThree
+ if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if x.Attr != OK {
+ t.Fatalf("have %v\nwant %v", x.Attr, OK)
+ }
+}
+
+func TestUnmarshalAttr(t *testing.T) {
+ type ParamVal struct {
+ Int int `xml:"int,attr"`
+ }
+
+ type ParamPtr struct {
+ Int *int `xml:"int,attr"`
+ }
+
+ type ParamStringPtr struct {
+ Int *string `xml:"int,attr"`
+ }
+
+ x := []byte(`<Param int="1" />`)
+
+ p1 := &ParamPtr{}
+ if err := Unmarshal(x, p1); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p1.Int == nil {
+		t.Fatalf("Unmarshal failed into *int field")
+ } else if *p1.Int != 1 {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1)
+ }
+
+ p2 := &ParamVal{}
+ if err := Unmarshal(x, p2); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p2.Int != 1 {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1)
+ }
+
+ p3 := &ParamStringPtr{}
+ if err := Unmarshal(x, p3); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p3.Int == nil {
+		t.Fatalf("Unmarshal failed into *string field")
+ } else if *p3.Int != "1" {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1)
+ }
+}
+
+type Tables struct {
+ HTable string `xml:"http://www.w3.org/TR/html4/ table"`
+ FTable string `xml:"http://www.w3schools.com/furniture table"`
+}
+
+var tables = []struct {
+ xml string
+ tab Tables
+ ns string
+}{
+ {
+ xml: `<Tables>` +
+ `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+ `<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables>` +
+ `<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+ `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
+ `<f:table>world</f:table>` +
+ `<h:table>hello</h:table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables>` +
+ `<table>bogus</table>` +
+ `</Tables>`,
+ tab: Tables{},
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{HTable: "only"},
+ ns: "http://www.w3.org/TR/html4/",
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{FTable: "only"},
+ ns: "http://www.w3schools.com/furniture",
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{},
+ ns: "something else entirely",
+ },
+}
+
+func TestUnmarshalNS(t *testing.T) {
+ for i, tt := range tables {
+ var dst Tables
+ var err error
+ if tt.ns != "" {
+ d := NewDecoder(strings.NewReader(tt.xml))
+ d.DefaultSpace = tt.ns
+ err = d.Decode(&dst)
+ } else {
+ err = Unmarshal([]byte(tt.xml), &dst)
+ }
+ if err != nil {
+ t.Errorf("#%d: Unmarshal: %v", i, err)
+ continue
+ }
+ want := tt.tab
+ if dst != want {
+ t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
+ }
+ }
+}
+
+func TestRoundTrip(t *testing.T) {
+ // From issue 7535
+ const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>`
+ in := bytes.NewBufferString(s)
+ for i := 0; i < 10; i++ {
+ out := &bytes.Buffer{}
+ d := NewDecoder(in)
+ e := NewEncoder(out)
+
+ for {
+ t, err := d.Token()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ fmt.Println("failed:", err)
+ return
+ }
+ e.EncodeToken(t)
+ }
+ e.Flush()
+ in = out
+ }
+ if got := in.String(); got != s {
+ t.Errorf("have: %q\nwant: %q\n", got, s)
+ }
+}
+
+func TestMarshalNS(t *testing.T) {
+ dst := Tables{"hello", "world"}
+ data, err := Marshal(&dst)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>`
+ str := string(data)
+ if str != want {
+ t.Errorf("have: %q\nwant: %q\n", str, want)
+ }
+}
+
+type TableAttrs struct {
+ TAttr TAttr
+}
+
+type TAttr struct {
+ HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"`
+ FTable string `xml:"http://www.w3schools.com/furniture table,attr"`
+ Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"`
+ Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"`
+ Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"`
+ Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"`
+ Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"`
+}
+
+var tableAttrs = []struct {
+ xml string
+ tab TableAttrs
+ ns string
+}{
+ {
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
+ `h:table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` +
+ `table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `table="bogus" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
+ ns: "http://www.w3schools.com/furniture",
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` +
+ `table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
+ ns: "http://www.w3.org/TR/html4/",
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `table="bogus" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{},
+ ns: "something else entirely",
+ },
+}
+
+func TestUnmarshalNSAttr(t *testing.T) {
+ for i, tt := range tableAttrs {
+ var dst TableAttrs
+ var err error
+ if tt.ns != "" {
+ d := NewDecoder(strings.NewReader(tt.xml))
+ d.DefaultSpace = tt.ns
+ err = d.Decode(&dst)
+ } else {
+ err = Unmarshal([]byte(tt.xml), &dst)
+ }
+ if err != nil {
+ t.Errorf("#%d: Unmarshal: %v", i, err)
+ continue
+ }
+ want := tt.tab
+ if dst != want {
+ t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
+ }
+ }
+}
+
+func TestMarshalNSAttr(t *testing.T) {
+ src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}}
+ data, err := Marshal(&src)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>`
+ str := string(data)
+ if str != want {
+ t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want)
+ }
+
+ var dst TableAttrs
+ if err := Unmarshal(data, &dst); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+
+ if dst != src {
+ t.Errorf("Unmarshal = %q, want %q", dst, src)
+ }
+}
+
+type MyCharData struct {
+ body string
+}
+
+func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {
+ for {
+ t, err := d.Token()
+ if err == io.EOF { // found end of element
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if char, ok := t.(CharData); ok {
+ m.body += string(char)
+ }
+ }
+ return nil
+}
+
+var _ Unmarshaler = (*MyCharData)(nil)
+
+func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {
+ panic("must not call")
+}
+
+type MyAttr struct {
+ attr string
+}
+
+func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {
+ m.attr = attr.Value
+ return nil
+}
+
+var _ UnmarshalerAttr = (*MyAttr)(nil)
+
+type MyStruct struct {
+ Data *MyCharData
+ Attr *MyAttr `xml:",attr"`
+
+ Data2 MyCharData
+ Attr2 MyAttr `xml:",attr"`
+}
+
+func TestUnmarshaler(t *testing.T) {
+ xml := `<?xml version="1.0" encoding="utf-8"?>
+ <MyStruct Attr="attr1" Attr2="attr2">
+ <Data>hello <!-- comment -->world</Data>
+ <Data2>howdy <!-- comment -->world</Data2>
+ </MyStruct>
+ `
+
+ var m MyStruct
+ if err := Unmarshal([]byte(xml), &m); err != nil {
+ t.Fatal(err)
+ }
+
+ if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" {
+ t.Errorf("m=%#+v\n", m)
+ }
+}
+
+type Pea struct {
+ Cotelydon string
+}
+
+type Pod struct {
+ Pea interface{} `xml:"Pea"`
+}
+
+// https://golang.org/issue/6836
+func TestUnmarshalIntoInterface(t *testing.T) {
+ pod := new(Pod)
+ pod.Pea = new(Pea)
+ xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`
+ err := Unmarshal([]byte(xml), pod)
+ if err != nil {
+ t.Fatalf("failed to unmarshal %q: %v", xml, err)
+ }
+ pea, ok := pod.Pea.(*Pea)
+ if !ok {
+ t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea)
+ }
+ have, want := pea.Cotelydon, "Green stuff"
+ if have != want {
+ t.Errorf("failed to unmarshal into interface, have %q want %q", have, want)
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
new file mode 100644
index 000000000..c9a6421f2
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
@@ -0,0 +1,371 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// typeInfo holds details for the xml representation of a type.
+type typeInfo struct {
+ xmlname *fieldInfo
+ fields []fieldInfo
+}
+
+// fieldInfo holds details for the xml representation of a single field.
+type fieldInfo struct {
+ idx []int
+ name string
+ xmlns string
+ flags fieldFlags
+ parents []string
+}
+
+type fieldFlags int
+
+const (
+ fElement fieldFlags = 1 << iota
+ fAttr
+ fCharData
+ fInnerXml
+ fComment
+ fAny
+
+ fOmitEmpty
+
+ fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
+)
+
+var tinfoMap = make(map[reflect.Type]*typeInfo)
+var tinfoLock sync.RWMutex
+
+var nameType = reflect.TypeOf(Name{})
+
+// getTypeInfo returns the typeInfo structure with details necessary
+// for marshalling and unmarshalling typ.
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
+ tinfoLock.RLock()
+ tinfo, ok := tinfoMap[typ]
+ tinfoLock.RUnlock()
+ if ok {
+ return tinfo, nil
+ }
+ tinfo = &typeInfo{}
+ if typ.Kind() == reflect.Struct && typ != nameType {
+ n := typ.NumField()
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
+ continue // Private field
+ }
+
+ // For embedded structs, embed its fields.
+ if f.Anonymous {
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Struct {
+ inner, err := getTypeInfo(t)
+ if err != nil {
+ return nil, err
+ }
+ if tinfo.xmlname == nil {
+ tinfo.xmlname = inner.xmlname
+ }
+ for _, finfo := range inner.fields {
+ finfo.idx = append([]int{i}, finfo.idx...)
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ }
+
+ finfo, err := structFieldInfo(typ, &f)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.Name == "XMLName" {
+ tinfo.xmlname = finfo
+ continue
+ }
+
+ // Add the field if it doesn't conflict with other fields.
+ if err := addFieldInfo(typ, tinfo, finfo); err != nil {
+ return nil, err
+ }
+ }
+ }
+ tinfoLock.Lock()
+ tinfoMap[typ] = tinfo
+ tinfoLock.Unlock()
+ return tinfo, nil
+}
+
+// structFieldInfo builds and returns a fieldInfo for f.
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
+ finfo := &fieldInfo{idx: f.Index}
+
+ // Split the tag from the xml namespace if necessary.
+ tag := f.Tag.Get("xml")
+ if i := strings.Index(tag, " "); i >= 0 {
+ finfo.xmlns, tag = tag[:i], tag[i+1:]
+ }
+
+ // Parse flags.
+ tokens := strings.Split(tag, ",")
+ if len(tokens) == 1 {
+ finfo.flags = fElement
+ } else {
+ tag = tokens[0]
+ for _, flag := range tokens[1:] {
+ switch flag {
+ case "attr":
+ finfo.flags |= fAttr
+ case "chardata":
+ finfo.flags |= fCharData
+ case "innerxml":
+ finfo.flags |= fInnerXml
+ case "comment":
+ finfo.flags |= fComment
+ case "any":
+ finfo.flags |= fAny
+ case "omitempty":
+ finfo.flags |= fOmitEmpty
+ }
+ }
+
+ // Validate the flags used.
+ valid := true
+ switch mode := finfo.flags & fMode; mode {
+ case 0:
+ finfo.flags |= fElement
+ case fAttr, fCharData, fInnerXml, fComment, fAny:
+ if f.Name == "XMLName" || tag != "" && mode != fAttr {
+ valid = false
+ }
+ default:
+ // This will also catch multiple modes in a single field.
+ valid = false
+ }
+ if finfo.flags&fMode == fAny {
+ finfo.flags |= fElement
+ }
+ if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
+ valid = false
+ }
+ if !valid {
+ return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+ }
+
+ // Use of xmlns without a name is not allowed.
+ if finfo.xmlns != "" && tag == "" {
+ return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+
+ if f.Name == "XMLName" {
+ // The XMLName field records the XML element name. Don't
+ // process it as usual because its name should default to
+ // empty rather than to the field name.
+ finfo.name = tag
+ return finfo, nil
+ }
+
+ if tag == "" {
+ // If the name part of the tag is completely empty, get
+ // default from XMLName of underlying struct if feasible,
+ // or field name otherwise.
+ if xmlname := lookupXMLName(f.Type); xmlname != nil {
+ finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
+ } else {
+ finfo.name = f.Name
+ }
+ return finfo, nil
+ }
+
+ if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
+		// If it's an element with no namespace specified, get the default
+ // from the XMLName of enclosing struct if possible.
+ if xmlname := lookupXMLName(typ); xmlname != nil {
+ finfo.xmlns = xmlname.xmlns
+ }
+ }
+
+ // Prepare field name and parents.
+ parents := strings.Split(tag, ">")
+ if parents[0] == "" {
+ parents[0] = f.Name
+ }
+ if parents[len(parents)-1] == "" {
+ return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
+ }
+ finfo.name = parents[len(parents)-1]
+ if len(parents) > 1 {
+ if (finfo.flags & fElement) == 0 {
+ return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
+ }
+ finfo.parents = parents[:len(parents)-1]
+ }
+
+ // If the field type has an XMLName field, the names must match
+ // so that the behavior of both marshalling and unmarshalling
+ // is straightforward and unambiguous.
+ if finfo.flags&fElement != 0 {
+ ftyp := f.Type
+ xmlname := lookupXMLName(ftyp)
+ if xmlname != nil && xmlname.name != finfo.name {
+ return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
+ finfo.name, typ, f.Name, xmlname.name, ftyp)
+ }
+ }
+ return finfo, nil
+}
+
+// lookupXMLName returns the fieldInfo for typ's XMLName field
+// in case it exists and has a valid xml field tag, otherwise
+// it returns nil.
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
+ for typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+ if typ.Kind() != reflect.Struct {
+ return nil
+ }
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ f := typ.Field(i)
+ if f.Name != "XMLName" {
+ continue
+ }
+ finfo, err := structFieldInfo(typ, &f)
+ if finfo.name != "" && err == nil {
+ return finfo
+ }
+ // Also consider errors as a non-existent field tag
+ // and let getTypeInfo itself report the error.
+ break
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+// addFieldInfo adds finfo to tinfo.fields if there are no
+// conflicts, or if conflicts arise from previous fields that were
+// obtained from deeper embedded structures than finfo. In the latter
+// case, the conflicting entries are dropped.
+// A conflict occurs when the path (parent + name) to a field is
+// itself a prefix of another path, or when two paths match exactly.
+// It is okay for field paths to share a common, shorter prefix.
+func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
+ var conflicts []int
+Loop:
+ // First, figure all conflicts. Most working code will have none.
+ for i := range tinfo.fields {
+ oldf := &tinfo.fields[i]
+ if oldf.flags&fMode != newf.flags&fMode {
+ continue
+ }
+ if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
+ continue
+ }
+ minl := min(len(newf.parents), len(oldf.parents))
+ for p := 0; p < minl; p++ {
+ if oldf.parents[p] != newf.parents[p] {
+ continue Loop
+ }
+ }
+ if len(oldf.parents) > len(newf.parents) {
+ if oldf.parents[len(newf.parents)] == newf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else if len(oldf.parents) < len(newf.parents) {
+ if newf.parents[len(oldf.parents)] == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else {
+ if newf.name == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ }
+ }
+ // Without conflicts, add the new field and return.
+ if conflicts == nil {
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+ }
+
+ // If any conflict is shallower, ignore the new field.
+ // This matches the Go field resolution on embedding.
+ for _, i := range conflicts {
+ if len(tinfo.fields[i].idx) < len(newf.idx) {
+ return nil
+ }
+ }
+
+ // Otherwise, if any of them is at the same depth level, it's an error.
+ for _, i := range conflicts {
+ oldf := &tinfo.fields[i]
+ if len(oldf.idx) == len(newf.idx) {
+ f1 := typ.FieldByIndex(oldf.idx)
+ f2 := typ.FieldByIndex(newf.idx)
+ return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
+ }
+ }
+
+ // Otherwise, the new field is shallower, and thus takes precedence,
+ // so drop the conflicting fields from tinfo and append the new one.
+ for c := len(conflicts) - 1; c >= 0; c-- {
+ i := conflicts[c]
+ copy(tinfo.fields[i:], tinfo.fields[i+1:])
+ tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
+ }
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+}
+
+// A TagPathError represents an error in the unmarshalling process
+// caused by the use of field tags with conflicting paths.
+type TagPathError struct {
+ Struct reflect.Type
+ Field1, Tag1 string
+ Field2, Tag2 string
+}
+
+func (e *TagPathError) Error() string {
+ return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
+}
+
+// value returns v's field value corresponding to finfo.
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
+// and dereferences pointers as necessary.
+func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
+ for i, x := range finfo.idx {
+ if i > 0 {
+ t := v.Type()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
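
For reference, the tag modes validated by structFieldInfo above (attr, chardata, innerxml, comment, any, omitempty) are the same options exposed by the standard encoding/xml package that this vendored file mirrors. A minimal, hypothetical sketch of a struct using several of them:

package main

import (
	"encoding/xml"
	"fmt"
)

// Note maps one field to each of the main tag modes recognized by
// structFieldInfo: fAttr, fCharData, fInnerXml and fComment.
type Note struct {
	XMLName xml.Name `xml:"note"`
	Lang    string   `xml:"lang,attr"`
	Body    string   `xml:",chardata"`
	Raw     string   `xml:",innerxml"`
	Comment string   `xml:",comment"`
}

func main() {
	const doc = `<note lang="en">hello <!-- aside --> world</note>`
	var n Note
	if err := xml.Unmarshal([]byte(doc), &n); err != nil {
		panic(err)
	}
	// Body holds the character data with the comment stripped,
	// Comment holds the comment text, Raw keeps the inner XML verbatim.
	fmt.Printf("lang=%q\nbody=%q\ncomment=%q\nraw=%q\n", n.Lang, n.Body, n.Comment, n.Raw)
}
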
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go
new file mode 100644
index 000000000..ffab4a70c
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go
@@ -0,0 +1,1998 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xml implements a simple XML 1.0 parser that
+// understands XML name spaces.
+package xml
+
+// References:
+// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
+// XML name spaces: http://www.w3.org/TR/REC-xml-names/
+
+// TODO(rsc):
+// Test error handling.
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A SyntaxError represents a syntax error in the XML input stream.
+type SyntaxError struct {
+ Msg string
+ Line int
+}
+
+func (e *SyntaxError) Error() string {
+ return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg
+}
+
+// A Name represents an XML name (Local) annotated with a name space
+// identifier (Space). In tokens returned by Decoder.Token, the Space
+// identifier is given as a canonical URL, not the short prefix used in
+// the document being parsed.
+//
+// As a special case, XML namespace declarations will use the literal
+// string "xmlns" for the Space field instead of the fully resolved URL.
+// See Encoder.EncodeToken for more information on namespace encoding
+// behaviour.
+type Name struct {
+ Space, Local string
+}
+
+// isNamespace reports whether the name is a namespace-defining name.
+func (name Name) isNamespace() bool {
+ return name.Local == "xmlns" || name.Space == "xmlns"
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+// A Token is an interface holding one of the token types:
+// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
+type Token interface{}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+ return EndElement{e.Name}
+}
+
+// setDefaultNamespace sets the namespace of the element
+// as the default for all elements contained within it.
+func (e *StartElement) setDefaultNamespace() {
+ if e.Name.Space == "" {
+ // If there's no namespace on the element, don't
+ // set the default. Strictly speaking this might be wrong, as
+ // we can't tell if the element had no namespace set
+ // or was just using the default namespace.
+ return
+ }
+ // Don't add a default name space if there's already one set.
+ for _, attr := range e.Attr {
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ return
+ }
+ }
+ e.Attr = append(e.Attr, Attr{
+ Name: Name{
+ Local: "xmlns",
+ },
+ Value: e.Name.Space,
+ })
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+ b1 := make([]byte, len(b))
+ copy(b1, b)
+ return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+ Target string
+ Inst []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+ p.Inst = makeCopy(p.Inst)
+ return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+ switch v := t.(type) {
+ case CharData:
+ return v.Copy()
+ case Comment:
+ return v.Copy()
+ case Directive:
+ return v.Copy()
+ case ProcInst:
+ return v.Copy()
+ case StartElement:
+ return v.Copy()
+ }
+ return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+ // Strict defaults to true, enforcing the requirements
+ // of the XML specification.
+ // If set to false, the parser allows input containing common
+ // mistakes:
+ // * If an element is missing an end tag, the parser invents
+ // end tags as necessary to keep the return values from Token
+ // properly balanced.
+ // * In attribute values and character data, unknown or malformed
+ // character entities (sequences beginning with &) are left alone.
+ //
+ // Setting:
+ //
+ // d.Strict = false;
+ // d.AutoClose = HTMLAutoClose;
+ // d.Entity = HTMLEntity
+ //
+ // creates a parser that can handle typical HTML.
+ //
+ // Strict mode does not enforce the requirements of the XML name spaces TR.
+ // In particular it does not reject name space tags using undefined prefixes.
+ // Such tags are recorded with the unknown prefix as the name space URL.
+ Strict bool
+
+ // When Strict == false, AutoClose indicates a set of elements to
+ // consider closed immediately after they are opened, regardless
+ // of whether an end element is present.
+ AutoClose []string
+
+ // Entity can be used to map non-standard entity names to string replacements.
+ // The parser behaves as if these standard mappings are present in the map,
+ // regardless of the actual map content:
+ //
+ // "lt": "<",
+ // "gt": ">",
+ // "amp": "&",
+ // "apos": "'",
+ // "quot": `"`,
+ Entity map[string]string
+
+ // CharsetReader, if non-nil, defines a function to generate
+ // charset-conversion readers, converting from the provided
+ // non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+ // returns an error, parsing stops with an error. One of the
+	// CharsetReader's result values must be non-nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+ // DefaultSpace sets the default name space used for unadorned tags,
+ // as if the entire XML stream were wrapped in an element containing
+ // the attribute xmlns="DefaultSpace".
+ DefaultSpace string
+
+ r io.ByteReader
+ buf bytes.Buffer
+ saved *bytes.Buffer
+ stk *stack
+ free *stack
+ needClose bool
+ toClose Name
+ nextToken Token
+ nextByte int
+ ns map[string]string
+ err error
+ line int
+ offset int64
+ unmarshalDepth int
+}
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+ d := &Decoder{
+ ns: make(map[string]string),
+ nextByte: -1,
+ line: 1,
+ Strict: true,
+ }
+ d.switchToReader(r)
+ return d
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls.
+//
+// Token guarantees that the StartElement and EndElement
+// tokens it returns are properly nested and matched:
+// if Token encounters an unexpected end element,
+// it will return an error.
+//
+// Token implements XML name spaces as described by
+// http://www.w3.org/TR/REC-xml-names/. Each of the
+// Name structures contained in the Token has the Space
+// set to the URL identifying its name space when known.
+// If Token encounters an unrecognized name space prefix,
+// it uses the prefix as the Space rather than report an error.
+func (d *Decoder) Token() (t Token, err error) {
+ if d.stk != nil && d.stk.kind == stkEOF {
+ err = io.EOF
+ return
+ }
+ if d.nextToken != nil {
+ t = d.nextToken
+ d.nextToken = nil
+ } else if t, err = d.rawToken(); err != nil {
+ return
+ }
+
+ if !d.Strict {
+ if t1, ok := d.autoClose(t); ok {
+ d.nextToken = t
+ t = t1
+ }
+ }
+ switch t1 := t.(type) {
+ case StartElement:
+ // In XML name spaces, the translations listed in the
+ // attributes apply to the element name and
+ // to the other attribute names, so process
+ // the translations first.
+ for _, a := range t1.Attr {
+ if a.Name.Space == "xmlns" {
+ v, ok := d.ns[a.Name.Local]
+ d.pushNs(a.Name.Local, v, ok)
+ d.ns[a.Name.Local] = a.Value
+ }
+ if a.Name.Space == "" && a.Name.Local == "xmlns" {
+ // Default space for untagged names
+ v, ok := d.ns[""]
+ d.pushNs("", v, ok)
+ d.ns[""] = a.Value
+ }
+ }
+
+ d.translate(&t1.Name, true)
+ for i := range t1.Attr {
+ d.translate(&t1.Attr[i].Name, false)
+ }
+ d.pushElement(t1.Name)
+ t = t1
+
+ case EndElement:
+ d.translate(&t1.Name, true)
+ if !d.popElement(&t1) {
+ return nil, d.err
+ }
+ t = t1
+ }
+ return
+}
+
+const xmlURL = "http://www.w3.org/XML/1998/namespace"
+
+// Apply name space translation to name n.
+// The default name space (for Space=="")
+// applies only to element names, not to attribute names.
+func (d *Decoder) translate(n *Name, isElementName bool) {
+ switch {
+ case n.Space == "xmlns":
+ return
+ case n.Space == "" && !isElementName:
+ return
+ case n.Space == "xml":
+ n.Space = xmlURL
+ case n.Space == "" && n.Local == "xmlns":
+ return
+ }
+ if v, ok := d.ns[n.Space]; ok {
+ n.Space = v
+ } else if n.Space == "" {
+ n.Space = d.DefaultSpace
+ }
+}
+
+func (d *Decoder) switchToReader(r io.Reader) {
+ // Get efficient byte at a time reader.
+ // Assume that if reader has its own
+ // ReadByte, it's efficient enough.
+ // Otherwise, use bufio.
+ if rb, ok := r.(io.ByteReader); ok {
+ d.r = rb
+ } else {
+ d.r = bufio.NewReader(r)
+ }
+}
+
+// Parsing state - stack holds old name space translations
+// and the current set of open elements. The translations to pop when
+// ending a given tag are *below* it on the stack, which is
+// more work but forced on us by XML.
+type stack struct {
+ next *stack
+ kind int
+ name Name
+ ok bool
+}
+
+const (
+ stkStart = iota
+ stkNs
+ stkEOF
+)
+
+func (d *Decoder) push(kind int) *stack {
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.next = d.stk
+ s.kind = kind
+ d.stk = s
+ return s
+}
+
+func (d *Decoder) pop() *stack {
+ s := d.stk
+ if s != nil {
+ d.stk = s.next
+ s.next = d.free
+ d.free = s
+ }
+ return s
+}
+
+// Record that after the current element is finished
+// (that element is already pushed on the stack)
+// Token should return EOF until popEOF is called.
+func (d *Decoder) pushEOF() {
+ // Walk down stack to find Start.
+ // It might not be the top, because there might be stkNs
+ // entries above it.
+ start := d.stk
+ for start.kind != stkStart {
+ start = start.next
+ }
+ // The stkNs entries below a start are associated with that
+ // element too; skip over them.
+ for start.next != nil && start.next.kind == stkNs {
+ start = start.next
+ }
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.kind = stkEOF
+ s.next = start.next
+ start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+ if d.stk == nil || d.stk.kind != stkEOF {
+ return false
+ }
+ d.pop()
+ return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+ s := d.push(stkStart)
+ s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+ s := d.push(stkNs)
+ s.name.Local = local
+ s.name.Space = url
+ s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+ return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool {
+ s := d.pop()
+ name := t.Name
+ switch {
+ case s == nil || s.kind != stkStart:
+ d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
+ return false
+ case s.name.Local != name.Local:
+ if !d.Strict {
+ d.needClose = true
+ d.toClose = t.Name
+ t.Name = s.name
+ return true
+ }
+ d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+ return false
+ case s.name.Space != name.Space:
+ d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+			" closed by </" + name.Local + "> in space " + name.Space)
+ return false
+ }
+
+ // Pop stack until a Start or EOF is on the top, undoing the
+ // translations that were associated with the element we just closed.
+ for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
+ s := d.pop()
+ if s.ok {
+ d.ns[s.name.Local] = s.name.Space
+ } else {
+ delete(d.ns, s.name.Local)
+ }
+ }
+
+ return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (d *Decoder) autoClose(t Token) (Token, bool) {
+ if d.stk == nil || d.stk.kind != stkStart {
+ return nil, false
+ }
+ name := strings.ToLower(d.stk.name.Local)
+ for _, s := range d.AutoClose {
+ if strings.ToLower(s) == name {
+ // This one should be auto closed if t doesn't close it.
+ et, ok := t.(EndElement)
+ if !ok || et.Name.Local != name {
+ return EndElement{d.stk.name}, true
+ }
+ break
+ }
+ }
+ return nil, false
+}
+
+var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
+func (d *Decoder) RawToken() (Token, error) {
+ if d.unmarshalDepth > 0 {
+ return nil, errRawToken
+ }
+ return d.rawToken()
+}
+
+func (d *Decoder) rawToken() (Token, error) {
+ if d.err != nil {
+ return nil, d.err
+ }
+ if d.needClose {
+ // The last element we read was self-closing and
+ // we returned just the StartElement half.
+ // Return the EndElement half now.
+ d.needClose = false
+ return EndElement{d.toClose}, nil
+ }
+
+ b, ok := d.getc()
+ if !ok {
+ return nil, d.err
+ }
+
+ if b != '<' {
+ // Text section.
+ d.ungetc(b)
+ data := d.text(-1, false)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '/':
+ // </: End element
+ var name Name
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after </")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
+ return nil, d.err
+ }
+ return EndElement{name}, nil
+
+ case '?':
+ // <?: Processing instruction.
+ var target string
+ if target, ok = d.name(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected target name after <?")
+ }
+ return nil, d.err
+ }
+ d.space()
+ d.buf.Reset()
+ var b0 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '?' && b == '>' {
+ break
+ }
+ b0 = b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-2] // chop ?>
+
+ if target == "xml" {
+ content := string(data)
+ ver := procInst("version", content)
+ if ver != "" && ver != "1.0" {
+ d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
+ return nil, d.err
+ }
+ enc := procInst("encoding", content)
+ if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+ if d.CharsetReader == nil {
+ d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
+ return nil, d.err
+ }
+ newr, err := d.CharsetReader(enc, d.r.(io.Reader))
+ if err != nil {
+ d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+ return nil, d.err
+ }
+ if newr == nil {
+ panic("CharsetReader returned a nil Reader for charset " + enc)
+ }
+ d.switchToReader(newr)
+ }
+ }
+ return ProcInst{target, data}, nil
+
+ case '!':
+ // <!: Maybe comment, maybe CDATA.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '-': // <!-
+ // Probably <!-- for a comment.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '-' {
+ d.err = d.syntaxError("invalid sequence <!- not part of <!--")
+ return nil, d.err
+ }
+ // Look for terminator.
+ d.buf.Reset()
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-3] // chop -->
+ return Comment(data), nil
+
+ case '[': // <![
+ // Probably <![CDATA[.
+ for i := 0; i < 6; i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != "CDATA["[i] {
+ d.err = d.syntaxError("invalid <![ sequence")
+ return nil, d.err
+ }
+ }
+ // Have <![CDATA[. Read text until ]]>.
+ data := d.text(-1, true)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+ // We don't care, but accumulate for caller. Quoted angle
+ // brackets do not count for nesting.
+ d.buf.Reset()
+ d.buf.WriteByte(b)
+ inquote := uint8(0)
+ depth := 0
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if inquote == 0 && b == '>' && depth == 0 {
+ break
+ }
+ HandleB:
+ d.buf.WriteByte(b)
+ switch {
+ case b == inquote:
+ inquote = 0
+
+ case inquote != 0:
+ // in quotes, no special action
+
+ case b == '\'' || b == '"':
+ inquote = b
+
+ case b == '>' && inquote == 0:
+ depth--
+
+ case b == '<' && inquote == 0:
+ // Look for <!-- to begin comment.
+ s := "!--"
+ for i := 0; i < len(s); i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != s[i] {
+ for j := 0; j < i; j++ {
+ d.buf.WriteByte(s[j])
+ }
+ depth++
+ goto HandleB
+ }
+ }
+
+ // Remove < that was written above.
+ d.buf.Truncate(d.buf.Len() - 1)
+
+ // Look for terminator.
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ }
+ }
+ return Directive(d.buf.Bytes()), nil
+ }
+
+ // Must be an open element like <a href="foo">
+ d.ungetc(b)
+
+ var (
+ name Name
+ empty bool
+ attr []Attr
+ )
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after <")
+ }
+ return nil, d.err
+ }
+
+ attr = []Attr{}
+ for {
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b == '/' {
+ empty = true
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("expected /> in element")
+ return nil, d.err
+ }
+ break
+ }
+ if b == '>' {
+ break
+ }
+ d.ungetc(b)
+
+ n := len(attr)
+ if n >= cap(attr) {
+ nCap := 2 * cap(attr)
+ if nCap == 0 {
+ nCap = 4
+ }
+ nattr := make([]Attr, n, nCap)
+ copy(nattr, attr)
+ attr = nattr
+ }
+ attr = attr[0 : n+1]
+ a := &attr[n]
+ if a.Name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected attribute name in element")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '=' {
+ if d.Strict {
+ d.err = d.syntaxError("attribute name without = in element")
+ return nil, d.err
+ } else {
+ d.ungetc(b)
+ a.Value = a.Name.Local
+ }
+ } else {
+ d.space()
+ data := d.attrval()
+ if data == nil {
+ return nil, d.err
+ }
+ a.Value = string(data)
+ }
+ }
+ if empty {
+ d.needClose = true
+ d.toClose = name
+ }
+ return StartElement{name, attr}, nil
+}
+
+func (d *Decoder) attrval() []byte {
+ b, ok := d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // Handle quoted attribute values
+ if b == '"' || b == '\'' {
+ return d.text(int(b), false)
+ }
+ // Handle unquoted attribute values for strict parsers
+ if d.Strict {
+ d.err = d.syntaxError("unquoted or missing attribute value in element")
+ return nil
+ }
+ // Handle unquoted attribute values for unstrict parsers
+ d.ungetc(b)
+ d.buf.Reset()
+ for {
+ b, ok = d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
+ if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
+ d.buf.WriteByte(b)
+ } else {
+ d.ungetc(b)
+ break
+ }
+ }
+ return d.buf.Bytes()
+}
+
+// Skip spaces if any
+func (d *Decoder) space() {
+ for {
+ b, ok := d.getc()
+ if !ok {
+ return
+ }
+ switch b {
+ case ' ', '\r', '\n', '\t':
+ default:
+ d.ungetc(b)
+ return
+ }
+ }
+}
+
+// Read a single byte.
+// If there is no byte to read, return ok==false
+// and leave the error in d.err.
+// Maintain line number.
+func (d *Decoder) getc() (b byte, ok bool) {
+ if d.err != nil {
+ return 0, false
+ }
+ if d.nextByte >= 0 {
+ b = byte(d.nextByte)
+ d.nextByte = -1
+ } else {
+ b, d.err = d.r.ReadByte()
+ if d.err != nil {
+ return 0, false
+ }
+ if d.saved != nil {
+ d.saved.WriteByte(b)
+ }
+ }
+ if b == '\n' {
+ d.line++
+ }
+ d.offset++
+ return b, true
+}
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token
+// and the beginning of the next token.
+func (d *Decoder) InputOffset() int64 {
+ return d.offset
+}
+
+// Return saved offset.
+// If we did ungetc (nextByte >= 0), have to back up one.
+func (d *Decoder) savedOffset() int {
+ n := d.saved.Len()
+ if d.nextByte >= 0 {
+ n--
+ }
+ return n
+}
+
+// Must read a single byte.
+// If there is no byte to read,
+// set d.err to SyntaxError("unexpected EOF")
+// and return ok==false
+func (d *Decoder) mustgetc() (b byte, ok bool) {
+ if b, ok = d.getc(); !ok {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF")
+ }
+ }
+ return
+}
+
+// Unread a single byte.
+func (d *Decoder) ungetc(b byte) {
+ if b == '\n' {
+ d.line--
+ }
+ d.nextByte = int(b)
+ d.offset--
+}
+
+var entity = map[string]int{
+ "lt": '<',
+ "gt": '>',
+ "amp": '&',
+ "apos": '\'',
+ "quot": '"',
+}
+
+// Read plain text section (XML calls it character data).
+// If quote >= 0, we are in a quoted string and need to find the matching quote.
+// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
+// On failure return nil and leave the error in d.err.
+func (d *Decoder) text(quote int, cdata bool) []byte {
+ var b0, b1 byte
+ var trunc int
+ d.buf.Reset()
+Input:
+ for {
+ b, ok := d.getc()
+ if !ok {
+ if cdata {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF in CDATA section")
+ }
+ return nil
+ }
+ break Input
+ }
+
+ // <![CDATA[ section ends with ]]>.
+ // It is an error for ]]> to appear in ordinary text.
+ if b0 == ']' && b1 == ']' && b == '>' {
+ if cdata {
+ trunc = 2
+ break Input
+ }
+ d.err = d.syntaxError("unescaped ]]> not in CDATA section")
+ return nil
+ }
+
+ // Stop reading text if we see a <.
+ if b == '<' && !cdata {
+ if quote >= 0 {
+ d.err = d.syntaxError("unescaped < inside quoted string")
+ return nil
+ }
+ d.ungetc('<')
+ break Input
+ }
+ if quote >= 0 && b == byte(quote) {
+ break Input
+ }
+ if b == '&' && !cdata {
+ // Read escaped character expression up to semicolon.
+ // XML in all its glory allows a document to define and use
+ // its own character names with <!ENTITY ...> directives.
+ // Parsers are required to recognize lt, gt, amp, apos, and quot
+ // even if they have not been declared.
+ before := d.buf.Len()
+ d.buf.WriteByte('&')
+ var ok bool
+ var text string
+ var haveText bool
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b == '#' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ base := 10
+ if b == 'x' {
+ base = 16
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ start := d.buf.Len()
+ for '0' <= b && b <= '9' ||
+ base == 16 && 'a' <= b && b <= 'f' ||
+ base == 16 && 'A' <= b && b <= 'F' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ s := string(d.buf.Bytes()[start:])
+ d.buf.WriteByte(';')
+ n, err := strconv.ParseUint(s, base, 64)
+ if err == nil && n <= unicode.MaxRune {
+ text = string(n)
+ haveText = true
+ }
+ }
+ } else {
+ d.ungetc(b)
+ if !d.readName() {
+ if d.err != nil {
+ return nil
+ }
+ ok = false
+ }
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ name := d.buf.Bytes()[before+1:]
+ d.buf.WriteByte(';')
+ if isName(name) {
+ s := string(name)
+ if r, ok := entity[s]; ok {
+ text = string(r)
+ haveText = true
+ } else if d.Entity != nil {
+ text, haveText = d.Entity[s]
+ }
+ }
+ }
+ }
+
+ if haveText {
+ d.buf.Truncate(before)
+ d.buf.Write([]byte(text))
+ b0, b1 = 0, 0
+ continue Input
+ }
+ if !d.Strict {
+ b0, b1 = 0, 0
+ continue Input
+ }
+ ent := string(d.buf.Bytes()[before:])
+ if ent[len(ent)-1] != ';' {
+ ent += " (no semicolon)"
+ }
+ d.err = d.syntaxError("invalid character entity " + ent)
+ return nil
+ }
+
+ // We must rewrite unescaped \r and \r\n into \n.
+ if b == '\r' {
+ d.buf.WriteByte('\n')
+ } else if b1 == '\r' && b == '\n' {
+ // Skip \r\n--we already wrote \n.
+ } else {
+ d.buf.WriteByte(b)
+ }
+
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-trunc]
+
+ // Inspect each rune for being a disallowed character.
+ buf := data
+ for len(buf) > 0 {
+ r, size := utf8.DecodeRune(buf)
+ if r == utf8.RuneError && size == 1 {
+ d.err = d.syntaxError("invalid UTF-8")
+ return nil
+ }
+ buf = buf[size:]
+ if !isInCharacterRange(r) {
+ d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r))
+ return nil
+ }
+ }
+
+ return data
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of http://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+// Get name space name: name with a : stuck in the middle.
+// The part before the : is the name space identifier.
+func (d *Decoder) nsname() (name Name, ok bool) {
+ s, ok := d.name()
+ if !ok {
+ return
+ }
+ i := strings.Index(s, ":")
+ if i < 0 {
+ name.Local = s
+ } else {
+ name.Space = s[0:i]
+ name.Local = s[i+1:]
+ }
+ return name, true
+}
+
+// Get name: /first(first|second)*/
+// Do not set d.err if the name is missing (unless unexpected EOF is received):
+// let the caller provide better context.
+func (d *Decoder) name() (s string, ok bool) {
+ d.buf.Reset()
+ if !d.readName() {
+ return "", false
+ }
+
+ // Now we check the characters.
+ b := d.buf.Bytes()
+ if !isName(b) {
+ d.err = d.syntaxError("invalid XML name: " + string(b))
+ return "", false
+ }
+ return string(b), true
+}
+
+// Read a name and append its bytes to d.buf.
+// The name is delimited by any single-byte character not valid in names.
+// All multi-byte characters are accepted; the caller must check their validity.
+func (d *Decoder) readName() (ok bool) {
+ var b byte
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ return false
+ }
+ d.buf.WriteByte(b)
+
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ break
+ }
+ d.buf.WriteByte(b)
+ }
+ return true
+}
+
+func isNameByte(c byte) bool {
+ return 'A' <= c && c <= 'Z' ||
+ 'a' <= c && c <= 'z' ||
+ '0' <= c && c <= '9' ||
+ c == '_' || c == ':' || c == '.' || c == '-'
+}
+
+func isName(s []byte) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNameString(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// These tables were generated by cut and paste from Appendix B of
+// the XML spec at http://www.xml.com/axml/testaxml.htm
+// and then reformatting. First corresponds to (Letter | '_' | ':')
+// and second corresponds to NameChar.
+
+var first = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1},
+ {0x0041, 0x005A, 1},
+ {0x005F, 0x005F, 1},
+ {0x0061, 0x007A, 1},
+ {0x00C0, 0x00D6, 1},
+ {0x00D8, 0x00F6, 1},
+ {0x00F8, 0x00FF, 1},
+ {0x0100, 0x0131, 1},
+ {0x0134, 0x013E, 1},
+ {0x0141, 0x0148, 1},
+ {0x014A, 0x017E, 1},
+ {0x0180, 0x01C3, 1},
+ {0x01CD, 0x01F0, 1},
+ {0x01F4, 0x01F5, 1},
+ {0x01FA, 0x0217, 1},
+ {0x0250, 0x02A8, 1},
+ {0x02BB, 0x02C1, 1},
+ {0x0386, 0x0386, 1},
+ {0x0388, 0x038A, 1},
+ {0x038C, 0x038C, 1},
+ {0x038E, 0x03A1, 1},
+ {0x03A3, 0x03CE, 1},
+ {0x03D0, 0x03D6, 1},
+ {0x03DA, 0x03E0, 2},
+ {0x03E2, 0x03F3, 1},
+ {0x0401, 0x040C, 1},
+ {0x040E, 0x044F, 1},
+ {0x0451, 0x045C, 1},
+ {0x045E, 0x0481, 1},
+ {0x0490, 0x04C4, 1},
+ {0x04C7, 0x04C8, 1},
+ {0x04CB, 0x04CC, 1},
+ {0x04D0, 0x04EB, 1},
+ {0x04EE, 0x04F5, 1},
+ {0x04F8, 0x04F9, 1},
+ {0x0531, 0x0556, 1},
+ {0x0559, 0x0559, 1},
+ {0x0561, 0x0586, 1},
+ {0x05D0, 0x05EA, 1},
+ {0x05F0, 0x05F2, 1},
+ {0x0621, 0x063A, 1},
+ {0x0641, 0x064A, 1},
+ {0x0671, 0x06B7, 1},
+ {0x06BA, 0x06BE, 1},
+ {0x06C0, 0x06CE, 1},
+ {0x06D0, 0x06D3, 1},
+ {0x06D5, 0x06D5, 1},
+ {0x06E5, 0x06E6, 1},
+ {0x0905, 0x0939, 1},
+ {0x093D, 0x093D, 1},
+ {0x0958, 0x0961, 1},
+ {0x0985, 0x098C, 1},
+ {0x098F, 0x0990, 1},
+ {0x0993, 0x09A8, 1},
+ {0x09AA, 0x09B0, 1},
+ {0x09B2, 0x09B2, 1},
+ {0x09B6, 0x09B9, 1},
+ {0x09DC, 0x09DD, 1},
+ {0x09DF, 0x09E1, 1},
+ {0x09F0, 0x09F1, 1},
+ {0x0A05, 0x0A0A, 1},
+ {0x0A0F, 0x0A10, 1},
+ {0x0A13, 0x0A28, 1},
+ {0x0A2A, 0x0A30, 1},
+ {0x0A32, 0x0A33, 1},
+ {0x0A35, 0x0A36, 1},
+ {0x0A38, 0x0A39, 1},
+ {0x0A59, 0x0A5C, 1},
+ {0x0A5E, 0x0A5E, 1},
+ {0x0A72, 0x0A74, 1},
+ {0x0A85, 0x0A8B, 1},
+ {0x0A8D, 0x0A8D, 1},
+ {0x0A8F, 0x0A91, 1},
+ {0x0A93, 0x0AA8, 1},
+ {0x0AAA, 0x0AB0, 1},
+ {0x0AB2, 0x0AB3, 1},
+ {0x0AB5, 0x0AB9, 1},
+ {0x0ABD, 0x0AE0, 0x23},
+ {0x0B05, 0x0B0C, 1},
+ {0x0B0F, 0x0B10, 1},
+ {0x0B13, 0x0B28, 1},
+ {0x0B2A, 0x0B30, 1},
+ {0x0B32, 0x0B33, 1},
+ {0x0B36, 0x0B39, 1},
+ {0x0B3D, 0x0B3D, 1},
+ {0x0B5C, 0x0B5D, 1},
+ {0x0B5F, 0x0B61, 1},
+ {0x0B85, 0x0B8A, 1},
+ {0x0B8E, 0x0B90, 1},
+ {0x0B92, 0x0B95, 1},
+ {0x0B99, 0x0B9A, 1},
+ {0x0B9C, 0x0B9C, 1},
+ {0x0B9E, 0x0B9F, 1},
+ {0x0BA3, 0x0BA4, 1},
+ {0x0BA8, 0x0BAA, 1},
+ {0x0BAE, 0x0BB5, 1},
+ {0x0BB7, 0x0BB9, 1},
+ {0x0C05, 0x0C0C, 1},
+ {0x0C0E, 0x0C10, 1},
+ {0x0C12, 0x0C28, 1},
+ {0x0C2A, 0x0C33, 1},
+ {0x0C35, 0x0C39, 1},
+ {0x0C60, 0x0C61, 1},
+ {0x0C85, 0x0C8C, 1},
+ {0x0C8E, 0x0C90, 1},
+ {0x0C92, 0x0CA8, 1},
+ {0x0CAA, 0x0CB3, 1},
+ {0x0CB5, 0x0CB9, 1},
+ {0x0CDE, 0x0CDE, 1},
+ {0x0CE0, 0x0CE1, 1},
+ {0x0D05, 0x0D0C, 1},
+ {0x0D0E, 0x0D10, 1},
+ {0x0D12, 0x0D28, 1},
+ {0x0D2A, 0x0D39, 1},
+ {0x0D60, 0x0D61, 1},
+ {0x0E01, 0x0E2E, 1},
+ {0x0E30, 0x0E30, 1},
+ {0x0E32, 0x0E33, 1},
+ {0x0E40, 0x0E45, 1},
+ {0x0E81, 0x0E82, 1},
+ {0x0E84, 0x0E84, 1},
+ {0x0E87, 0x0E88, 1},
+ {0x0E8A, 0x0E8D, 3},
+ {0x0E94, 0x0E97, 1},
+ {0x0E99, 0x0E9F, 1},
+ {0x0EA1, 0x0EA3, 1},
+ {0x0EA5, 0x0EA7, 2},
+ {0x0EAA, 0x0EAB, 1},
+ {0x0EAD, 0x0EAE, 1},
+ {0x0EB0, 0x0EB0, 1},
+ {0x0EB2, 0x0EB3, 1},
+ {0x0EBD, 0x0EBD, 1},
+ {0x0EC0, 0x0EC4, 1},
+ {0x0F40, 0x0F47, 1},
+ {0x0F49, 0x0F69, 1},
+ {0x10A0, 0x10C5, 1},
+ {0x10D0, 0x10F6, 1},
+ {0x1100, 0x1100, 1},
+ {0x1102, 0x1103, 1},
+ {0x1105, 0x1107, 1},
+ {0x1109, 0x1109, 1},
+ {0x110B, 0x110C, 1},
+ {0x110E, 0x1112, 1},
+ {0x113C, 0x1140, 2},
+ {0x114C, 0x1150, 2},
+ {0x1154, 0x1155, 1},
+ {0x1159, 0x1159, 1},
+ {0x115F, 0x1161, 1},
+ {0x1163, 0x1169, 2},
+ {0x116D, 0x116E, 1},
+ {0x1172, 0x1173, 1},
+ {0x1175, 0x119E, 0x119E - 0x1175},
+ {0x11A8, 0x11AB, 0x11AB - 0x11A8},
+ {0x11AE, 0x11AF, 1},
+ {0x11B7, 0x11B8, 1},
+ {0x11BA, 0x11BA, 1},
+ {0x11BC, 0x11C2, 1},
+ {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
+ {0x11F9, 0x11F9, 1},
+ {0x1E00, 0x1E9B, 1},
+ {0x1EA0, 0x1EF9, 1},
+ {0x1F00, 0x1F15, 1},
+ {0x1F18, 0x1F1D, 1},
+ {0x1F20, 0x1F45, 1},
+ {0x1F48, 0x1F4D, 1},
+ {0x1F50, 0x1F57, 1},
+ {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
+ {0x1F5D, 0x1F5D, 1},
+ {0x1F5F, 0x1F7D, 1},
+ {0x1F80, 0x1FB4, 1},
+ {0x1FB6, 0x1FBC, 1},
+ {0x1FBE, 0x1FBE, 1},
+ {0x1FC2, 0x1FC4, 1},
+ {0x1FC6, 0x1FCC, 1},
+ {0x1FD0, 0x1FD3, 1},
+ {0x1FD6, 0x1FDB, 1},
+ {0x1FE0, 0x1FEC, 1},
+ {0x1FF2, 0x1FF4, 1},
+ {0x1FF6, 0x1FFC, 1},
+ {0x2126, 0x2126, 1},
+ {0x212A, 0x212B, 1},
+ {0x212E, 0x212E, 1},
+ {0x2180, 0x2182, 1},
+ {0x3007, 0x3007, 1},
+ {0x3021, 0x3029, 1},
+ {0x3041, 0x3094, 1},
+ {0x30A1, 0x30FA, 1},
+ {0x3105, 0x312C, 1},
+ {0x4E00, 0x9FA5, 1},
+ {0xAC00, 0xD7A3, 1},
+ },
+}
+
+var second = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x002D, 0x002E, 1},
+ {0x0030, 0x0039, 1},
+ {0x00B7, 0x00B7, 1},
+ {0x02D0, 0x02D1, 1},
+ {0x0300, 0x0345, 1},
+ {0x0360, 0x0361, 1},
+ {0x0387, 0x0387, 1},
+ {0x0483, 0x0486, 1},
+ {0x0591, 0x05A1, 1},
+ {0x05A3, 0x05B9, 1},
+ {0x05BB, 0x05BD, 1},
+ {0x05BF, 0x05BF, 1},
+ {0x05C1, 0x05C2, 1},
+ {0x05C4, 0x0640, 0x0640 - 0x05C4},
+ {0x064B, 0x0652, 1},
+ {0x0660, 0x0669, 1},
+ {0x0670, 0x0670, 1},
+ {0x06D6, 0x06DC, 1},
+ {0x06DD, 0x06DF, 1},
+ {0x06E0, 0x06E4, 1},
+ {0x06E7, 0x06E8, 1},
+ {0x06EA, 0x06ED, 1},
+ {0x06F0, 0x06F9, 1},
+ {0x0901, 0x0903, 1},
+ {0x093C, 0x093C, 1},
+ {0x093E, 0x094C, 1},
+ {0x094D, 0x094D, 1},
+ {0x0951, 0x0954, 1},
+ {0x0962, 0x0963, 1},
+ {0x0966, 0x096F, 1},
+ {0x0981, 0x0983, 1},
+ {0x09BC, 0x09BC, 1},
+ {0x09BE, 0x09BF, 1},
+ {0x09C0, 0x09C4, 1},
+ {0x09C7, 0x09C8, 1},
+ {0x09CB, 0x09CD, 1},
+ {0x09D7, 0x09D7, 1},
+ {0x09E2, 0x09E3, 1},
+ {0x09E6, 0x09EF, 1},
+ {0x0A02, 0x0A3C, 0x3A},
+ {0x0A3E, 0x0A3F, 1},
+ {0x0A40, 0x0A42, 1},
+ {0x0A47, 0x0A48, 1},
+ {0x0A4B, 0x0A4D, 1},
+ {0x0A66, 0x0A6F, 1},
+ {0x0A70, 0x0A71, 1},
+ {0x0A81, 0x0A83, 1},
+ {0x0ABC, 0x0ABC, 1},
+ {0x0ABE, 0x0AC5, 1},
+ {0x0AC7, 0x0AC9, 1},
+ {0x0ACB, 0x0ACD, 1},
+ {0x0AE6, 0x0AEF, 1},
+ {0x0B01, 0x0B03, 1},
+ {0x0B3C, 0x0B3C, 1},
+ {0x0B3E, 0x0B43, 1},
+ {0x0B47, 0x0B48, 1},
+ {0x0B4B, 0x0B4D, 1},
+ {0x0B56, 0x0B57, 1},
+ {0x0B66, 0x0B6F, 1},
+ {0x0B82, 0x0B83, 1},
+ {0x0BBE, 0x0BC2, 1},
+ {0x0BC6, 0x0BC8, 1},
+ {0x0BCA, 0x0BCD, 1},
+ {0x0BD7, 0x0BD7, 1},
+ {0x0BE7, 0x0BEF, 1},
+ {0x0C01, 0x0C03, 1},
+ {0x0C3E, 0x0C44, 1},
+ {0x0C46, 0x0C48, 1},
+ {0x0C4A, 0x0C4D, 1},
+ {0x0C55, 0x0C56, 1},
+ {0x0C66, 0x0C6F, 1},
+ {0x0C82, 0x0C83, 1},
+ {0x0CBE, 0x0CC4, 1},
+ {0x0CC6, 0x0CC8, 1},
+ {0x0CCA, 0x0CCD, 1},
+ {0x0CD5, 0x0CD6, 1},
+ {0x0CE6, 0x0CEF, 1},
+ {0x0D02, 0x0D03, 1},
+ {0x0D3E, 0x0D43, 1},
+ {0x0D46, 0x0D48, 1},
+ {0x0D4A, 0x0D4D, 1},
+ {0x0D57, 0x0D57, 1},
+ {0x0D66, 0x0D6F, 1},
+ {0x0E31, 0x0E31, 1},
+ {0x0E34, 0x0E3A, 1},
+ {0x0E46, 0x0E46, 1},
+ {0x0E47, 0x0E4E, 1},
+ {0x0E50, 0x0E59, 1},
+ {0x0EB1, 0x0EB1, 1},
+ {0x0EB4, 0x0EB9, 1},
+ {0x0EBB, 0x0EBC, 1},
+ {0x0EC6, 0x0EC6, 1},
+ {0x0EC8, 0x0ECD, 1},
+ {0x0ED0, 0x0ED9, 1},
+ {0x0F18, 0x0F19, 1},
+ {0x0F20, 0x0F29, 1},
+ {0x0F35, 0x0F39, 2},
+ {0x0F3E, 0x0F3F, 1},
+ {0x0F71, 0x0F84, 1},
+ {0x0F86, 0x0F8B, 1},
+ {0x0F90, 0x0F95, 1},
+ {0x0F97, 0x0F97, 1},
+ {0x0F99, 0x0FAD, 1},
+ {0x0FB1, 0x0FB7, 1},
+ {0x0FB9, 0x0FB9, 1},
+ {0x20D0, 0x20DC, 1},
+ {0x20E1, 0x3005, 0x3005 - 0x20E1},
+ {0x302A, 0x302F, 1},
+ {0x3031, 0x3035, 1},
+ {0x3099, 0x309A, 1},
+ {0x309D, 0x309E, 1},
+ {0x30FC, 0x30FE, 1},
+ },
+}
+
+// HTMLEntity is an entity map containing translations for the
+// standard HTML entity characters.
+var HTMLEntity = htmlEntity
+
+var htmlEntity = map[string]string{
+ /*
+ hget http://www.w3.org/TR/html4/sgml/entities.html |
+ ssam '
+ ,y /\&gt;/ x/\&lt;(.|\n)+/ s/\n/ /g
+ ,x v/^\&lt;!ENTITY/d
+ ,s/\&lt;!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g
+ '
+ */
+ "nbsp": "\u00A0",
+ "iexcl": "\u00A1",
+ "cent": "\u00A2",
+ "pound": "\u00A3",
+ "curren": "\u00A4",
+ "yen": "\u00A5",
+ "brvbar": "\u00A6",
+ "sect": "\u00A7",
+ "uml": "\u00A8",
+ "copy": "\u00A9",
+ "ordf": "\u00AA",
+ "laquo": "\u00AB",
+ "not": "\u00AC",
+ "shy": "\u00AD",
+ "reg": "\u00AE",
+ "macr": "\u00AF",
+ "deg": "\u00B0",
+ "plusmn": "\u00B1",
+ "sup2": "\u00B2",
+ "sup3": "\u00B3",
+ "acute": "\u00B4",
+ "micro": "\u00B5",
+ "para": "\u00B6",
+ "middot": "\u00B7",
+ "cedil": "\u00B8",
+ "sup1": "\u00B9",
+ "ordm": "\u00BA",
+ "raquo": "\u00BB",
+ "frac14": "\u00BC",
+ "frac12": "\u00BD",
+ "frac34": "\u00BE",
+ "iquest": "\u00BF",
+ "Agrave": "\u00C0",
+ "Aacute": "\u00C1",
+ "Acirc": "\u00C2",
+ "Atilde": "\u00C3",
+ "Auml": "\u00C4",
+ "Aring": "\u00C5",
+ "AElig": "\u00C6",
+ "Ccedil": "\u00C7",
+ "Egrave": "\u00C8",
+ "Eacute": "\u00C9",
+ "Ecirc": "\u00CA",
+ "Euml": "\u00CB",
+ "Igrave": "\u00CC",
+ "Iacute": "\u00CD",
+ "Icirc": "\u00CE",
+ "Iuml": "\u00CF",
+ "ETH": "\u00D0",
+ "Ntilde": "\u00D1",
+ "Ograve": "\u00D2",
+ "Oacute": "\u00D3",
+ "Ocirc": "\u00D4",
+ "Otilde": "\u00D5",
+ "Ouml": "\u00D6",
+ "times": "\u00D7",
+ "Oslash": "\u00D8",
+ "Ugrave": "\u00D9",
+ "Uacute": "\u00DA",
+ "Ucirc": "\u00DB",
+ "Uuml": "\u00DC",
+ "Yacute": "\u00DD",
+ "THORN": "\u00DE",
+ "szlig": "\u00DF",
+ "agrave": "\u00E0",
+ "aacute": "\u00E1",
+ "acirc": "\u00E2",
+ "atilde": "\u00E3",
+ "auml": "\u00E4",
+ "aring": "\u00E5",
+ "aelig": "\u00E6",
+ "ccedil": "\u00E7",
+ "egrave": "\u00E8",
+ "eacute": "\u00E9",
+ "ecirc": "\u00EA",
+ "euml": "\u00EB",
+ "igrave": "\u00EC",
+ "iacute": "\u00ED",
+ "icirc": "\u00EE",
+ "iuml": "\u00EF",
+ "eth": "\u00F0",
+ "ntilde": "\u00F1",
+ "ograve": "\u00F2",
+ "oacute": "\u00F3",
+ "ocirc": "\u00F4",
+ "otilde": "\u00F5",
+ "ouml": "\u00F6",
+ "divide": "\u00F7",
+ "oslash": "\u00F8",
+ "ugrave": "\u00F9",
+ "uacute": "\u00FA",
+ "ucirc": "\u00FB",
+ "uuml": "\u00FC",
+ "yacute": "\u00FD",
+ "thorn": "\u00FE",
+ "yuml": "\u00FF",
+ "fnof": "\u0192",
+ "Alpha": "\u0391",
+ "Beta": "\u0392",
+ "Gamma": "\u0393",
+ "Delta": "\u0394",
+ "Epsilon": "\u0395",
+ "Zeta": "\u0396",
+ "Eta": "\u0397",
+ "Theta": "\u0398",
+ "Iota": "\u0399",
+ "Kappa": "\u039A",
+ "Lambda": "\u039B",
+ "Mu": "\u039C",
+ "Nu": "\u039D",
+ "Xi": "\u039E",
+ "Omicron": "\u039F",
+ "Pi": "\u03A0",
+ "Rho": "\u03A1",
+ "Sigma": "\u03A3",
+ "Tau": "\u03A4",
+ "Upsilon": "\u03A5",
+ "Phi": "\u03A6",
+ "Chi": "\u03A7",
+ "Psi": "\u03A8",
+ "Omega": "\u03A9",
+ "alpha": "\u03B1",
+ "beta": "\u03B2",
+ "gamma": "\u03B3",
+ "delta": "\u03B4",
+ "epsilon": "\u03B5",
+ "zeta": "\u03B6",
+ "eta": "\u03B7",
+ "theta": "\u03B8",
+ "iota": "\u03B9",
+ "kappa": "\u03BA",
+ "lambda": "\u03BB",
+ "mu": "\u03BC",
+ "nu": "\u03BD",
+ "xi": "\u03BE",
+ "omicron": "\u03BF",
+ "pi": "\u03C0",
+ "rho": "\u03C1",
+ "sigmaf": "\u03C2",
+ "sigma": "\u03C3",
+ "tau": "\u03C4",
+ "upsilon": "\u03C5",
+ "phi": "\u03C6",
+ "chi": "\u03C7",
+ "psi": "\u03C8",
+ "omega": "\u03C9",
+ "thetasym": "\u03D1",
+ "upsih": "\u03D2",
+ "piv": "\u03D6",
+ "bull": "\u2022",
+ "hellip": "\u2026",
+ "prime": "\u2032",
+ "Prime": "\u2033",
+ "oline": "\u203E",
+ "frasl": "\u2044",
+ "weierp": "\u2118",
+ "image": "\u2111",
+ "real": "\u211C",
+ "trade": "\u2122",
+ "alefsym": "\u2135",
+ "larr": "\u2190",
+ "uarr": "\u2191",
+ "rarr": "\u2192",
+ "darr": "\u2193",
+ "harr": "\u2194",
+ "crarr": "\u21B5",
+ "lArr": "\u21D0",
+ "uArr": "\u21D1",
+ "rArr": "\u21D2",
+ "dArr": "\u21D3",
+ "hArr": "\u21D4",
+ "forall": "\u2200",
+ "part": "\u2202",
+ "exist": "\u2203",
+ "empty": "\u2205",
+ "nabla": "\u2207",
+ "isin": "\u2208",
+ "notin": "\u2209",
+ "ni": "\u220B",
+ "prod": "\u220F",
+ "sum": "\u2211",
+ "minus": "\u2212",
+ "lowast": "\u2217",
+ "radic": "\u221A",
+ "prop": "\u221D",
+ "infin": "\u221E",
+ "ang": "\u2220",
+ "and": "\u2227",
+ "or": "\u2228",
+ "cap": "\u2229",
+ "cup": "\u222A",
+ "int": "\u222B",
+ "there4": "\u2234",
+ "sim": "\u223C",
+ "cong": "\u2245",
+ "asymp": "\u2248",
+ "ne": "\u2260",
+ "equiv": "\u2261",
+ "le": "\u2264",
+ "ge": "\u2265",
+ "sub": "\u2282",
+ "sup": "\u2283",
+ "nsub": "\u2284",
+ "sube": "\u2286",
+ "supe": "\u2287",
+ "oplus": "\u2295",
+ "otimes": "\u2297",
+ "perp": "\u22A5",
+ "sdot": "\u22C5",
+ "lceil": "\u2308",
+ "rceil": "\u2309",
+ "lfloor": "\u230A",
+ "rfloor": "\u230B",
+ "lang": "\u2329",
+ "rang": "\u232A",
+ "loz": "\u25CA",
+ "spades": "\u2660",
+ "clubs": "\u2663",
+ "hearts": "\u2665",
+ "diams": "\u2666",
+ "quot": "\u0022",
+ "amp": "\u0026",
+ "lt": "\u003C",
+ "gt": "\u003E",
+ "OElig": "\u0152",
+ "oelig": "\u0153",
+ "Scaron": "\u0160",
+ "scaron": "\u0161",
+ "Yuml": "\u0178",
+ "circ": "\u02C6",
+ "tilde": "\u02DC",
+ "ensp": "\u2002",
+ "emsp": "\u2003",
+ "thinsp": "\u2009",
+ "zwnj": "\u200C",
+ "zwj": "\u200D",
+ "lrm": "\u200E",
+ "rlm": "\u200F",
+ "ndash": "\u2013",
+ "mdash": "\u2014",
+ "lsquo": "\u2018",
+ "rsquo": "\u2019",
+ "sbquo": "\u201A",
+ "ldquo": "\u201C",
+ "rdquo": "\u201D",
+ "bdquo": "\u201E",
+ "dagger": "\u2020",
+ "Dagger": "\u2021",
+ "permil": "\u2030",
+ "lsaquo": "\u2039",
+ "rsaquo": "\u203A",
+ "euro": "\u20AC",
+}
+
+// HTMLAutoClose is the set of HTML elements that
+// should be considered to close automatically.
+var HTMLAutoClose = htmlAutoClose
+
+var htmlAutoClose = []string{
+ /*
+ hget http://www.w3.org/TR/html4/loose.dtd |
+ 9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/ "\1",/p' | tr A-Z a-z
+ */
+ "basefont",
+ "br",
+ "area",
+ "link",
+ "img",
+ "param",
+ "hr",
+ "input",
+ "col",
+ "frame",
+ "isindex",
+ "base",
+ "meta",
+}
+
+var (
+ esc_quot = []byte("&#34;") // shorter than "&quot;"
+ esc_apos = []byte("&#39;") // shorter than "&apos;"
+ esc_amp = []byte("&amp;")
+ esc_lt = []byte("&lt;")
+ esc_gt = []byte("&gt;")
+ esc_tab = []byte("&#x9;")
+ esc_nl = []byte("&#xA;")
+ esc_cr = []byte("&#xD;")
+ esc_fffd = []byte("\uFFFD") // Unicode replacement character
+)
+
+// EscapeText writes to w the properly escaped XML equivalent
+// of the plain text data s.
+func EscapeText(w io.Writer, s []byte) error {
+ return escapeText(w, s, true)
+}
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+func escapeText(w io.Writer, s []byte, escapeNewline bool) error {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRune(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ if !escapeNewline {
+ continue
+ }
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ if _, err := w.Write(s[last : i-width]); err != nil {
+ return err
+ }
+ if _, err := w.Write(esc); err != nil {
+ return err
+ }
+ last = i
+ }
+ if _, err := w.Write(s[last:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+// EscapeString writes to p the properly escaped XML equivalent
+// of the plain text data s.
+func (p *printer) EscapeString(s string) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ p.WriteString(s[last : i-width])
+ p.Write(esc)
+ last = i
+ }
+ p.WriteString(s[last:])
+}
+
+// Escape is like EscapeText but omits the error return value.
+// It is provided for backwards compatibility with Go 1.0.
+// Code targeting Go 1.1 or later should use EscapeText.
+func Escape(w io.Writer, s []byte) {
+ EscapeText(w, s)
+}
+
+// procInst parses the `param="..."` or `param='...'`
+// value out of the provided string, returning "" if not found.
+func procInst(param, s string) string {
+ // TODO: this parsing is somewhat lame and not exact.
+ // It works for all actual cases, though.
+ param = param + "="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go b/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go
new file mode 100644
index 000000000..312a7c98a
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go
@@ -0,0 +1,752 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+const testInput = `
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
+ "\r\n\t" + ` >
+ <hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;ç¿”</hello>
+ <query>&何; &is-it;</query>
+ <goodbye />
+ <outer foo:attr="value" xmlns:tag="ns4">
+ <inner/>
+ </outer>
+ <tag:name>
+ <![CDATA[Some text here.]]>
+ </tag:name>
+</body><!-- missing final newline -->`
+
+var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
+
+var rawTokens = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData("\n"),
+ Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ CharData("\n"),
+ StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData("\n "),
+ StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData("World <>'\" 白鵬翔"),
+ EndElement{Name{"", "hello"}},
+ CharData("\n "),
+ StartElement{Name{"", "query"}, []Attr{}},
+ CharData("What is it?"),
+ EndElement{Name{"", "query"}},
+ CharData("\n "),
+ StartElement{Name{"", "goodbye"}, []Attr{}},
+ EndElement{Name{"", "goodbye"}},
+ CharData("\n "),
+ StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData("\n "),
+ StartElement{Name{"", "inner"}, []Attr{}},
+ EndElement{Name{"", "inner"}},
+ CharData("\n "),
+ EndElement{Name{"", "outer"}},
+ CharData("\n "),
+ StartElement{Name{"tag", "name"}, []Attr{}},
+ CharData("\n "),
+ CharData("Some text here."),
+ CharData("\n "),
+ EndElement{Name{"tag", "name"}},
+ CharData("\n"),
+ EndElement{Name{"", "body"}},
+ Comment(" missing final newline "),
+}
+
+var cookedTokens = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData("\n"),
+ Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ CharData("\n"),
+ StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData("World <>'\" 白鵬翔"),
+ EndElement{Name{"ns2", "hello"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "query"}, []Attr{}},
+ CharData("What is it?"),
+ EndElement{Name{"ns2", "query"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "goodbye"}, []Attr{}},
+ EndElement{Name{"ns2", "goodbye"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "inner"}, []Attr{}},
+ EndElement{Name{"ns2", "inner"}},
+ CharData("\n "),
+ EndElement{Name{"ns2", "outer"}},
+ CharData("\n "),
+ StartElement{Name{"ns3", "name"}, []Attr{}},
+ CharData("\n "),
+ CharData("Some text here."),
+ CharData("\n "),
+ EndElement{Name{"ns3", "name"}},
+ CharData("\n"),
+ EndElement{Name{"ns2", "body"}},
+ Comment(" missing final newline "),
+}
+
+const testInputAltEncoding = `
+<?xml version="1.0" encoding="x-testing-uppercase"?>
+<TAG>VALUE</TAG>`
+
+var rawTokensAltEncoding = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("value"),
+ EndElement{Name{"", "tag"}},
+}
+
+var xmlInput = []string{
+ // unexpected EOF cases
+ "<",
+ "<t",
+ "<t ",
+ "<t/",
+ "<!",
+ "<!-",
+ "<!--",
+ "<!--c-",
+ "<!--c--",
+ "<!d",
+ "<t></",
+ "<t></t",
+ "<?",
+ "<?p",
+ "<t a",
+ "<t a=",
+ "<t a='",
+ "<t a=''",
+ "<t/><![",
+ "<t/><![C",
+ "<t/><![CDATA[d",
+ "<t/><![CDATA[d]",
+ "<t/><![CDATA[d]]",
+
+ // other Syntax errors
+ "<>",
+ "<t/a",
+ "<0 />",
+ "<?0 >",
+ // "<!0 >", // let the Token() caller handle
+ "</0>",
+ "<t 0=''>",
+ "<t a='&'>",
+ "<t a='<'>",
+ "<t>&nbspc;</t>",
+ "<t a>",
+ "<t a=>",
+ "<t a=v>",
+ // "<![CDATA[d]]>", // let the Token() caller handle
+ "<t></e>",
+ "<t></>",
+ "<t></t!",
+ "<t>cdata]]></t>",
+}
+
+func TestRawToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInput))
+ d.Entity = testEntity
+ testRawToken(t, d, testInput, rawTokens)
+}
+
+const nonStrictInput = `
+<tag>non&entity</tag>
+<tag>&unknown;entity</tag>
+<tag>&#123</tag>
+<tag>&#zzz;</tag>
+<tag>&なまえ3;</tag>
+<tag>&lt-gt;</tag>
+<tag>&;</tag>
+<tag>&0a;</tag>
+`
+
+var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
+
+var nonStrictTokens = []Token{
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("non&entity"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&unknown;entity"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&#123"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&#zzz;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&ãªã¾ãˆ3;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&lt-gt;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&0a;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+}
+
+func TestNonStrictRawToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(nonStrictInput))
+ d.Strict = false
+ testRawToken(t, d, nonStrictInput, nonStrictTokens)
+}
+
+type downCaser struct {
+ t *testing.T
+ r io.ByteReader
+}
+
+func (d *downCaser) ReadByte() (c byte, err error) {
+ c, err = d.r.ReadByte()
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ return
+}
+
+func (d *downCaser) Read(p []byte) (int, error) {
+ d.t.Fatalf("unexpected Read call on downCaser reader")
+ panic("unreachable")
+}
+
+func TestRawTokenAltEncoding(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInputAltEncoding))
+ d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+ if charset != "x-testing-uppercase" {
+ t.Fatalf("unexpected charset %q", charset)
+ }
+ return &downCaser{t, input.(io.ByteReader)}, nil
+ }
+ testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)
+}
+
+func TestRawTokenAltEncodingNoConverter(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInputAltEncoding))
+ token, err := d.RawToken()
+ if token == nil {
+ t.Fatalf("expected a token on first RawToken call")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ token, err = d.RawToken()
+ if token != nil {
+ t.Errorf("expected a nil token; got %#v", token)
+ }
+ if err == nil {
+ t.Fatalf("expected an error on second RawToken call")
+ }
+ const encoding = "x-testing-uppercase"
+ if !strings.Contains(err.Error(), encoding) {
+ t.Errorf("expected error to contain %q; got error: %v",
+ encoding, err)
+ }
+}
+
+func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {
+ lastEnd := int64(0)
+ for i, want := range rawTokens {
+ start := d.InputOffset()
+ have, err := d.RawToken()
+ end := d.InputOffset()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ var shave, swant string
+ if _, ok := have.(CharData); ok {
+ shave = fmt.Sprintf("CharData(%q)", have)
+ } else {
+ shave = fmt.Sprintf("%#v", have)
+ }
+ if _, ok := want.(CharData); ok {
+ swant = fmt.Sprintf("CharData(%q)", want)
+ } else {
+ swant = fmt.Sprintf("%#v", want)
+ }
+ t.Errorf("token %d = %s, want %s", i, shave, swant)
+ }
+
+ // Check that InputOffset returned actual token.
+ switch {
+ case start < lastEnd:
+ t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have)
+ case start >= end:
+ // Special case: EndElement can be synthesized.
+ if start == end && end == lastEnd {
+ break
+ }
+ t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have)
+ case end > int64(len(raw)):
+ t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have)
+ default:
+ text := raw[start:end]
+ if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) {
+ t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have)
+ }
+ }
+ lastEnd = end
+ }
+}
+
+// Ensure that directives (specifically !DOCTYPE) include the complete
+// text of any nested directives, noting that < and > do not change
+// nesting depth if they are in single or double quotes.
+
+var nestedDirectivesInput = `
+<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY xlt ">">]>
+<!DOCTYPE [<!ENTITY xlt "<">]>
+<!DOCTYPE [<!ENTITY xlt '>'>]>
+<!DOCTYPE [<!ENTITY xlt '<'>]>
+<!DOCTYPE [<!ENTITY xlt '">'>]>
+<!DOCTYPE [<!ENTITY xlt "'<">]>
+`
+
+var nestedDirectivesTokens = []Token{
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
+ CharData("\n"),
+}
+
+func TestNestedDirectives(t *testing.T) {
+ d := NewDecoder(strings.NewReader(nestedDirectivesInput))
+
+ for i, want := range nestedDirectivesTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInput))
+ d.Entity = testEntity
+
+ for i, want := range cookedTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestSyntax(t *testing.T) {
+ for i := range xmlInput {
+ d := NewDecoder(strings.NewReader(xmlInput[i]))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
+ }
+ }
+}
+
+type allScalars struct {
+ True1 bool
+ True2 bool
+ False1 bool
+ False2 bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint int
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+ String string
+ PtrString *string
+}
+
+var all = allScalars{
+ True1: true,
+ True2: true,
+ False1: false,
+ False2: false,
+ Int: 1,
+ Int8: -2,
+ Int16: 3,
+ Int32: -4,
+ Int64: 5,
+ Uint: 6,
+ Uint8: 7,
+ Uint16: 8,
+ Uint32: 9,
+ Uint64: 10,
+ Uintptr: 11,
+ Float32: 13.0,
+ Float64: 14.0,
+ String: "15",
+ PtrString: &sixteen,
+}
+
+var sixteen = "16"
+
+const testScalarsInput = `<allscalars>
+ <True1>true</True1>
+ <True2>1</True2>
+ <False1>false</False1>
+ <False2>0</False2>
+ <Int>1</Int>
+ <Int8>-2</Int8>
+ <Int16>3</Int16>
+ <Int32>-4</Int32>
+ <Int64>5</Int64>
+ <Uint>6</Uint>
+ <Uint8>7</Uint8>
+ <Uint16>8</Uint16>
+ <Uint32>9</Uint32>
+ <Uint64>10</Uint64>
+ <Uintptr>11</Uintptr>
+ <Float>12.0</Float>
+ <Float32>13.0</Float32>
+ <Float64>14.0</Float64>
+ <String>15</String>
+ <PtrString>16</PtrString>
+</allscalars>`
+
+func TestAllScalars(t *testing.T) {
+ var a allScalars
+ err := Unmarshal([]byte(testScalarsInput), &a)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, all) {
+ t.Errorf("have %+v want %+v", a, all)
+ }
+}
+
+type item struct {
+ Field_a string
+}
+
+func TestIssue569(t *testing.T) {
+ data := `<item><Field_a>abcd</Field_a></item>`
+ var i item
+ err := Unmarshal([]byte(data), &i)
+
+ if err != nil || i.Field_a != "abcd" {
+ t.Fatal("Expecting abcd")
+ }
+}
+
+func TestUnquotedAttrs(t *testing.T) {
+ data := "<tag attr=azAZ09:-_\t>"
+ d := NewDecoder(strings.NewReader(data))
+ d.Strict = false
+ token, err := d.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != "tag" {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != "azAZ09:-_" {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != "attr" {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+}
+
+func TestValuelessAttrs(t *testing.T) {
+ tests := [][3]string{
+ {"<p nowrap>", "p", "nowrap"},
+ {"<p nowrap >", "p", "nowrap"},
+ {"<input checked/>", "input", "checked"},
+ {"<input checked />", "input", "checked"},
+ }
+ for _, test := range tests {
+ d := NewDecoder(strings.NewReader(test[0]))
+ d.Strict = false
+ token, err := d.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != test[1] {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != test[2] {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != test[2] {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+ }
+}
+
+func TestCopyTokenCharData(t *testing.T) {
+ data := []byte("same data")
+ var tok1 Token = CharData(data)
+ tok2 := CopyToken(tok1)
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) != CharData")
+ }
+ data[1] = 'o'
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestCopyTokenStartElement(t *testing.T) {
+ elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
+ var tok1 Token = elt
+ tok2 := CopyToken(tok1)
+ if tok1.(StartElement).Attr[0].Value != "en" {
+ t.Error("CopyToken overwrote Attr[0]")
+ }
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(StartElement) != StartElement")
+ }
+ tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestSyntaxErrorLineNum(t *testing.T) {
+ testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
+ d := NewDecoder(strings.NewReader(testInput))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Error("Expected SyntaxError.")
+ }
+ if synerr.Line != 3 {
+ t.Error("SyntaxError didn't have correct line number.")
+ }
+}
+
+func TestTrailingRawToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestTrailingToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestEntityInsideCDATA(t *testing.T) {
+ input := `<test><![CDATA[ &val=foo ]]></test>`
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+var characterTests = []struct {
+ in string
+ err string
+}{
+ {"\x12<doc/>", "illegal character code U+0012"},
+ {"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
+ {"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
+ {"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
+ {"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
+ {"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
+ {"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
+ {"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
+ {"<doc>&hello;</doc>", "invalid character entity &hello;"},
+}
+
+func TestDisallowedCharacters(t *testing.T) {
+
+ for i, tt := range characterTests {
+ d := NewDecoder(strings.NewReader(tt.in))
+ var err error
+
+ for err == nil {
+ _, err = d.Token()
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
+ }
+ if synerr.Msg != tt.err {
+ t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
+ }
+ }
+}
+
+type procInstEncodingTest struct {
+ expect, got string
+}
+
+var procInstTests = []struct {
+ input string
+ expect [2]string
+}{
+ {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}},
+ {`encoding="FOO" `, [2]string{"", "FOO"}},
+}
+
+func TestProcInstEncoding(t *testing.T) {
+ for _, test := range procInstTests {
+ if got := procInst("version", test.input); got != test.expect[0] {
+ t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0])
+ }
+ if got := procInst("encoding", test.input); got != test.expect[1] {
+ t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1])
+ }
+ }
+}
+
+// Ensure that directives with comments include the complete
+// text of any nested directives.
+
+var directivesWithCommentsInput = `
+<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
+<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
+`
+
+var directivesWithCommentsTokens = []Token{
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`),
+ CharData("\n"),
+}
+
+func TestDirectivesWithComments(t *testing.T) {
+ d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
+
+ for i, want := range directivesWithCommentsTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+// Writer whose Write method always returns an error.
+type errWriter struct{}
+
+func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") }
+
+func TestEscapeTextIOErrors(t *testing.T) {
+ expectErr := "unwritable"
+ err := EscapeText(errWriter{}, []byte{'A'})
+
+ if err == nil || err.Error() != expectErr {
+ t.Errorf("have %v, want %v", err, expectErr)
+ }
+}
+
+func TestEscapeTextInvalidChar(t *testing.T) {
+ input := []byte("A \x00 terminated string.")
+ expected := "A \uFFFD terminated string."
+
+ buff := new(bytes.Buffer)
+ if err := EscapeText(buff, input); err != nil {
+ t.Fatalf("have %v, want nil", err)
+ }
+ text := buff.String()
+
+ if text != expected {
+ t.Errorf("have %v, want %v", text, expected)
+ }
+}
+
+func TestIssue5880(t *testing.T) {
+ type T []byte
+ data, err := Marshal(T{192, 168, 0, 1})
+ if err != nil {
+ t.Errorf("Marshal error: %v", err)
+ }
+ if !utf8.Valid(data) {
+ t.Errorf("Marshal generated invalid UTF-8: %x", data)
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go
new file mode 100644
index 000000000..514db5dd1
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go
@@ -0,0 +1,94 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program is a server for the WebDAV 'litmus' compliance test at
+http://www.webdav.org/neon/litmus/
+To run the test:
+
+go run litmus_test_server.go
+
+and separately, from the downloaded litmus-xxx directory:
+
+make URL=http://localhost:9999/ check
+*/
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/webdav"
+)
+
+var port = flag.Int("port", 9999, "server port")
+
+func main() {
+ flag.Parse()
+ log.SetFlags(0)
+ h := &webdav.Handler{
+ FileSystem: webdav.NewMemFS(),
+ LockSystem: webdav.NewMemLS(),
+ Logger: func(r *http.Request, err error) {
+ litmus := r.Header.Get("X-Litmus")
+ if len(litmus) > 19 {
+ litmus = litmus[:16] + "..."
+ }
+
+ switch r.Method {
+ case "COPY", "MOVE":
+ dst := ""
+ if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
+ dst = u.Path
+ }
+ o := r.Header.Get("Overwrite")
+ log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
+ default:
+ log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
+ }
+ },
+ }
+
+ // The next line would normally be:
+ // http.Handle("/", h)
+ // but we wrap that HTTP handler h to cater for a special case.
+ //
+ // The propfind_invalid2 litmus test case expects an empty namespace prefix
+ // declaration to be an error. The FAQ in the webdav litmus test says:
+ //
+ // "What does the "propfind_invalid2" test check for?...
+ //
+ // If a request was sent with an XML body which included an empty namespace
+ // prefix declaration (xmlns:ns1=""), then the server must reject that with
+ // a "400 Bad Request" response, as it is invalid according to the XML
+ // Namespace specification."
+ //
+ // On the other hand, the Go standard library's encoding/xml package
+ // accepts an empty xmlns namespace, as per the discussion at
+ // https://github.com/golang/go/issues/8068
+ //
+ // Empty namespaces seem disallowed in the second (2006) edition of the XML
+ // standard, but allowed in a later edition. The grammar differs between
+ // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
+ // http://www.w3.org/TR/REC-xml-names/#dt-prefix
+ //
+ // Thus, we assume that the propfind_invalid2 test is obsolete, and
+ // hard-code the 400 Bad Request response that the test expects.
+ http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
+ http.Error(w, "400 Bad Request", http.StatusBadRequest)
+ return
+ }
+ h.ServeHTTP(w, r)
+ }))
+
+ addr := fmt.Sprintf(":%d", *port)
+ log.Printf("Serving %v", addr)
+ log.Fatal(http.ListenAndServe(addr, nil))
+}
diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go
new file mode 100644
index 000000000..344ac5cea
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/lock.go
@@ -0,0 +1,445 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "container/heap"
+ "errors"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ // ErrConfirmationFailed is returned by a LockSystem's Confirm method.
+ ErrConfirmationFailed = errors.New("webdav: confirmation failed")
+ // ErrForbidden is returned by a LockSystem's Unlock method.
+ ErrForbidden = errors.New("webdav: forbidden")
+ // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
+ ErrLocked = errors.New("webdav: locked")
+ // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
+ ErrNoSuchLock = errors.New("webdav: no such lock")
+)
+
+// Condition can match a WebDAV resource, based on a token or ETag.
+// Exactly one of Token and ETag should be non-empty.
+type Condition struct {
+ Not bool
+ Token string
+ ETag string
+}
+
+// LockSystem manages access to a collection of named resources. The elements
+// in a lock name are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+type LockSystem interface {
+ // Confirm confirms that the caller can claim all of the locks specified by
+ // the given conditions, and that holding the union of all of those locks
+ // gives exclusive access to all of the named resources. Up to two resources
+ // can be named. Empty names are ignored.
+ //
+ // Exactly one of release and err will be non-nil. If release is non-nil,
+ // all of the requested locks are held until release is called. Calling
+ // release does not unlock the lock, in the WebDAV UNLOCK sense, but once
+ // Confirm has confirmed that a lock claim is valid, that lock cannot be
+ // Confirmed again until it has been released.
+ //
+ // If Confirm returns ErrConfirmationFailed then the Handler will continue
+ // to try any other set of locks presented (a WebDAV HTTP request can
+ // present more than one set of locks). If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
+
+ // Create creates a lock with the given depth, duration, owner and root
+ // (name). The depth will either be negative (meaning infinite) or zero.
+ //
+ // If Create returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP status. If it returns any other non-nil error, the Handler will
+ // write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ //
+ // The token returned identifies the created lock. It should be an absolute
+ // URI as defined by RFC 3986, Section 4.3. In particular, it should not
+ // contain whitespace.
+ Create(now time.Time, details LockDetails) (token string, err error)
+
+ // Refresh refreshes the lock with the given token.
+ //
+ // If Refresh returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
+ // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
+
+ // Unlock unlocks the lock with the given token.
+ //
+ // If Unlock returns ErrForbidden then the Handler will write a "403
+ // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
+ // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
+ // then the Handler will write a "409 Conflict" HTTP Status. If it returns
+ // any other non-nil error, the Handler will write a "500 Internal Server
+ // Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
+ // when to use each error.
+ Unlock(now time.Time, token string) error
+}
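+
+// The sketch below is illustrative only: with error handling elided, it shows
+// how a hypothetical caller could drive a LockSystem through the methods
+// documented above (NewMemLS is defined later in this file).
+//
+//	ls := NewMemLS()
+//	now := time.Now()
+//	token, _ := ls.Create(now, LockDetails{Root: "/a/b", Duration: time.Hour, ZeroDepth: true})
+//	release, err := ls.Confirm(now, "/a/b", "", Condition{Token: token})
+//	if err == nil {
+//		// Exclusive access to /a/b is held until release is called.
+//		release()
+//	}
+//	_ = ls.Unlock(now, token)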
+
+// LockDetails are a lock's metadata.
+type LockDetails struct {
+ // Root is the root resource name being locked. For a zero-depth lock, the
+ // root is the only resource being locked.
+ Root string
+ // Duration is the lock timeout. A negative duration means infinite.
+ Duration time.Duration
+ // OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
+ //
+ // TODO: does the "verbatim" nature play well with XML namespaces?
+ // Does the OwnerXML field need to have more structure? See
+ // https://codereview.appspot.com/175140043/#msg2
+ OwnerXML string
+ // ZeroDepth is whether the lock has zero depth. If it does not have zero
+ // depth, it has infinite depth.
+ ZeroDepth bool
+}
+
+// NewMemLS returns a new in-memory LockSystem.
+func NewMemLS() LockSystem {
+ return &memLS{
+ byName: make(map[string]*memLSNode),
+ byToken: make(map[string]*memLSNode),
+ gen: uint64(time.Now().Unix()),
+ }
+}
+
+type memLS struct {
+ mu sync.Mutex
+ byName map[string]*memLSNode
+ byToken map[string]*memLSNode
+ gen uint64
+ // byExpiry only contains those nodes whose LockDetails have a finite
+ // Duration and are yet to expire.
+ byExpiry byExpiry
+}
+
+func (m *memLS) nextToken() string {
+ m.gen++
+ return strconv.FormatUint(m.gen, 10)
+}
+
+func (m *memLS) collectExpiredNodes(now time.Time) {
+ for len(m.byExpiry) > 0 {
+ if now.Before(m.byExpiry[0].expiry) {
+ break
+ }
+ m.remove(m.byExpiry[0])
+ }
+}
+
+func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ var n0, n1 *memLSNode
+ if name0 != "" {
+ if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+ if name1 != "" {
+ if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+
+ // Don't hold the same node twice.
+ if n1 == n0 {
+ n1 = nil
+ }
+
+ if n0 != nil {
+ m.hold(n0)
+ }
+ if n1 != nil {
+ m.hold(n1)
+ }
+ return func() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if n1 != nil {
+ m.unhold(n1)
+ }
+ if n0 != nil {
+ m.unhold(n0)
+ }
+ }, nil
+}
+
+// lookup returns the node n that locks the named resource, provided that n
+// matches at least one of the given conditions and that lock isn't held by
+// another party. Otherwise, it returns nil.
+//
+// n may be a parent of the named resource, if n is an infinite depth lock.
+func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
+ // TODO: support Condition.Not and Condition.ETag.
+ for _, c := range conditions {
+ n = m.byToken[c.Token]
+ if n == nil || n.held {
+ continue
+ }
+ if name == n.details.Root {
+ return n
+ }
+ if n.details.ZeroDepth {
+ continue
+ }
+ if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
+ return n
+ }
+ }
+ return nil
+}
+
+func (m *memLS) hold(n *memLSNode) {
+ if n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = true
+ if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
+func (m *memLS) unhold(n *memLSNode) {
+ if !n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = false
+ if n.details.Duration >= 0 {
+ heap.Push(&m.byExpiry, n)
+ }
+}
+
+func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+ details.Root = slashClean(details.Root)
+
+ if !m.canCreate(details.Root, details.ZeroDepth) {
+ return "", ErrLocked
+ }
+ n := m.create(details.Root)
+ n.token = m.nextToken()
+ m.byToken[n.token] = n
+ n.details = details
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.token, nil
+}
+
+func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return LockDetails{}, ErrNoSuchLock
+ }
+ if n.held {
+ return LockDetails{}, ErrLocked
+ }
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+ n.details.Duration = duration
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.details, nil
+}
+
+func (m *memLS) Unlock(now time.Time, token string) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return ErrNoSuchLock
+ }
+ if n.held {
+ return ErrLocked
+ }
+ m.remove(n)
+ return nil
+}
+
+func (m *memLS) canCreate(name string, zeroDepth bool) bool {
+ return walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ return true
+ }
+ if first {
+ if n.token != "" {
+ // The target node is already locked.
+ return false
+ }
+ if !zeroDepth {
+ // The requested lock depth is infinite, and the fact that n exists
+ // (n != nil) means that a descendant of the target node is locked.
+ return false
+ }
+ } else if n.token != "" && !n.details.ZeroDepth {
+ // An ancestor of the target node is locked with infinite depth.
+ return false
+ }
+ return true
+ })
+}
+
+func (m *memLS) create(name string) (ret *memLSNode) {
+ walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ n = &memLSNode{
+ details: LockDetails{
+ Root: name0,
+ },
+ byExpiryIndex: -1,
+ }
+ m.byName[name0] = n
+ }
+ n.refCount++
+ if first {
+ ret = n
+ }
+ return true
+ })
+ return ret
+}
+
+func (m *memLS) remove(n *memLSNode) {
+ delete(m.byToken, n.token)
+ n.token = ""
+ walkToRoot(n.details.Root, func(name0 string, first bool) bool {
+ x := m.byName[name0]
+ x.refCount--
+ if x.refCount == 0 {
+ delete(m.byName, name0)
+ }
+ return true
+ })
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
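+// walkToRoot calls f for name and then for each of name's ancestors, ending
+// at "/". first is true only for the initial call. If any call to f returns
+// false, the walk stops early and walkToRoot returns false. For example,
+// walkToRoot("/a/b", f) calls f("/a/b", true), then f("/a", false), then
+// f("/", false).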
+func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
+ for first := true; ; first = false {
+ if !f(name, first) {
+ return false
+ }
+ if name == "/" {
+ break
+ }
+ name = name[:strings.LastIndex(name, "/")]
+ if name == "" {
+ name = "/"
+ }
+ }
+ return true
+}
+
+type memLSNode struct {
+ // details are the lock metadata. Even if this node's name is not explicitly locked,
+ // details.Root will still equal the node's name.
+ details LockDetails
+ // token is the unique identifier for this node's lock. An empty token means that
+ // this node is not explicitly locked.
+ token string
+ // refCount is the number of self-or-descendant nodes that are explicitly locked.
+ refCount int
+ // expiry is when this node's lock expires.
+ expiry time.Time
+ // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
+ // if this node does not expire, or has expired.
+ byExpiryIndex int
+ // held is whether this node's lock is actively held by a Confirm call.
+ held bool
+}
+
+type byExpiry []*memLSNode
+
+func (b *byExpiry) Len() int {
+ return len(*b)
+}
+
+func (b *byExpiry) Less(i, j int) bool {
+ return (*b)[i].expiry.Before((*b)[j].expiry)
+}
+
+func (b *byExpiry) Swap(i, j int) {
+ (*b)[i], (*b)[j] = (*b)[j], (*b)[i]
+ (*b)[i].byExpiryIndex = i
+ (*b)[j].byExpiryIndex = j
+}
+
+func (b *byExpiry) Push(x interface{}) {
+ n := x.(*memLSNode)
+ n.byExpiryIndex = len(*b)
+ *b = append(*b, n)
+}
+
+func (b *byExpiry) Pop() interface{} {
+ i := len(*b) - 1
+ n := (*b)[i]
+ (*b)[i] = nil
+ n.byExpiryIndex = -1
+ *b = (*b)[:i]
+ return n
+}
+
+const infiniteTimeout = -1
+
+// parseTimeout parses the Timeout HTTP header, as per RFC 4918 section 10.7.
+// If s is empty, an infiniteTimeout is returned.
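+//
+// For example, "Second-600" parses to 600 * time.Second. Only the part of s
+// before the first comma is considered, so "Infinite, Second-4100000000"
+// parses to infiniteTimeout.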
+func parseTimeout(s string) (time.Duration, error) {
+ if s == "" {
+ return infiniteTimeout, nil
+ }
+ if i := strings.IndexByte(s, ','); i >= 0 {
+ s = s[:i]
+ }
+ s = strings.TrimSpace(s)
+ if s == "Infinite" {
+ return infiniteTimeout, nil
+ }
+ const pre = "Second-"
+ if !strings.HasPrefix(s, pre) {
+ return 0, errInvalidTimeout
+ }
+ s = s[len(pre):]
+ if s == "" || s[0] < '0' || '9' < s[0] {
+ return 0, errInvalidTimeout
+ }
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || 1<<32-1 < n {
+ return 0, errInvalidTimeout
+ }
+ return time.Duration(n) * time.Second, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go
new file mode 100644
index 000000000..116d6c0d7
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/lock_test.go
@@ -0,0 +1,731 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "fmt"
+ "math/rand"
+ "path"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestWalkToRoot(t *testing.T) {
+ testCases := []struct {
+ name string
+ want []string
+ }{{
+ "/a/b/c/d",
+ []string{
+ "/a/b/c/d",
+ "/a/b/c",
+ "/a/b",
+ "/a",
+ "/",
+ },
+ }, {
+ "/a",
+ []string{
+ "/a",
+ "/",
+ },
+ }, {
+ "/",
+ []string{
+ "/",
+ },
+ }}
+
+ for _, tc := range testCases {
+ var got []string
+ if !walkToRoot(tc.name, func(name0 string, first bool) bool {
+ if first != (len(got) == 0) {
+ t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got))
+ return false
+ }
+ got = append(got, name0)
+ return true
+ }) {
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want)
+ }
+ }
+}
+
+var lockTestDurations = []time.Duration{
+ infiniteTimeout, // infiniteTimeout means to never expire.
+ 0, // A zero duration means to expire immediately.
+ 100 * time.Hour, // A very large duration will not expire in these tests.
+}
+
+// lockTestNames are the names of a set of mutually compatible locks. For each
+// name fragment:
+// - _ means no explicit lock.
+// - i means an infinite-depth lock,
+// - z means a zero-depth lock.
+var lockTestNames = []string{
+ "/_/_/_/_/z",
+ "/_/_/i",
+ "/_/z",
+ "/_/z/i",
+ "/_/z/z",
+ "/_/z/_/i",
+ "/_/z/_/z",
+ "/i",
+ "/z",
+ "/z/_/i",
+ "/z/_/z",
+}
+
+func lockTestZeroDepth(name string) bool {
+ switch name[len(name)-1] {
+ case 'i':
+ return false
+ case 'z':
+ return true
+ }
+ panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name))
+}
+
+func TestMemLSCanCreate(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+
+ for _, name := range lockTestNames {
+ _, err := m.Create(now, LockDetails{
+ Root: name,
+ Duration: infiniteTimeout,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("creating lock for %q: %v", name, err)
+ }
+ }
+
+ wantCanCreate := func(name string, zeroDepth bool) bool {
+ for _, n := range lockTestNames {
+ switch {
+ case n == name:
+ // An existing lock has the same name as the proposed lock.
+ return false
+ case strings.HasPrefix(n, name):
+ // An existing lock would be a child of the proposed lock,
+ // which conflicts if the proposed lock has infinite depth.
+ if !zeroDepth {
+ return false
+ }
+ case strings.HasPrefix(name, n):
+ // An existing lock would be an ancestor of the proposed lock,
+ // which conflicts if the ancestor has infinite depth.
+ if n[len(n)-1] == 'i' {
+ return false
+ }
+ }
+ }
+ return true
+ }
+
+ var check func(int, string)
+ check = func(recursion int, name string) {
+ for _, zeroDepth := range []bool{false, true} {
+ got := m.canCreate(name, zeroDepth)
+ want := wantCanCreate(name, zeroDepth)
+ if got != want {
+ t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want)
+ }
+ }
+ if recursion == 6 {
+ return
+ }
+ if name != "/" {
+ name += "/"
+ }
+ for _, c := range "_iz" {
+ check(recursion+1, name+string(c))
+ }
+ }
+ check(0, "/")
+}
+
+func TestMemLSLookup(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+
+ badToken := m.nextToken()
+ t.Logf("badToken=%q", badToken)
+
+ for _, name := range lockTestNames {
+ token, err := m.Create(now, LockDetails{
+ Root: name,
+ Duration: infiniteTimeout,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("creating lock for %q: %v", name, err)
+ }
+ t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token)
+ }
+
+ baseNames := append([]string{"/a", "/b/c"}, lockTestNames...)
+ for _, baseName := range baseNames {
+ for _, suffix := range []string{"", "/0", "/1/2/3"} {
+ name := baseName + suffix
+
+ goodToken := ""
+ base := m.byName[baseName]
+ if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) {
+ goodToken = base.token
+ }
+
+ for _, token := range []string{badToken, goodToken} {
+ if token == "" {
+ continue
+ }
+
+ got := m.lookup(name, Condition{Token: token})
+ want := base
+ if token == badToken {
+ want = nil
+ }
+ if got != want {
+ t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p",
+ name, token, token == badToken, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestMemLSConfirm(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ alice, err := m.Create(now, LockDetails{
+ Root: "/alice",
+ Duration: infiniteTimeout,
+ ZeroDepth: false,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ tweedle, err := m.Create(now, LockDetails{
+ Root: "/tweedle",
+ Duration: infiniteTimeout,
+ ZeroDepth: false,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Create: inconsistent state: %v", err)
+ }
+
+ // Test a mismatch between name and condition.
+ _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice})
+ if err != ErrConfirmationFailed {
+ t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (mismatch): inconsistent state: %v", err)
+ }
+
+ // Test two names (that fall under the same lock) in the one Confirm call.
+ release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (twins): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (twins): inconsistent state: %v", err)
+ }
+ release()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (twins): inconsistent state: %v", err)
+ }
+
+ // Test the same two names in overlapping Confirm / release calls.
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (sequence #0): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err)
+ }
+
+ _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
+ if err != ErrConfirmationFailed {
+ t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err)
+ }
+
+ releaseDee()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (sequence #2): inconsistent state: %v", err)
+ }
+
+ releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (sequence #3): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err)
+ }
+
+ // Test that you can't unlock a held lock.
+ err = m.Unlock(now, tweedle)
+ if err != ErrLocked {
+ t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err)
+ }
+
+ releaseDum()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (sequence #5): inconsistent state: %v", err)
+ }
+
+ err = m.Unlock(now, tweedle)
+ if err != nil {
+ t.Fatalf("Unlock (sequence #6): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err)
+ }
+}
+
+func TestMemLSNonCanonicalRoot(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ token, err := m.Create(now, LockDetails{
+ Root: "/foo/./bar//",
+ Duration: 1 * time.Second,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Create: inconsistent state: %v", err)
+ }
+ if err := m.Unlock(now, token); err != nil {
+ t.Fatalf("Unlock: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Unlock: inconsistent state: %v", err)
+ }
+}
+
+func TestMemLSExpiry(t *testing.T) {
+ m := NewMemLS().(*memLS)
+ testCases := []string{
+ "setNow 0",
+ "create /a.5",
+ "want /a.5",
+ "create /c.6",
+ "want /a.5 /c.6",
+ "create /a/b.7",
+ "want /a.5 /a/b.7 /c.6",
+ "setNow 4",
+ "want /a.5 /a/b.7 /c.6",
+ "setNow 5",
+ "want /a/b.7 /c.6",
+ "setNow 6",
+ "want /a/b.7",
+ "setNow 7",
+ "want ",
+ "setNow 8",
+ "want ",
+ "create /a.12",
+ "create /b.13",
+ "create /c.15",
+ "create /a/d.16",
+ "want /a.12 /a/d.16 /b.13 /c.15",
+ "refresh /a.14",
+ "want /a.14 /a/d.16 /b.13 /c.15",
+ "setNow 12",
+ "want /a.14 /a/d.16 /b.13 /c.15",
+ "setNow 13",
+ "want /a.14 /a/d.16 /c.15",
+ "setNow 14",
+ "want /a/d.16 /c.15",
+ "refresh /a/d.20",
+ "refresh /c.20",
+ "want /a/d.20 /c.20",
+ "setNow 20",
+ "want ",
+ }
+
+ tokens := map[string]string{}
+ zTime := time.Unix(0, 0)
+ now := zTime
+ for i, tc := range testCases {
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "create", "refresh":
+ parts := strings.Split(arg, ".")
+ if len(parts) != 2 {
+ t.Fatalf("test case #%d %q: invalid create", i, tc)
+ }
+ root := parts[0]
+ d, err := strconv.Atoi(parts[1])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid duration", i, tc)
+ }
+ dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)
+
+ switch op {
+ case "create":
+ token, err := m.Create(now, LockDetails{
+ Root: root,
+ Duration: dur,
+ ZeroDepth: true,
+ })
+ if err != nil {
+ t.Fatalf("test case #%d %q: Create: %v", i, tc, err)
+ }
+ tokens[root] = token
+
+ case "refresh":
+ token := tokens[root]
+ if token == "" {
+ t.Fatalf("test case #%d %q: no token for %q", i, tc, root)
+ }
+ got, err := m.Refresh(now, token, dur)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err)
+ }
+ want := LockDetails{
+ Root: root,
+ Duration: dur,
+ ZeroDepth: true,
+ }
+ if got != want {
+ t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want)
+ }
+ }
+
+ case "setNow":
+ d, err := strconv.Atoi(arg)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid duration", i, tc)
+ }
+ now = time.Unix(0, 0).Add(time.Duration(d) * time.Second)
+
+ case "want":
+ m.mu.Lock()
+ m.collectExpiredNodes(now)
+ got := make([]string, 0, len(m.byToken))
+ for _, n := range m.byToken {
+ got = append(got, fmt.Sprintf("%s.%d",
+ n.details.Root, n.expiry.Sub(zTime)/time.Second))
+ }
+ m.mu.Unlock()
+ sort.Strings(got)
+ want := []string{}
+ if arg != "" {
+ want = strings.Split(arg, " ")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want)
+ }
+ }
+
+ if err := m.consistent(); err != nil {
+ t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err)
+ }
+ }
+}
+
+func TestMemLS(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ rng := rand.New(rand.NewSource(0))
+ tokens := map[string]string{}
+ nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0
+ const N = 2000
+
+ for i := 0; i < N; i++ {
+ name := lockTestNames[rng.Intn(len(lockTestNames))]
+ duration := lockTestDurations[rng.Intn(len(lockTestDurations))]
+ confirmed, unlocked := false, false
+
+ // If the name was already locked, we randomly confirm/release, refresh
+ // or unlock it. Otherwise, we create a lock.
+ token := tokens[name]
+ if token != "" {
+ switch rng.Intn(3) {
+ case 0:
+ confirmed = true
+ nConfirm++
+ release, err := m.Confirm(now, name, "", Condition{Token: token})
+ if err != nil {
+ t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
+ }
+ release()
+
+ case 1:
+ nRefresh++
+ if _, err := m.Refresh(now, token, duration); err != nil {
+ t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err)
+ }
+
+ case 2:
+ unlocked = true
+ nUnlock++
+ if err := m.Unlock(now, token); err != nil {
+ t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err)
+ }
+ }
+
+ } else {
+ nCreate++
+ var err error
+ token, err = m.Create(now, LockDetails{
+ Root: name,
+ Duration: duration,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("iteration #%d: Create %q: %v", i, name, err)
+ }
+ }
+
+ if !confirmed {
+ if duration == 0 || unlocked {
+ // A zero-duration lock should expire immediately and is
+ // effectively equivalent to being unlocked.
+ tokens[name] = ""
+ } else {
+ tokens[name] = token
+ }
+ }
+
+ if err := m.consistent(); err != nil {
+ t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
+ }
+ }
+
+ if nConfirm < N/10 {
+ t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10)
+ }
+ if nCreate < N/10 {
+ t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10)
+ }
+ if nRefresh < N/10 {
+ t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10)
+ }
+ if nUnlock < N/10 {
+ t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10)
+ }
+}
+
+func (m *memLS) consistent() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ // If m.byName is non-empty, then it must contain an entry for the root "/",
+ // and its refCount should equal the number of locked nodes.
+ if len(m.byName) > 0 {
+ n := m.byName["/"]
+ if n == nil {
+ return fmt.Errorf(`non-empty m.byName does not contain the root "/"`)
+ }
+ if n.refCount != len(m.byToken) {
+ return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken))
+ }
+ }
+
+ for name, n := range m.byName {
+ // The map keys should be consistent with the node's copy of the key.
+ if n.details.Root != name {
+ return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name)
+ }
+
+ // A name must be clean, and start with a "/".
+ if len(name) == 0 || name[0] != '/' {
+ return fmt.Errorf(`node name %q does not start with "/"`, name)
+ }
+ if name != path.Clean(name) {
+ return fmt.Errorf(`node name %q is not clean`, name)
+ }
+
+ // A node's refCount should be positive.
+ if n.refCount <= 0 {
+ return fmt.Errorf("non-positive refCount for node at name %q", name)
+ }
+
+ // A node's refCount should be the number of self-or-descendants that
+ // are locked (i.e. have a non-empty token).
+ var list []string
+ for name0, n0 := range m.byName {
+ // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',
+ // so strings.HasPrefix is equivalent to a self-or-descendant name match.
+ // We don't have to worry about "/foo/bar" being a false positive match
+ // for "/foo/b".
+ if strings.HasPrefix(name0, name) && n0.token != "" {
+ list = append(list, name0)
+ }
+ }
+ if n.refCount != len(list) {
+ sort.Strings(list)
+ return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)",
+ name, n.refCount, list, len(list))
+ }
+
+ // A node n is in m.byToken if it has a non-empty token.
+ if n.token != "" {
+ if _, ok := m.byToken[n.token]; !ok {
+ return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token)
+ }
+ }
+
+ // A node n is in m.byExpiry if it has a non-negative byExpiryIndex.
+ if n.byExpiryIndex >= 0 {
+ if n.byExpiryIndex >= len(m.byExpiry) {
+ return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry))
+ }
+ if n != m.byExpiry[n.byExpiryIndex] {
+ return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex)
+ }
+ }
+ }
+
+ for token, n := range m.byToken {
+ // The map keys should be consistent with the node's copy of the key.
+ if n.token != token {
+ return fmt.Errorf("node token %q != byToken map key %q", n.token, token)
+ }
+
+ // Every node in m.byToken is in m.byName.
+ if _, ok := m.byName[n.details.Root]; !ok {
+ return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root)
+ }
+ }
+
+ for i, n := range m.byExpiry {
+ // The slice indices should be consistent with the node's copy of the index.
+ if n.byExpiryIndex != i {
+ return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i)
+ }
+
+ // Every node in m.byExpiry is in m.byName.
+ if _, ok := m.byName[n.details.Root]; !ok {
+ return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root)
+ }
+
+ // No node in m.byExpiry should be held.
+ if n.held {
+ return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root)
+ }
+ }
+ return nil
+}
+
+func TestParseTimeout(t *testing.T) {
+ testCases := []struct {
+ s string
+ want time.Duration
+ wantErr error
+ }{{
+ "",
+ infiniteTimeout,
+ nil,
+ }, {
+ "Infinite",
+ infiniteTimeout,
+ nil,
+ }, {
+ "Infinitesimal",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "infinite",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-0",
+ 0 * time.Second,
+ nil,
+ }, {
+ "Second-123",
+ 123 * time.Second,
+ nil,
+ }, {
+ " Second-456 ",
+ 456 * time.Second,
+ nil,
+ }, {
+ "Second-4100000000",
+ 4100000000 * time.Second,
+ nil,
+ }, {
+ "junk",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second--1",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second--123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-+123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-0x123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "second-123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-4294967295",
+ 4294967295 * time.Second,
+ nil,
+ }, {
+ // Section 10.7 says that "The timeout value for TimeType "Second"
+ // must not be greater than 2^32-1."
+ "Second-4294967296",
+ 0,
+ errInvalidTimeout,
+ }, {
+ // This test case comes from section 9.10.9 of the spec. It says,
+ //
+ // "In this request, the client has specified that it desires an
+ // infinite-length lock, if available, otherwise a timeout of 4.1
+ // billion seconds, if available."
+ //
+ // The Go WebDAV package always supports infinite length locks,
+ // and ignores the fallback after the comma.
+ "Infinite, Second-4100000000",
+ infiniteTimeout,
+ nil,
+ }}
+
+ for _, tc := range testCases {
+ got, gotErr := parseTimeout(tc.s)
+ if got != tc.want || gotErr != tc.wantErr {
+ t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go
new file mode 100644
index 000000000..145946637
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/prop.go
@@ -0,0 +1,395 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// Proppatch describes a property update instruction as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
+type Proppatch struct {
+ // Remove specifies whether this patch removes properties. If it does not
+ // remove them, it sets them.
+ Remove bool
+ // Props contains the properties to be set or removed.
+ Props []Property
+}
+
+// Propstat describes an XML propstat element as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+type Propstat struct {
+ // Props contains the properties for which Status applies.
+ Props []Property
+
+ // Status defines the HTTP status code of the properties in Props.
+ // Allowed values include, but are not limited to, the WebDAV status
+ // code extensions for HTTP/1.1.
+ // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+ Status int
+
+ // XMLError contains the XML representation of the optional error element.
+ // XML content within this field must not rely on any predefined
+ // namespace declarations or prefixes. If empty, the XML error element
+ // is omitted.
+ XMLError string
+
+ // ResponseDescription contains the contents of the optional
+ // responsedescription field. If empty, the XML element is omitted.
+ ResponseDescription string
+}
+
+// makePropstats returns a slice containing those of x and y whose Props slice
+// is non-empty. If both are empty, it returns a slice containing an otherwise
+// zero Propstat whose HTTP status code is 200 OK.
+func makePropstats(x, y Propstat) []Propstat {
+ pstats := make([]Propstat, 0, 2)
+ if len(x.Props) != 0 {
+ pstats = append(pstats, x)
+ }
+ if len(y.Props) != 0 {
+ pstats = append(pstats, y)
+ }
+ if len(pstats) == 0 {
+ pstats = append(pstats, Propstat{
+ Status: http.StatusOK,
+ })
+ }
+ return pstats
+}
+
+// DeadPropsHolder holds the dead properties of a resource.
+//
+// Dead properties are those properties that are explicitly defined. In
+// comparison, live properties, such as DAV:getcontentlength, are implicitly
+// defined by the underlying resource, and cannot be explicitly overridden or
+// removed. See the Terminology section of
+// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
+//
+// There is a whitelist of the names of live properties. This package handles
+// all live properties, and will only pass non-whitelisted names to the Patch
+// method of DeadPropsHolder implementations.
+type DeadPropsHolder interface {
+ // DeadProps returns a copy of the dead properties held.
+ DeadProps() (map[xml.Name]Property, error)
+
+ // Patch patches the dead properties held.
+ //
+ // Patching is atomic; either all or no patches succeed. It returns (nil,
+ // non-nil) if an internal server error occurred, otherwise the Propstats
+ // collectively contain one Property for each proposed patch Property. If
+ // all patches succeed, Patch returns a slice of length one and a Propstat
+ // element with a 200 OK HTTP status code. If none succeed, for reasons
+ // other than an internal server error, no Propstat has status 200 OK.
+ //
+ // For more details on when various HTTP status codes apply, see
+ // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
+ Patch([]Proppatch) ([]Propstat, error)
+}
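+
+// As an illustrative sketch of the Patch contract above: successfully setting
+// a single dead property named foo:bar would return
+//
+//	[]Propstat{{
+//		Status: http.StatusOK,
+//		Props:  []Property{{XMLName: xml.Name{Space: "foo", Local: "bar"}}},
+//	}}, nil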
+
+// liveProps contains all supported, protected DAV: properties.
+var liveProps = map[xml.Name]struct {
+ // findFn implements the propfind function of this property. If nil,
+ // it indicates a hidden property.
+ findFn func(FileSystem, LockSystem, string, os.FileInfo) (string, error)
+ // dir is true if the property applies to directories.
+ dir bool
+}{
+ xml.Name{Space: "DAV:", Local: "resourcetype"}: {
+ findFn: findResourceType,
+ dir: true,
+ },
+ xml.Name{Space: "DAV:", Local: "displayname"}: {
+ findFn: findDisplayName,
+ dir: true,
+ },
+ xml.Name{Space: "DAV:", Local: "getcontentlength"}: {
+ findFn: findContentLength,
+ dir: false,
+ },
+ xml.Name{Space: "DAV:", Local: "getlastmodified"}: {
+ findFn: findLastModified,
+ // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
+ // suggests that getlastmodified should only apply to GETable
+ // resources, and this package does not support GET on directories.
+ //
+ // Nonetheless, some WebDAV clients expect child directories to be
+ // sortable by getlastmodified date, so this value is true, not false.
+ // See golang.org/issue/15334.
+ dir: true,
+ },
+ xml.Name{Space: "DAV:", Local: "creationdate"}: {
+ findFn: nil,
+ dir: false,
+ },
+ xml.Name{Space: "DAV:", Local: "getcontentlanguage"}: {
+ findFn: nil,
+ dir: false,
+ },
+ xml.Name{Space: "DAV:", Local: "getcontenttype"}: {
+ findFn: findContentType,
+ dir: false,
+ },
+ xml.Name{Space: "DAV:", Local: "getetag"}: {
+ findFn: findETag,
+ // findETag implements ETag as the concatenated hex values of a file's
+ // modification time and size. This is not a reliable synchronization
+ // mechanism for directories, so we do not advertise getetag for DAV
+ // collections.
+ dir: false,
+ },
+
+ // TODO: The lockdiscovery property requires LockSystem to list the
+ // active locks on a resource.
+ xml.Name{Space: "DAV:", Local: "lockdiscovery"}: {},
+ xml.Name{Space: "DAV:", Local: "supportedlock"}: {
+ findFn: findSupportedLock,
+ dir: true,
+ },
+}
+
+// TODO(nigeltao) merge props and allprop?
+
+// Props returns the status of the properties named pnames for resource name.
+//
+// Each Propstat has a unique status and each property name will only be part
+// of one Propstat element.
+func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
+ f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pstatOK := Propstat{Status: http.StatusOK}
+ pstatNotFound := Propstat{Status: http.StatusNotFound}
+ for _, pn := range pnames {
+ // If this file has dead properties, check if they contain pn.
+ if dp, ok := deadProps[pn]; ok {
+ pstatOK.Props = append(pstatOK.Props, dp)
+ continue
+ }
+ // Otherwise, it must either be a live property or we don't know it.
+ if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
+ innerXML, err := prop.findFn(fs, ls, name, fi)
+ if err != nil {
+ return nil, err
+ }
+ pstatOK.Props = append(pstatOK.Props, Property{
+ XMLName: pn,
+ InnerXML: []byte(innerXML),
+ })
+ } else {
+ pstatNotFound.Props = append(pstatNotFound.Props, Property{
+ XMLName: pn,
+ })
+ }
+ }
+ return makePropstats(pstatOK, pstatNotFound), nil
+}
+
+// Propnames returns the property names defined for resource name.
+func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
+ f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
+ for pn, prop := range liveProps {
+ if prop.findFn != nil && (prop.dir || !isDir) {
+ pnames = append(pnames, pn)
+ }
+ }
+ for pn := range deadProps {
+ pnames = append(pnames, pn)
+ }
+ return pnames, nil
+}
+
+// Allprop returns the properties defined for resource name and the properties
+// named in include.
+//
+// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
+// within the RFC plus dead properties. Other live properties should only be
+// returned if they are named in 'include'.
+//
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
+ pnames, err := propnames(fs, ls, name)
+ if err != nil {
+ return nil, err
+ }
+ // Add names from include if they are not already covered in pnames.
+ nameset := make(map[xml.Name]bool)
+ for _, pn := range pnames {
+ nameset[pn] = true
+ }
+ for _, pn := range include {
+ if !nameset[pn] {
+ pnames = append(pnames, pn)
+ }
+ }
+ return props(fs, ls, name, pnames)
+}
+
+// Patch patches the properties of resource name. The return values are
+// constrained in the same manner as DeadPropsHolder.Patch.
+func patch(fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
+ conflict := false
+loop:
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ conflict = true
+ break loop
+ }
+ }
+ }
+ if conflict {
+ pstatForbidden := Propstat{
+ Status: http.StatusForbidden,
+ XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
+ }
+ pstatFailedDep := Propstat{
+ Status: StatusFailedDependency,
+ }
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
+ } else {
+ pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ }
+ return makePropstats(pstatForbidden, pstatFailedDep), nil
+ }
+
+ f, err := fs.OpenFile(name, os.O_RDWR, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ if dph, ok := f.(DeadPropsHolder); ok {
+ ret, err := dph.Patch(patches)
+ if err != nil {
+ return nil, err
+ }
+ // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
+ // "The contents of the prop XML element must only list the names of
+ // properties to which the result in the status element applies."
+ for _, pstat := range ret {
+ for i, p := range pstat.Props {
+ pstat.Props[i] = Property{XMLName: p.XMLName}
+ }
+ }
+ return ret, nil
+ }
+ // The file doesn't implement the optional DeadPropsHolder interface, so
+ // all patches are forbidden.
+ pstat := Propstat{Status: http.StatusForbidden}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+func findResourceType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if fi.IsDir() {
+ return `<D:collection xmlns:D="DAV:"/>`, nil
+ }
+ return "", nil
+}
+
+func findDisplayName(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if slashClean(name) == "/" {
+ // Hide the real name of a possibly prefixed root directory.
+ return "", nil
+ }
+ return fi.Name(), nil
+}
+
+func findContentLength(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return strconv.FormatInt(fi.Size(), 10), nil
+}
+
+func findLastModified(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return fi.ModTime().Format(http.TimeFormat), nil
+}
+
+func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ // This implementation is based on serveContent's code in the standard net/http package.
+ ctype := mime.TypeByExtension(filepath.Ext(name))
+ if ctype != "" {
+ return ctype, nil
+ }
+ // Read a chunk to decide between utf-8 text and binary.
+ var buf [512]byte
+ n, err := io.ReadFull(f, buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return "", err
+ }
+ ctype = http.DetectContentType(buf[:n])
+ // Rewind file.
+ _, err = f.Seek(0, os.SEEK_SET)
+ return ctype, err
+}
+
+func findETag(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ // The Apache HTTP Server 2.4 by default concatenates the
+ // modification time and size of a file. We replicate that heuristic
+ // with nanosecond granularity.
+ return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
+}
+
+func findSupportedLock(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return `` +
+ `<D:lockentry xmlns:D="DAV:">` +
+ `<D:lockscope><D:exclusive/></D:lockscope>` +
+ `<D:locktype><D:write/></D:locktype>` +
+ `</D:lockentry>`, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go
new file mode 100644
index 000000000..0834dc9f1
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/prop_test.go
@@ -0,0 +1,610 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "os"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestMemPS(t *testing.T) {
+ // calcProps calculates the getlastmodified and getetag DAV: property
+ // values in pstats for resource name in file-system fs.
+ calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
+ fi, err := fs.Stat(name)
+ if err != nil {
+ return err
+ }
+ for _, pst := range pstats {
+ for i, p := range pst.Props {
+ switch p.XMLName {
+ case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
+ p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat))
+ pst.Props[i] = p
+ case xml.Name{Space: "DAV:", Local: "getetag"}:
+ if fi.IsDir() {
+ continue
+ }
+ etag, err := findETag(fs, ls, name, fi)
+ if err != nil {
+ return err
+ }
+ p.InnerXML = []byte(etag)
+ pst.Props[i] = p
+ }
+ }
+ }
+ return nil
+ }
+
+ const (
+ lockEntry = `` +
+ `<D:lockentry xmlns:D="DAV:">` +
+ `<D:lockscope><D:exclusive/></D:lockscope>` +
+ `<D:locktype><D:write/></D:locktype>` +
+ `</D:lockentry>`
+ statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
+ )
+
+ type propOp struct {
+ op string
+ name string
+ pnames []xml.Name
+ patches []Proppatch
+ wantPnames []xml.Name
+ wantPropstats []Propstat
+ }
+
+ testCases := []struct {
+ desc string
+ noDeadProps bool
+ buildfs []string
+ propOp []propOp
+ }{{
+ desc: "propname",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propname",
+ name: "/dir",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "supportedlock"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ },
+ }, {
+ op: "propname",
+ name: "/file",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "getcontentlength"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ {Space: "DAV:", Local: "getcontenttype"},
+ {Space: "DAV:", Local: "getetag"},
+ {Space: "DAV:", Local: "supportedlock"},
+ },
+ }},
+ }, {
+ desc: "allprop dir and file",
+ buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
+ propOp: []propOp{{
+ op: "allprop",
+ name: "/dir",
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("dir"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }},
+ }},
+ }, {
+ op: "allprop",
+ name: "/file",
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("file"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
+ InnerXML: []byte("9"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
+ InnerXML: []byte("text/plain; charset=utf-8"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }},
+ }},
+ }, {
+ op: "allprop",
+ name: "/file",
+ pnames: []xml.Name{
+ {"DAV:", "resourcetype"},
+ {"foo", "bar"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("file"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
+ InnerXML: []byte("9"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
+ InnerXML: []byte("text/plain; charset=utf-8"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }}}, {
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }}},
+ },
+ }},
+ }, {
+ desc: "propfind DAV:resourcetype",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "resourcetype"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/file",
+ pnames: []xml.Name{{"DAV:", "resourcetype"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }},
+ }},
+ }},
+ }, {
+ desc: "propfind unsupported DAV properties",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "getcontentlanguage"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "creationdate"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "propfind getetag for files but not for directories",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "getetag"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/file",
+ pnames: []xml.Name{{"DAV:", "getetag"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch property on no-dead-properties file system",
+ buildfs: []string{"mkdir /dir"},
+ noDeadProps: true,
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ XMLError: statForbiddenError,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{Space: "foo", Local: "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch dead property with failed dependency",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }, {
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("xxx"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ XMLError: statForbiddenError,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ }},
+ }, {
+ Status: StatusFailedDependency,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{Space: "foo", Local: "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch remove dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{
+ {Space: "foo", Local: "bar"},
+ {Space: "spam", Local: "ham"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ }, {
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Remove: true,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{
+ {Space: "foo", Local: "bar"},
+ {Space: "spam", Local: "ham"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }, {
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ }},
+ }, {
+ desc: "propname with dead property",
+ buildfs: []string{"touch /file"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/file",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propname",
+ name: "/file",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "getcontentlength"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ {Space: "DAV:", Local: "getcontenttype"},
+ {Space: "DAV:", Local: "getetag"},
+ {Space: "DAV:", Local: "supportedlock"},
+ {Space: "foo", Local: "bar"},
+ },
+ }},
+ }, {
+ desc: "proppatch remove unknown dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Remove: true,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "bad: propfind unknown property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"foo:", "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo:", Local: "bar"},
+ }},
+ }},
+ }},
+ }}
+
+ for _, tc := range testCases {
+ fs, err := buildTestFS(tc.buildfs)
+ if err != nil {
+ t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
+ }
+ if tc.noDeadProps {
+ fs = noDeadPropsFS{fs}
+ }
+ ls := NewMemLS()
+ for _, op := range tc.propOp {
+ desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name)
+ if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil {
+ t.Fatalf("%s: calcProps: %v", desc, err)
+ }
+
+ // Call property system.
+ var propstats []Propstat
+ switch op.op {
+ case "propname":
+ pnames, err := propnames(fs, ls, op.name)
+ if err != nil {
+ t.Errorf("%s: got error %v, want nil", desc, err)
+ continue
+ }
+ sort.Sort(byXMLName(pnames))
+ sort.Sort(byXMLName(op.wantPnames))
+ if !reflect.DeepEqual(pnames, op.wantPnames) {
+ t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames)
+ }
+ continue
+ case "allprop":
+ propstats, err = allprop(fs, ls, op.name, op.pnames)
+ case "propfind":
+ propstats, err = props(fs, ls, op.name, op.pnames)
+ case "proppatch":
+ propstats, err = patch(fs, ls, op.name, op.patches)
+ default:
+ t.Fatalf("%s: %s not implemented", desc, op.op)
+ }
+ if err != nil {
+ t.Errorf("%s: got error %v, want nil", desc, err)
+ continue
+ }
+ // Compare return values from allprop, propfind or proppatch.
+ for _, pst := range propstats {
+ sort.Sort(byPropname(pst.Props))
+ }
+ for _, pst := range op.wantPropstats {
+ sort.Sort(byPropname(pst.Props))
+ }
+ sort.Sort(byStatus(propstats))
+ sort.Sort(byStatus(op.wantPropstats))
+ if !reflect.DeepEqual(propstats, op.wantPropstats) {
+ t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats)
+ }
+ }
+ }
+}
+
+func cmpXMLName(a, b xml.Name) bool {
+ if a.Space != b.Space {
+ return a.Space < b.Space
+ }
+ return a.Local < b.Local
+}
+
+type byXMLName []xml.Name
+
+func (b byXMLName) Len() int { return len(b) }
+func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) }
+
+type byPropname []Property
+
+func (b byPropname) Len() int { return len(b) }
+func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) }
+
+type byStatus []Propstat
+
+func (b byStatus) Len() int { return len(b) }
+func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status }
+
+type noDeadPropsFS struct {
+ FileSystem
+}
+
+func (fs noDeadPropsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, err := fs.FileSystem.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return noDeadPropsFile{f}, nil
+}
+
+// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods
+// provided by the underlying File implementation.
+type noDeadPropsFile struct {
+ f File
+}
+
+func (f noDeadPropsFile) Close() error { return f.f.Close() }
+func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) }
+func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) }
+func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) }
+func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() }
+func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) }
diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go
new file mode 100644
index 000000000..4ce09728b
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/webdav.go
@@ -0,0 +1,689 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package webdav provides a WebDAV server implementation.
+package webdav // import "golang.org/x/net/webdav"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
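+// A Handler dispatches WebDAV requests (OPTIONS, GET, PUT, LOCK, PROPFIND,
+// and so on) to its FileSystem and LockSystem. It implements http.Handler.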
+type Handler struct {
+ // Prefix is the URL path prefix to strip from WebDAV resource paths.
+ Prefix string
+ // FileSystem is the virtual file system.
+ FileSystem FileSystem
+ // LockSystem is the lock management system.
+ LockSystem LockSystem
+ // Logger is an optional error logger. If non-nil, it will be called
+ // for all HTTP requests.
+ Logger func(*http.Request, error)
+}
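+
+// A minimal usage sketch (the URL prefix, directory, and Dir-backed
+// FileSystem are illustrative choices):
+//
+//	h := &Handler{
+//		Prefix:     "/dav",
+//		FileSystem: Dir("/tmp/dav"),
+//		LockSystem: NewMemLS(),
+//	}
+//	http.Handle("/dav/", h)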
+
+func (h *Handler) stripPrefix(p string) (string, int, error) {
+ if h.Prefix == "" {
+ return p, http.StatusOK, nil
+ }
+ if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
+ return r, http.StatusOK, nil
+ }
+ return p, http.StatusNotFound, errPrefixMismatch
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ status, err := http.StatusBadRequest, errUnsupportedMethod
+ if h.FileSystem == nil {
+ status, err = http.StatusInternalServerError, errNoFileSystem
+ } else if h.LockSystem == nil {
+ status, err = http.StatusInternalServerError, errNoLockSystem
+ } else {
+ switch r.Method {
+ case "OPTIONS":
+ status, err = h.handleOptions(w, r)
+ case "GET", "HEAD", "POST":
+ status, err = h.handleGetHeadPost(w, r)
+ case "DELETE":
+ status, err = h.handleDelete(w, r)
+ case "PUT":
+ status, err = h.handlePut(w, r)
+ case "MKCOL":
+ status, err = h.handleMkcol(w, r)
+ case "COPY", "MOVE":
+ status, err = h.handleCopyMove(w, r)
+ case "LOCK":
+ status, err = h.handleLock(w, r)
+ case "UNLOCK":
+ status, err = h.handleUnlock(w, r)
+ case "PROPFIND":
+ status, err = h.handlePropfind(w, r)
+ case "PROPPATCH":
+ status, err = h.handleProppatch(w, r)
+ }
+ }
+
+ if status != 0 {
+ w.WriteHeader(status)
+ if status != http.StatusNoContent {
+ w.Write([]byte(StatusText(status)))
+ }
+ }
+ if h.Logger != nil {
+ h.Logger(r, err)
+ }
+}
+
+func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
+ token, err = h.LockSystem.Create(now, LockDetails{
+ Root: root,
+ Duration: infiniteTimeout,
+ ZeroDepth: true,
+ })
+ if err != nil {
+ if err == ErrLocked {
+ return "", StatusLocked, err
+ }
+ return "", http.StatusInternalServerError, err
+ }
+ return token, 0, nil
+}
+
+func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
+ hdr := r.Header.Get("If")
+ if hdr == "" {
+ // An empty If header means that the client hasn't previously created locks.
+ // Even if this client doesn't care about locks, we still need to check that
+ // the resources aren't locked by another client, so we create temporary
+ // locks that would conflict with another client's locks. These temporary
+ // locks are unlocked at the end of the HTTP request.
+ now, srcToken, dstToken := time.Now(), "", ""
+ if src != "" {
+ srcToken, status, err = h.lock(now, src)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ if dst != "" {
+ dstToken, status, err = h.lock(now, dst)
+ if err != nil {
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ return nil, status, err
+ }
+ }
+
+ return func() {
+ if dstToken != "" {
+ h.LockSystem.Unlock(now, dstToken)
+ }
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ }, 0, nil
+ }
+
+ ih, ok := parseIfHeader(hdr)
+ if !ok {
+ return nil, http.StatusBadRequest, errInvalidIfHeader
+ }
+ // ih is a disjunction (OR) of ifLists, so any ifList will do.
+ for _, l := range ih.lists {
+ lsrc := l.resourceTag
+ if lsrc == "" {
+ lsrc = src
+ } else {
+ u, err := url.Parse(lsrc)
+ if err != nil {
+ continue
+ }
+ if u.Host != r.Host {
+ continue
+ }
+ lsrc, status, err = h.stripPrefix(u.Path)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
+ if err == ErrConfirmationFailed {
+ continue
+ }
+ if err != nil {
+ return nil, http.StatusInternalServerError, err
+ }
+ return release, 0, nil
+ }
+ // Section 10.4.1 says that "If this header is evaluated and all state lists
+ // fail, then the request must fail with a 412 (Precondition Failed) status."
+ // We follow the spec even though the cond_put_corrupt_token test case from
+ // the litmus test warns on seeing a 412 instead of a 423 (Locked).
+ return nil, http.StatusPreconditionFailed, ErrLocked
+}
+
+func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ allow := "OPTIONS, LOCK, PUT, MKCOL"
+ if fi, err := h.FileSystem.Stat(reqPath); err == nil {
+ if fi.IsDir() {
+ allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
+ } else {
+ allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
+ }
+ }
+ w.Header().Set("Allow", allow)
+ // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
+ w.Header().Set("DAV", "1, 2")
+ // http://msdn.microsoft.com/en-au/library/cc250217.aspx
+ w.Header().Set("MS-Author-Via", "DAV")
+ return 0, nil
+}
+
+func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ // TODO: check locks for read-only access??
+ f, err := h.FileSystem.OpenFile(reqPath, os.O_RDONLY, 0)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ if fi.IsDir() {
+ return http.StatusMethodNotAllowed, nil
+ }
+ etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ // Let ServeContent determine the Content-Type header.
+ http.ServeContent(w, r, reqPath, fi.ModTime(), f)
+ return 0, nil
+}
+
+func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // TODO: return MultiStatus where appropriate.
+
+ // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
+ // returns nil (no error)." WebDAV semantics are that it should return a
+ // "404 Not Found". We therefore have to Stat before we RemoveAll.
+ if _, err := h.FileSystem.Stat(reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ if err := h.FileSystem.RemoveAll(reqPath); err != nil {
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusNoContent, nil
+}
+
+func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+ // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
+ // comments in http.checkEtag.
+
+ f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ _, copyErr := io.Copy(f, r.Body)
+ fi, statErr := f.Stat()
+ closeErr := f.Close()
+ // TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
+ if copyErr != nil {
+ return http.StatusMethodNotAllowed, copyErr
+ }
+ if statErr != nil {
+ return http.StatusMethodNotAllowed, statErr
+ }
+ if closeErr != nil {
+ return http.StatusMethodNotAllowed, closeErr
+ }
+ etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ if r.ContentLength > 0 {
+ return http.StatusUnsupportedMediaType, nil
+ }
+ if err := h.FileSystem.Mkdir(reqPath, 0777); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ hdr := r.Header.Get("Destination")
+ if hdr == "" {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ u, err := url.Parse(hdr)
+ if err != nil {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ if u.Host != r.Host {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+
+ src, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+
+ dst, status, err := h.stripPrefix(u.Path)
+ if err != nil {
+ return status, err
+ }
+
+ if dst == "" {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+ if dst == src {
+ return http.StatusForbidden, errDestinationEqualsSource
+ }
+
+ if r.Method == "COPY" {
+ // Section 7.5.1 says that a COPY only needs to lock the destination,
+ // not both destination and source. Strictly speaking, this is racy:
+ // even though a COPY doesn't modify the source itself, a concurrent
+ // operation could modify the source. However, the litmus test explicitly
+ // checks that COPYing a locked-by-another source is OK.
+ release, status, err := h.confirmLocks(r, "", dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.8.3 says that "The COPY method on a collection without a Depth
+ // header must act as if a Depth header with value "infinity" was included".
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.8.3 says that "A client may submit a Depth header on a
+ // COPY on a collection with a value of "0" or "infinity"."
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return copyFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
+ }
+
+ release, status, err := h.confirmLocks(r, src, dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.9.2 says that "The MOVE method on a collection must act as if
+ // a "Depth: infinity" header was used on it. A client must not submit a
+ // Depth header on a MOVE on a collection with any value but "infinity"."
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ if parseDepth(hdr) != infiniteDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return moveFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
+}
+
+func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
+ duration, err := parseTimeout(r.Header.Get("Timeout"))
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ li, status, err := readLockInfo(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ token, ld, now, created := "", LockDetails{}, time.Now(), false
+ if li == (lockInfo{}) {
+ // An empty lockInfo means to refresh the lock.
+ ih, ok := parseIfHeader(r.Header.Get("If"))
+ if !ok {
+ return http.StatusBadRequest, errInvalidIfHeader
+ }
+ if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
+ token = ih.lists[0].conditions[0].Token
+ }
+ if token == "" {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ ld, err = h.LockSystem.Refresh(now, token, duration)
+ if err != nil {
+ if err == ErrNoSuchLock {
+ return http.StatusPreconditionFailed, err
+ }
+ return http.StatusInternalServerError, err
+ }
+
+ } else {
+ // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
+ // then the request MUST act as if a "Depth:infinity" had been submitted."
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.10.3 says that "Values other than 0 or infinity must not be
+ // used with the Depth header on a LOCK method".
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ld = LockDetails{
+ Root: reqPath,
+ Duration: duration,
+ OwnerXML: li.Owner.InnerXML,
+ ZeroDepth: depth == 0,
+ }
+ token, err = h.LockSystem.Create(now, ld)
+ if err != nil {
+ if err == ErrLocked {
+ return StatusLocked, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer func() {
+ if retErr != nil {
+ h.LockSystem.Unlock(now, token)
+ }
+ }()
+
+ // Create the resource if it didn't previously exist.
+ if _, err := h.FileSystem.Stat(reqPath); err != nil {
+ f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ // TODO: detect missing intermediate dirs and return http.StatusConflict?
+ return http.StatusInternalServerError, err
+ }
+ f.Close()
+ created = true
+ }
+
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We add angle brackets.
+ w.Header().Set("Lock-Token", "<"+token+">")
+ }
+
+ w.Header().Set("Content-Type", "application/xml; charset=utf-8")
+ if created {
+ // This is "w.WriteHeader(http.StatusCreated)" and not "return
+ // http.StatusCreated, nil" because we write our own (XML) response to w
+ // and Handler.ServeHTTP would otherwise write "Created".
+ w.WriteHeader(http.StatusCreated)
+ }
+ writeLockInfo(w, token, ld)
+ return 0, nil
+}
+
+func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We strip its angle brackets.
+ t := r.Header.Get("Lock-Token")
+ if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ t = t[1 : len(t)-1]
+
+ switch err = h.LockSystem.Unlock(time.Now(), t); err {
+ case nil:
+ return http.StatusNoContent, err
+ case ErrForbidden:
+ return http.StatusForbidden, err
+ case ErrLocked:
+ return StatusLocked, err
+ case ErrNoSuchLock:
+ return http.StatusConflict, err
+ default:
+ return http.StatusInternalServerError, err
+ }
+}
+
+func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ fi, err := h.FileSystem.Stat(reqPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth == invalidDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ pf, status, err := readPropfind(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ mw := multistatusWriter{w: w}
+
+ walkFn := func(reqPath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ var pstats []Propstat
+ if pf.Propname != nil {
+ pnames, err := propnames(h.FileSystem, h.LockSystem, reqPath)
+ if err != nil {
+ return err
+ }
+ pstat := Propstat{Status: http.StatusOK}
+ for _, xmlname := range pnames {
+ pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
+ }
+ pstats = append(pstats, pstat)
+ } else if pf.Allprop != nil {
+ pstats, err = allprop(h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ } else {
+ pstats, err = props(h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ }
+ if err != nil {
+ return err
+ }
+ return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))
+ }
+
+ walkErr := walkFS(h.FileSystem, depth, reqPath, fi, walkFn)
+ closeErr := mw.close()
+ if walkErr != nil {
+ return http.StatusInternalServerError, walkErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ if _, err := h.FileSystem.Stat(reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ patches, status, err := readProppatch(r.Body)
+ if err != nil {
+ return status, err
+ }
+ pstats, err := patch(h.FileSystem, h.LockSystem, reqPath, patches)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ mw := multistatusWriter{w: w}
+ writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
+ closeErr := mw.close()
+ if writeErr != nil {
+ return http.StatusInternalServerError, writeErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func makePropstatResponse(href string, pstats []Propstat) *response {
+ resp := response{
+ Href: []string{(&url.URL{Path: href}).EscapedPath()},
+ Propstat: make([]propstat, 0, len(pstats)),
+ }
+ for _, p := range pstats {
+ var xmlErr *xmlError
+ if p.XMLError != "" {
+ xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
+ }
+ resp.Propstat = append(resp.Propstat, propstat{
+ Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
+ Prop: p.Props,
+ ResponseDescription: p.ResponseDescription,
+ Error: xmlErr,
+ })
+ }
+ return &resp
+}
+
+const (
+ infiniteDepth = -1
+ invalidDepth = -2
+)
+
+// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
+// infiniteDepth. Parsing any other string returns invalidDepth.
+//
+// Different WebDAV methods have further constraints on valid depths:
+// - PROPFIND has no further restrictions, as per section 9.1.
+// - COPY accepts only "0" or "infinity", as per section 9.8.3.
+// - MOVE accepts only "infinity", as per section 9.9.2.
+// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
+// These constraints are enforced by the handleXxx methods.
+func parseDepth(s string) int {
+ switch s {
+ case "0":
+ return 0
+ case "1":
+ return 1
+ case "infinity":
+ return infiniteDepth
+ }
+ return invalidDepth
+}
+
+// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+const (
+ StatusMulti = 207
+ StatusUnprocessableEntity = 422
+ StatusLocked = 423
+ StatusFailedDependency = 424
+ StatusInsufficientStorage = 507
+)
+
+func StatusText(code int) string {
+ switch code {
+ case StatusMulti:
+ return "Multi-Status"
+ case StatusUnprocessableEntity:
+ return "Unprocessable Entity"
+ case StatusLocked:
+ return "Locked"
+ case StatusFailedDependency:
+ return "Failed Dependency"
+ case StatusInsufficientStorage:
+ return "Insufficient Storage"
+ }
+ return http.StatusText(code)
+}
+
+var (
+ errDestinationEqualsSource = errors.New("webdav: destination equals source")
+ errDirectoryNotEmpty = errors.New("webdav: directory not empty")
+ errInvalidDepth = errors.New("webdav: invalid depth")
+ errInvalidDestination = errors.New("webdav: invalid destination")
+ errInvalidIfHeader = errors.New("webdav: invalid If header")
+ errInvalidLockInfo = errors.New("webdav: invalid lock info")
+ errInvalidLockToken = errors.New("webdav: invalid lock token")
+ errInvalidPropfind = errors.New("webdav: invalid propfind")
+ errInvalidProppatch = errors.New("webdav: invalid proppatch")
+ errInvalidResponse = errors.New("webdav: invalid response")
+ errInvalidTimeout = errors.New("webdav: invalid timeout")
+ errNoFileSystem = errors.New("webdav: no file system")
+ errNoLockSystem = errors.New("webdav: no lock system")
+ errNotADirectory = errors.New("webdav: not a directory")
+ errPrefixMismatch = errors.New("webdav: prefix mismatch")
+ errRecursionTooDeep = errors.New("webdav: recursion too deep")
+ errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
+ errUnsupportedMethod = errors.New("webdav: unsupported method")
+)
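
A Handler only needs its FileSystem and LockSystem fields set for the ServeHTTP dispatch above to work; Prefix and Logger are optional. A minimal sketch of mounting such a handler under a path prefix, assuming the in-memory NewMemFS and NewMemLS constructors that the tests below also use (illustrative only, not part of the vendored file):

// Illustrative sketch; not part of golang.org/x/net/webdav itself.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav/",           // stripped from incoming request paths by stripPrefix
		FileSystem: webdav.NewMemFS(), // in-memory FileSystem, as used by the tests below
		LockSystem: webdav.NewMemLS(), // in-memory LockSystem
		Logger: func(r *http.Request, err error) { // signature inferred from the h.Logger(r, err) call above
			if err != nil {
				log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}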
diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go
new file mode 100644
index 000000000..b068aab32
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/webdav_test.go
@@ -0,0 +1,285 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+)
+
+// TODO: add tests to check XML responses with the expected prefix path
+func TestPrefix(t *testing.T) {
+ const dst, blah = "Destination", "blah blah blah"
+
+ // createLockBody comes from the example in Section 9.10.7.
+ const createLockBody = `<?xml version="1.0" encoding="utf-8" ?>
+ <D:lockinfo xmlns:D='DAV:'>
+ <D:lockscope><D:exclusive/></D:lockscope>
+ <D:locktype><D:write/></D:locktype>
+ <D:owner>
+ <D:href>http://example.org/~ejw/contact.html</D:href>
+ </D:owner>
+ </D:lockinfo>
+ `
+
+ do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) {
+ var bodyReader io.Reader
+ if body != "" {
+ bodyReader = strings.NewReader(body)
+ }
+ req, err := http.NewRequest(method, urlStr, bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ for len(headers) >= 2 {
+ req.Header.Add(headers[0], headers[1])
+ headers = headers[2:]
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != wantStatusCode {
+ return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode)
+ }
+ return res.Header, nil
+ }
+
+ prefixes := []string{
+ "/",
+ "/a/",
+ "/a/b/",
+ "/a/b/c/",
+ }
+ for _, prefix := range prefixes {
+ fs := NewMemFS()
+ h := &Handler{
+ FileSystem: fs,
+ LockSystem: NewMemLS(),
+ }
+ mux := http.NewServeMux()
+ if prefix != "/" {
+ h.Prefix = prefix
+ }
+ mux.Handle(prefix, h)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ // The script is:
+ // MKCOL /a
+ // MKCOL /a/b
+ // PUT /a/b/c
+ // COPY /a/b/c /a/b/d
+ // MKCOL /a/b/e
+ // MOVE /a/b/d /a/b/e/f
+ // LOCK /a/b/e/g
+ // PUT /a/b/e/g
+ // which should yield the (possibly stripped) filenames /a/b/c,
+ // /a/b/e/f and /a/b/e/g, plus their parent directories.
+
+ wantA := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusMovedPermanently,
+ "/a/b/": http.StatusNotFound,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err)
+ continue
+ }
+
+ wantB := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusMovedPermanently,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err)
+ continue
+ }
+
+ wantC := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusMovedPermanently,
+ }[prefix]
+ if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil {
+ t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err)
+ continue
+ }
+
+ wantD := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusMovedPermanently,
+ }[prefix]
+ if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil {
+ t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err)
+ continue
+ }
+
+ wantE := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err)
+ continue
+ }
+
+ wantF := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil {
+ t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err)
+ continue
+ }
+
+ var lockToken string
+ wantG := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil {
+ t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err)
+ continue
+ } else {
+ lockToken = h.Get("Lock-Token")
+ }
+
+ ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken)
+ wantH := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil {
+ t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err)
+ continue
+ }
+
+ got, err := find(nil, fs, "/")
+ if err != nil {
+ t.Errorf("prefix=%-9q find: %v", prefix, err)
+ continue
+ }
+ sort.Strings(got)
+ want := map[string][]string{
+ "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"},
+ "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"},
+ "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"},
+ "/a/b/c/": {"/"},
+ }[prefix]
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want)
+ continue
+ }
+ }
+}
+
+func TestFilenameEscape(t *testing.T) {
+ re := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)
+ do := func(method, urlStr string) (string, error) {
+ req, err := http.NewRequest(method, urlStr, nil)
+ if err != nil {
+ return "", err
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer res.Body.Close()
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", err
+ }
+ m := re.FindStringSubmatch(string(b))
+ if len(m) != 2 {
+ return "", errors.New("D:href not found")
+ }
+
+ return m[1], nil
+ }
+
+ testCases := []struct {
+ name, want string
+ }{{
+ name: `/foo%bar`,
+ want: `/foo%25bar`,
+ }, {
+ name: `/ã“ã‚“ã«ã¡ã‚世界`,
+ want: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,
+ }, {
+ name: `/Program Files/`,
+ want: `/Program%20Files`,
+ }, {
+ name: `/go+lang`,
+ want: `/go+lang`,
+ }, {
+ name: `/go&lang`,
+ want: `/go&amp;lang`,
+ }}
+ fs := NewMemFS()
+ for _, tc := range testCases {
+ if strings.HasSuffix(tc.name, "/") {
+ if err := fs.Mkdir(tc.name, 0755); err != nil {
+ t.Fatalf("name=%q: Mkdir: %v", tc.name, err)
+ }
+ } else {
+ f, err := fs.OpenFile(tc.name, os.O_CREATE, 0644)
+ if err != nil {
+ t.Fatalf("name=%q: OpenFile: %v", tc.name, err)
+ }
+ f.Close()
+ }
+ }
+
+ srv := httptest.NewServer(&Handler{
+ FileSystem: fs,
+ LockSystem: NewMemLS(),
+ })
+ defer srv.Close()
+
+ u, err := url.Parse(srv.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, tc := range testCases {
+ u.Path = tc.name
+ got, err := do("PROPFIND", u.String())
+ if err != nil {
+ t.Errorf("name=%q: PROPFIND: %v", tc.name, err)
+ continue
+ }
+ if got != tc.want {
+ t.Errorf("name=%q: got %q, want %q", tc.name, got, tc.want)
+ }
+ }
+}
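
TestPrefix above exercises the LOCK/If flow end to end: a successful LOCK returns the lock token as a Coded-URL in the Lock-Token response header, and the later PUT passes that token back in an If header of the form "<resource-url> (<token>)". A condensed client-side sketch of the same flow, with the lock body (such as the Section 9.10.7 example above) passed in as a parameter (illustrative only, not part of the vendored tests):

// Illustrative sketch: lock a resource, then PUT to it under that lock.
// Assumes srvURL points at a running Handler (e.g. an httptest.Server).
func lockThenPut(srvURL, name, lockBody, body string) error {
	lockReq, err := http.NewRequest("LOCK", srvURL+name, strings.NewReader(lockBody))
	if err != nil {
		return err
	}
	res, err := http.DefaultClient.Do(lockReq)
	if err != nil {
		return err
	}
	res.Body.Close()
	// The Lock-Token header value is a Coded-URL and already carries the
	// surrounding angle brackets, e.g. <opaquelocktoken:...>.
	token := res.Header.Get("Lock-Token")

	putReq, err := http.NewRequest("PUT", srvURL+name, strings.NewReader(body))
	if err != nil {
		return err
	}
	// Tie the request to the lock: If: <resource-url> (<token>).
	putReq.Header.Set("If", fmt.Sprintf("<%s%s> (%s)", srvURL, name, token))
	res, err = http.DefaultClient.Do(putReq)
	if err != nil {
		return err
	}
	return res.Body.Close()
}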
diff --git a/vendor/golang.org/x/net/webdav/xml.go b/vendor/golang.org/x/net/webdav/xml.go
new file mode 100644
index 000000000..790dc8169
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/xml.go
@@ -0,0 +1,519 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The XML encoding is covered by Section 14.
+// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ // As of https://go-review.googlesource.com/#/c/12772/ which was submitted
+ // in July 2015, this package uses an internal fork of the standard
+ // library's encoding/xml package, due to changes in the way namespaces
+ // were encoded. Such changes were introduced in the Go 1.5 cycle, but were
+ // rolled back in response to https://github.com/golang/go/issues/11841
+ //
+ // However, this package's exported API, specifically the Property and
+ // DeadPropsHolder types, needs to refer to the standard library's version
+ // of the xml.Name type, as code that imports this package cannot refer to
+ // the internal version.
+ //
+ // This file therefore imports both the internal and external versions, as
+ // ixml and xml, and converts between them.
+ //
+ // In the long term, this package should use the standard library's version
+ // only, and the internal fork deleted, once
+ // https://github.com/golang/go/issues/13400 is resolved.
+ ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
+type lockInfo struct {
+ XMLName ixml.Name `xml:"lockinfo"`
+ Exclusive *struct{} `xml:"lockscope>exclusive"`
+ Shared *struct{} `xml:"lockscope>shared"`
+ Write *struct{} `xml:"locktype>write"`
+ Owner owner `xml:"owner"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
+type owner struct {
+ InnerXML string `xml:",innerxml"`
+}
+
+func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
+ c := &countingReader{r: r}
+ if err = ixml.NewDecoder(c).Decode(&li); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to refresh the lock.
+ // http://www.webdav.org/specs/rfc4918.html#refreshing-locks
+ return lockInfo{}, 0, nil
+ }
+ err = errInvalidLockInfo
+ }
+ return lockInfo{}, http.StatusBadRequest, err
+ }
+ // We only support exclusive (non-shared) write locks. In practice, these are
+ // the only types of locks that seem to matter.
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
+ return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
+ }
+ return li, 0, nil
+}
+
+type countingReader struct {
+ n int
+ r io.Reader
+}
+
+func (c *countingReader) Read(p []byte) (int, error) {
+ n, err := c.r.Read(p)
+ c.n += n
+ return n, err
+}
+
+func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
+ depth := "infinity"
+ if ld.ZeroDepth {
+ depth = "0"
+ }
+ timeout := ld.Duration / time.Second
+ return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
+ "<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
+ " <D:locktype><D:write/></D:locktype>\n"+
+ " <D:lockscope><D:exclusive/></D:lockscope>\n"+
+ " <D:depth>%s</D:depth>\n"+
+ " <D:owner>%s</D:owner>\n"+
+ " <D:timeout>Second-%d</D:timeout>\n"+
+ " <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
+ " <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
+ "</D:activelock></D:lockdiscovery></D:prop>",
+ depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
+ )
+}
+
+func escape(s string) string {
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"', '&', '\'', '<', '>':
+ b := bytes.NewBuffer(nil)
+ ixml.EscapeText(b, []byte(s))
+ return b.String()
+ }
+ }
+ return s
+}
+
+// next returns the next token, if any, in the XML stream of d.
+// RFC 4918 requires that comments, processing instructions and
+// directives be ignored.
+// http://www.webdav.org/specs/rfc4918.html#property_values
+// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
+func next(d *ixml.Decoder) (ixml.Token, error) {
+ for {
+ t, err := d.Token()
+ if err != nil {
+ return t, err
+ }
+ switch t.(type) {
+ case ixml.Comment, ixml.Directive, ixml.ProcInst:
+ continue
+ default:
+ return t, nil
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
+type propfindProps []xml.Name
+
+// UnmarshalXML appends the property names enclosed within start to pn.
+//
+// It returns an error if start does not contain any properties or if
+// properties contain values. Character data between properties is ignored.
+func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch t.(type) {
+ case ixml.EndElement:
+ if len(*pn) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ name := t.(ixml.StartElement).Name
+ t, err = next(d)
+ if err != nil {
+ return err
+ }
+ if _, ok := t.(ixml.EndElement); !ok {
+ return fmt.Errorf("unexpected token %T", t)
+ }
+ *pn = append(*pn, xml.Name(name))
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
+type propfind struct {
+ XMLName ixml.Name `xml:"DAV: propfind"`
+ Allprop *struct{} `xml:"DAV: allprop"`
+ Propname *struct{} `xml:"DAV: propname"`
+ Prop propfindProps `xml:"DAV: prop"`
+ Include propfindProps `xml:"DAV: include"`
+}
+
+func readPropfind(r io.Reader) (pf propfind, status int, err error) {
+ c := countingReader{r: r}
+ if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to propfind allprop.
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+ return propfind{Allprop: new(struct{})}, 0, nil
+ }
+ err = errInvalidPropfind
+ }
+ return propfind{}, http.StatusBadRequest, err
+ }
+
+ if pf.Allprop == nil && pf.Include != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Prop != nil && pf.Propname != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ return pf, 0, nil
+}
+
+// Property represents a single DAV resource property as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
+type Property struct {
+ // XMLName is the fully qualified name that identifies this property.
+ XMLName xml.Name
+
+ // Lang is an optional xml:lang attribute.
+ Lang string `xml:"xml:lang,attr,omitempty"`
+
+ // InnerXML contains the XML representation of the property value.
+ // See http://www.webdav.org/specs/rfc4918.html#property_values
+ //
+ // Property values of complex type or mixed-content must have fully
+ // expanded XML namespaces or be self-contained with according
+ // XML namespace declarations. They must not rely on any XML
+ // namespace declarations within the scope of the XML document,
+ // even including the DAV: namespace.
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// ixmlProperty is the same as the Property type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlProperty struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
+// See multistatusWriter for the "D:" namespace prefix.
+type xmlError struct {
+ XMLName ixml.Name `xml:"D:error"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+// See multistatusWriter for the "D:" namespace prefix.
+type propstat struct {
+ Prop []Property `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlPropstat struct {
+ Prop []ixmlProperty `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
+// before encoding. See multistatusWriter.
+func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
+ // Convert from a propstat to an ixmlPropstat.
+ ixmlPs := ixmlPropstat{
+ Prop: make([]ixmlProperty, len(ps.Prop)),
+ Status: ps.Status,
+ Error: ps.Error,
+ ResponseDescription: ps.ResponseDescription,
+ }
+ for k, prop := range ps.Prop {
+ ixmlPs.Prop[k] = ixmlProperty{
+ XMLName: ixml.Name(prop.XMLName),
+ Lang: prop.Lang,
+ InnerXML: prop.InnerXML,
+ }
+ }
+
+ for k, prop := range ixmlPs.Prop {
+ if prop.XMLName.Space == "DAV:" {
+ prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
+ ixmlPs.Prop[k] = prop
+ }
+ }
+ // Distinct type to avoid infinite recursion of MarshalXML.
+ type newpropstat ixmlPropstat
+ return e.EncodeElement(newpropstat(ixmlPs), start)
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
+// See multistatusWriter for the "D:" namespace prefix.
+type response struct {
+ XMLName ixml.Name `xml:"D:response"`
+ Href []string `xml:"D:href"`
+ Propstat []propstat `xml:"D:propstat"`
+ Status string `xml:"D:status,omitempty"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// multistatusWriter marshals one or more responses into an XML
+// multistatus response.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
+// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
+// "DAV:" on this element, is prepended on the nested response, as well as on all
+// its nested elements. All property names in the DAV: namespace are prefixed as
+// well. This is because some versions of Mini-Redirector (on windows 7) ignore
+// elements with a default namespace (no prefixed namespace). A less intrusive fix
+// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
+type multistatusWriter struct {
+ // ResponseDescription contains the optional responsedescription
+ // of the multistatus XML element. Only the latest content before
+ // close will be emitted. Empty response descriptions are not
+ // written.
+ responseDescription string
+
+ w http.ResponseWriter
+ enc *ixml.Encoder
+}
+
+// write validates and emits a DAV response as part of a multistatus response
+// element.
+//
+// It sets the HTTP status code of its underlying http.ResponseWriter to 207
+// (Multi-Status) and populates the Content-Type header. If r is the
+// first valid response to be written, write prepends the XML representation
+// of r with a multistatus tag. Callers must call close after the last response
+// has been written.
+func (w *multistatusWriter) write(r *response) error {
+ switch len(r.Href) {
+ case 0:
+ return errInvalidResponse
+ case 1:
+ if len(r.Propstat) > 0 != (r.Status == "") {
+ return errInvalidResponse
+ }
+ default:
+ if len(r.Propstat) > 0 || r.Status == "" {
+ return errInvalidResponse
+ }
+ }
+ err := w.writeHeader()
+ if err != nil {
+ return err
+ }
+ return w.enc.Encode(r)
+}
+
+// writeHeader writes an XML multistatus start element on w's underlying
+// http.ResponseWriter and returns the result of the write operation.
+// After the first write attempt, writeHeader becomes a no-op.
+func (w *multistatusWriter) writeHeader() error {
+ if w.enc != nil {
+ return nil
+ }
+ w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
+ w.w.WriteHeader(StatusMulti)
+ _, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
+ if err != nil {
+ return err
+ }
+ w.enc = ixml.NewEncoder(w.w)
+ return w.enc.EncodeToken(ixml.StartElement{
+ Name: ixml.Name{
+ Space: "DAV:",
+ Local: "multistatus",
+ },
+ Attr: []ixml.Attr{{
+ Name: ixml.Name{Space: "xmlns", Local: "D"},
+ Value: "DAV:",
+ }},
+ })
+}
+
+// Close completes the marshalling of the multistatus response. It returns
+// an error if the multistatus response could not be completed. If both the
+// return value and field enc of w are nil, then no multistatus response has
+// been written.
+func (w *multistatusWriter) close() error {
+ if w.enc == nil {
+ return nil
+ }
+ var end []ixml.Token
+ if w.responseDescription != "" {
+ name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
+ end = append(end,
+ ixml.StartElement{Name: name},
+ ixml.CharData(w.responseDescription),
+ ixml.EndElement{Name: name},
+ )
+ }
+ end = append(end, ixml.EndElement{
+ Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
+ })
+ for _, t := range end {
+ err := w.enc.EncodeToken(t)
+ if err != nil {
+ return err
+ }
+ }
+ return w.enc.Flush()
+}
+
+var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
+
+func xmlLang(s ixml.StartElement, d string) string {
+ for _, attr := range s.Attr {
+ if attr.Name == xmlLangName {
+ return attr.Value
+ }
+ }
+ return d
+}
+
+type xmlValue []byte
+
+func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ // The XML value of a property can be arbitrary, mixed-content XML.
+ // To make sure that the unmarshalled value contains all required
+ // namespaces, we encode all the property value XML tokens into a
+ // buffer. This forces the encoder to redeclare any used namespaces.
+ var b bytes.Buffer
+ e := ixml.NewEncoder(&b)
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
+ break
+ }
+ if err = e.EncodeToken(t); err != nil {
+ return err
+ }
+ }
+ err := e.Flush()
+ if err != nil {
+ return err
+ }
+ *v = b.Bytes()
+ return nil
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
+type proppatchProps []Property
+
+// UnmarshalXML appends the property names and values enclosed within start
+// to ps.
+//
+// An xml:lang attribute that is defined either on the DAV:prop or property
+// name XML element is propagated to the property's Lang field.
+//
+// UnmarshalXML returns an error if start does not contain any properties or if
+// property values contain syntactically incorrect XML.
+func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ lang := xmlLang(start, "")
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch elem := t.(type) {
+ case ixml.EndElement:
+ if len(*ps) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ p := Property{
+ XMLName: xml.Name(t.(ixml.StartElement).Name),
+ Lang: xmlLang(t.(ixml.StartElement), lang),
+ }
+ err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
+ if err != nil {
+ return err
+ }
+ *ps = append(*ps, p)
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
+type setRemove struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ Prop proppatchProps `xml:"DAV: prop"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
+type propertyupdate struct {
+ XMLName ixml.Name `xml:"DAV: propertyupdate"`
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ SetRemove []setRemove `xml:",any"`
+}
+
+func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
+ var pu propertyupdate
+ if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
+ return nil, http.StatusBadRequest, err
+ }
+ for _, op := range pu.SetRemove {
+ remove := false
+ switch op.XMLName {
+ case ixml.Name{Space: "DAV:", Local: "set"}:
+ // No-op.
+ case ixml.Name{Space: "DAV:", Local: "remove"}:
+ for _, p := range op.Prop {
+ if len(p.InnerXML) > 0 {
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ }
+ remove = true
+ default:
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
+ }
+ return patches, 0, nil
+}
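
The multistatusWriter above is deliberately lazy: the 207 Multi-Status status line and the opening <D:multistatus> element are only emitted on the first successful write (or an explicit writeHeader), and close finishes the element, appending any responsedescription first; if nothing was ever written, close is a no-op. A small sketch of that contract, as handlePropfind and handleProppatch use it (illustrative only, not part of the vendored file):

// Illustrative sketch of the write/close contract used by handlePropfind
// and handleProppatch above.
func writeMultistatus(w http.ResponseWriter, resps []*response) error {
	mw := multistatusWriter{w: w}
	for _, r := range resps {
		// The first successful write emits "207 Multi-Status", the XML
		// declaration and the opening <D:multistatus> element; later
		// writes just append further <D:response> elements.
		if err := mw.write(r); err != nil {
			return err
		}
	}
	// close appends the closing </D:multistatus> tag and flushes the
	// encoder; if no response was written, it returns nil without
	// touching w.
	return mw.close()
}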
diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go
new file mode 100644
index 000000000..a3d9e1ed8
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/xml_test.go
@@ -0,0 +1,906 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+func TestReadLockInfo(t *testing.T) {
+ // The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ input string
+ wantLI lockInfo
+ wantStatus int
+ }{{
+ "bad: junk",
+ "xxx",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: invalid owner XML",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href> no end tag \n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: invalid UTF-8",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href> \xff </D:href>\n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: unfinished XML #1",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: unfinished XML #2",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "good: empty",
+ "",
+ lockInfo{},
+ 0,
+ }, {
+ "good: plain-text owner",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>gopher</D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{
+ XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
+ Exclusive: new(struct{}),
+ Write: new(struct{}),
+ Owner: owner{
+ InnerXML: "gopher",
+ },
+ },
+ 0,
+ }, {
+ "section 9.10.7",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href>http://example.org/~ejw/contact.html</D:href>\n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{
+ XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
+ Exclusive: new(struct{}),
+ Write: new(struct{}),
+ Owner: owner{
+ InnerXML: "\n <D:href>http://example.org/~ejw/contact.html</D:href>\n ",
+ },
+ },
+ 0,
+ }}
+
+ for _, tc := range testCases {
+ li, status, err := readLockInfo(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus {
+ t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v",
+ tc.desc, li, status, tc.wantLI, tc.wantStatus)
+ continue
+ }
+ }
+}
+
+func TestReadPropfind(t *testing.T) {
+ testCases := []struct {
+ desc string
+ input string
+ wantPF propfind
+ wantStatus int
+ }{{
+ desc: "propfind: propname",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Propname: new(struct{}),
+ },
+ }, {
+ desc: "propfind: empty body means allprop",
+ input: "",
+ wantPF: propfind{
+ Allprop: new(struct{}),
+ },
+ }, {
+ desc: "propfind: allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ },
+ }, {
+ desc: "propfind: allprop followed by include",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ " <A:include><A:displayname/></A:include>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: include followed by allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:include><A:displayname/></A:include>\n" +
+ " <A:allprop/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:displayname/></A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: prop with ignored comments",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>\n" +
+ " <!-- ignore -->\n" +
+ " <A:displayname><!-- ignore --></A:displayname>\n" +
+ " </A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind with ignored whitespace",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop> <A:displayname/></A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind with ignored mixed-content",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>foo<A:displayname/>bar</A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propname with ignored element (section A.4)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>\n" +
+ " <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Propname: new(struct{}),
+ },
+ }, {
+ desc: "propfind: bad: junk",
+ input: "xxx",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: propname and allprop (section A.3)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>" +
+ " <A:allprop/>" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: propname and prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:displayname/></A:prop>\n" +
+ " <A:propname/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: allprop and prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ " <A:prop><A:foo/><A:/prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: empty propfind with ignored element (section A.4)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <E:expired-props/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: empty prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: prop with just chardata",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>foo</A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: interrupted prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo></A:prop>\n",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: malformed end element prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo/></A:bar></A:prop>\n",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: property with chardata value",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo>bar</A:foo></A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: property with whitespace value",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo> </A:foo></A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: include without allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:include><A:foo/></A:include>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }}
+
+ for _, tc := range testCases {
+ pf, status, err := readPropfind(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus {
+ t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v",
+ tc.desc, pf, status, tc.wantPF, tc.wantStatus)
+ continue
+ }
+ }
+}
+
+func TestMultistatusWriter(t *testing.T) {
+ // The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ responses []response
+ respdesc string
+ writeHeader bool
+ wantXML string
+ wantCode int
+ wantErr error
+ }{{
+ desc: "section 9.2.2 (failed dependency)",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://ns.example.com/",
+ Local: "Authors",
+ },
+ }},
+ Status: "HTTP/1.1 424 Failed Dependency",
+ }, {
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://ns.example.com/",
+ Local: "Copyright-Owner",
+ },
+ }},
+ Status: "HTTP/1.1 409 Conflict",
+ }},
+ ResponseDescription: "Copyright Owner cannot be deleted or altered.",
+ }},
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <Authors xmlns="http://ns.example.com/"></Authors>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 424 Failed Dependency</status>` +
+ ` </propstat>` +
+ ` <propstat xmlns="DAV:">` +
+ ` <prop>` +
+ ` <Copyright-Owner xmlns="http://ns.example.com/"></Copyright-Owner>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 409 Conflict</status>` +
+ ` </propstat>` +
+ ` <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` +
+ `</response>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "section 9.6.2 (lock-token-submitted)",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Status: "HTTP/1.1 423 Locked",
+ Error: &xmlError{
+ InnerXML: []byte(`<lock-token-submitted xmlns="DAV:"/>`),
+ },
+ }},
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <status>HTTP/1.1 423 Locked</status>` +
+ ` <error><lock-token-submitted xmlns="DAV:"/></error>` +
+ ` </response>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "section 9.1.3",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"},
+ InnerXML: []byte(`` +
+ `<BoxType xmlns="http://ns.example.com/boxschema/">` +
+ `Box type A` +
+ `</BoxType>`),
+ }, {
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"},
+ InnerXML: []byte(`` +
+ `<Name xmlns="http://ns.example.com/boxschema/">` +
+ `J.J. Johnson` +
+ `</Name>`),
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }, {
+ Prop: []Property{{
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"},
+ }, {
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"},
+ }},
+ Status: "HTTP/1.1 403 Forbidden",
+ ResponseDescription: "The user does not have access to the DingALing property.",
+ }},
+ }},
+ respdesc: "There has been an access violation error.",
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:" xmlns:B="http://ns.example.com/boxschema/">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` +
+ ` <B:author><B:Name>J.J. Johnson</B:Name></B:author>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 200 OK</status>` +
+ ` </propstat>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <B:DingALing/>` +
+ ` <B:Random/>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 403 Forbidden</status>` +
+ ` <responsedescription>The user does not have access to the DingALing property.</responsedescription>` +
+ ` </propstat>` +
+ ` </response>` +
+ ` <responsedescription>There has been an access violation error.</responsedescription>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "no response written",
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "no response written (with description)",
+ respdesc: "too bad",
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "empty multistatus with header",
+ writeHeader: true,
+ wantXML: `<multistatus xmlns="DAV:"></multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "bad: no href",
+ responses: []response{{
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: multiple hrefs and no status",
+ responses: []response{{
+ Href: []string{"http://example.com/foo", "http://example.com/bar"},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: one href and no propstat",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: status with one href and propstat",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: multiple hrefs and propstat",
+ responses: []response{{
+ Href: []string{
+ "http://example.com/foo",
+ "http://example.com/bar",
+ },
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }}
+
+ n := xmlNormalizer{omitWhitespace: true}
+loop:
+ for _, tc := range testCases {
+ rec := httptest.NewRecorder()
+ w := multistatusWriter{w: rec, responseDescription: tc.respdesc}
+ if tc.writeHeader {
+ if err := w.writeHeader(); err != nil {
+ t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err)
+ continue
+ }
+ }
+ for _, r := range tc.responses {
+ if err := w.write(&r); err != nil {
+ if err != tc.wantErr {
+ t.Errorf("%s: got write error %v, want %v",
+ tc.desc, err, tc.wantErr)
+ }
+ continue loop
+ }
+ }
+ if err := w.close(); err != tc.wantErr {
+ t.Errorf("%s: got close error %v, want %v",
+ tc.desc, err, tc.wantErr)
+ continue
+ }
+ if rec.Code != tc.wantCode {
+ t.Errorf("%s: got HTTP status code %d, want %d\n",
+ tc.desc, rec.Code, tc.wantCode)
+ continue
+ }
+ gotXML := rec.Body.String()
+ eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML))
+ if err != nil {
+ t.Errorf("%s: equalXML: %v", tc.desc, err)
+ continue
+ }
+ if !eq {
+ t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML)
+ }
+ }
+}
+
+func TestReadProppatch(t *testing.T) {
+ ppStr := func(pps []Proppatch) string {
+ var outer []string
+ for _, pp := range pps {
+ var inner []string
+ for _, p := range pp.Props {
+ inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}",
+ p.XMLName, p.Lang, p.InnerXML))
+ }
+ outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}",
+ pp.Remove, strings.Join(inner, ", ")))
+ }
+ return "[" + strings.Join(outer, ", ") + "]"
+ }
+
+ testCases := []struct {
+ desc string
+ input string
+ wantPP []Proppatch
+ wantStatus int
+ }{{
+ desc: "proppatch: section 9.2 (with simple property value)",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:set>` +
+ ` <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` +
+ ` </D:set>` +
+ ` <D:remove>` +
+ ` <D:prop><Z:Copyright-Owner/></D:prop>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantPP: []Proppatch{{
+ Props: []Property{{
+ xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"},
+ "",
+ []byte(`somevalue`),
+ }},
+ }, {
+ Remove: true,
+ Props: []Property{{
+ xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"},
+ "",
+ nil,
+ }},
+ }},
+ }, {
+ desc: "proppatch: lang attribute on prop",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:">` +
+ ` <D:set>` +
+ ` <D:prop xml:lang="en">` +
+ ` <foo xmlns="http://example.com/ns"/>` +
+ ` </D:prop>` +
+ ` </D:set>` +
+ `</D:propertyupdate>`,
+ wantPP: []Proppatch{{
+ Props: []Property{{
+ xml.Name{Space: "http://example.com/ns", Local: "foo"},
+ "en",
+ nil,
+ }},
+ }},
+ }, {
+ desc: "bad: remove with value",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:remove>` +
+ ` <D:prop>` +
+ ` <Z:Authors>` +
+ ` <Z:Author>Jim Whitehead</Z:Author>` +
+ ` </Z:Authors>` +
+ ` </D:prop>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: empty propertyupdate",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: empty prop",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:remove>` +
+ ` <D:prop/>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }}
+
+ for _, tc := range testCases {
+ pp, status, err := readProppatch(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if status != tc.wantStatus {
+ t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus)
+ continue
+ }
+ if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus {
+ t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP))
+ }
+ }
+}
+
+func TestUnmarshalXMLValue(t *testing.T) {
+ testCases := []struct {
+ desc string
+ input string
+ wantVal string
+ }{{
+ desc: "simple char data",
+ input: "<root>foo</root>",
+ wantVal: "foo",
+ }, {
+ desc: "empty element",
+ input: "<root><foo/></root>",
+ wantVal: "<foo/>",
+ }, {
+ desc: "preserve namespace",
+ input: `<root><foo xmlns="bar"/></root>`,
+ wantVal: `<foo xmlns="bar"/>`,
+ }, {
+ desc: "preserve root element namespace",
+ input: `<root xmlns:bar="bar"><bar:foo/></root>`,
+ wantVal: `<foo xmlns="bar"/>`,
+ }, {
+ desc: "preserve whitespace",
+ input: "<root> \t </root>",
+ wantVal: " \t ",
+ }, {
+ desc: "preserve mixed content",
+ input: `<root xmlns="bar"> <foo>a<bam xmlns="baz"/> </foo> </root>`,
+ wantVal: ` <foo xmlns="bar">a<bam xmlns="baz"/> </foo> `,
+ }, {
+ desc: "section 9.2",
+ input: `` +
+ `<Z:Authors xmlns:Z="http://ns.example.com/z/">` +
+ ` <Z:Author>Jim Whitehead</Z:Author>` +
+ ` <Z:Author>Roy Fielding</Z:Author>` +
+ `</Z:Authors>`,
+ wantVal: `` +
+ ` <Author xmlns="http://ns.example.com/z/">Jim Whitehead</Author>` +
+ ` <Author xmlns="http://ns.example.com/z/">Roy Fielding</Author>`,
+ }, {
+ desc: "section 4.3.1 (mixed content)",
+ input: `` +
+ `<x:author ` +
+ ` xmlns:x='http://example.com/ns' ` +
+ ` xmlns:D="DAV:">` +
+ ` <x:name>Jane Doe</x:name>` +
+ ` <!-- Jane's contact info -->` +
+ ` <x:uri type='email'` +
+ ` added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` +
+ ` <x:uri type='web'` +
+ ` added='2005-11-27'>http://www.example.com</x:uri>` +
+ ` <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` +
+ ` Jane has been working way <h:em>too</h:em> long on the` +
+ ` long-awaited revision of <![CDATA[<RFC2518>]]>.` +
+ ` </x:notes>` +
+ `</x:author>`,
+ wantVal: `` +
+ ` <name xmlns="http://example.com/ns">Jane Doe</name>` +
+ ` ` +
+ ` <uri type='email'` +
+ ` xmlns="http://example.com/ns" ` +
+ ` added='2005-11-26'>mailto:jane.doe@example.com</uri>` +
+ ` <uri added='2005-11-27'` +
+ ` type='web'` +
+ ` xmlns="http://example.com/ns">http://www.example.com</uri>` +
+ ` <notes xmlns="http://example.com/ns" ` +
+ ` xmlns:h="http://www.w3.org/1999/xhtml">` +
+ ` Jane has been working way <h:em>too</h:em> long on the` +
+ ` long-awaited revision of &lt;RFC2518&gt;.` +
+ ` </notes>`,
+ }}
+
+ var n xmlNormalizer
+ for _, tc := range testCases {
+ d := ixml.NewDecoder(strings.NewReader(tc.input))
+ var v xmlValue
+ if err := d.Decode(&v); err != nil {
+ t.Errorf("%s: got error %v, want nil", tc.desc, err)
+ continue
+ }
+ eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal))
+ if err != nil {
+ t.Errorf("%s: equalXML: %v", tc.desc, err)
+ continue
+ }
+ if !eq {
+ t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal)
+ }
+ }
+}
+
+// xmlNormalizer normalizes XML.
+type xmlNormalizer struct {
+ // omitWhitespace instructs the normalizer to ignore whitespace between element tags.
+ omitWhitespace bool
+ // omitComments instructs the normalizer to ignore XML comments.
+ omitComments bool
+}
+
+// normalize writes the normalized XML content of r to w. It applies the
+// following rules
+//
+// * Rename namespace prefixes according to an internal heuristic.
+// * Remove unnecessary namespace declarations.
+// * Sort attributes in XML start elements in lexical order of their
+// fully qualified name.
+// * Remove XML directives and processing instructions.
+// * Remove CDATA between XML tags that only contains whitespace, if
+// instructed to do so.
+// * Remove comments, if instructed to do so.
+//
+func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error {
+ d := ixml.NewDecoder(r)
+ e := ixml.NewEncoder(w)
+ for {
+ t, err := d.Token()
+ if err != nil {
+ if t == nil && err == io.EOF {
+ break
+ }
+ return err
+ }
+ switch val := t.(type) {
+ case ixml.Directive, ixml.ProcInst:
+ continue
+ case ixml.Comment:
+ if n.omitComments {
+ continue
+ }
+ case ixml.CharData:
+ if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 {
+ continue
+ }
+ case ixml.StartElement:
+ start, _ := ixml.CopyToken(val).(ixml.StartElement)
+ attr := start.Attr[:0]
+ for _, a := range start.Attr {
+ if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" {
+ continue
+ }
+ attr = append(attr, a)
+ }
+ sort.Sort(byName(attr))
+ start.Attr = attr
+ t = start
+ }
+ err = e.EncodeToken(t)
+ if err != nil {
+ return err
+ }
+ }
+ return e.Flush()
+}
+
+// equalXML tests for equality of the normalized XML contents of a and b.
+func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) {
+ var buf bytes.Buffer
+ if err := n.normalize(&buf, a); err != nil {
+ return false, err
+ }
+ normA := buf.String()
+ buf.Reset()
+ if err := n.normalize(&buf, b); err != nil {
+ return false, err
+ }
+ normB := buf.String()
+ return normA == normB, nil
+}
+
+type byName []ixml.Attr
+
+func (a byName) Len() int { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool {
+ if a[i].Name.Space != a[j].Name.Space {
+ return a[i].Name.Space < a[j].Name.Space
+ }
+ return a[i].Name.Local < a[j].Name.Local
+}
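
A brief illustration of what the normalizer above buys these tests: with omitWhitespace set, attribute order and whitespace-only character data do not affect equality. The sketch below is hypothetical and not part of this diff; it assumes it lives in the same test package as xmlNormalizer, and the literal documents a and b are made up for illustration.

func TestXMLNormalizerSketch(t *testing.T) {
	n := xmlNormalizer{omitWhitespace: true}
	// Attributes are sorted by qualified name and whitespace-only CharData
	// is dropped, so these two documents normalize identically.
	a := `<r><e b="2" a="1"/></r>`
	b := "<r>\n  <e a=\"1\" b=\"2\"/>\n</r>"
	eq, err := n.equalXML(strings.NewReader(a), strings.NewReader(b))
	if err != nil {
		t.Fatalf("equalXML: %v", err)
	}
	if !eq {
		t.Error("expected documents to normalize equal")
	}
}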
diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 000000000..20d1e1e38
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "crypto/tls"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// DialError is an error that occurs while dialing a WebSocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for a client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ var client net.Conn
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+ switch config.Location.Scheme {
+ case "ws":
+ client, err = net.Dial("tcp", parseAuthority(config.Location))
+
+ case "wss":
+ client, err = tls.Dial("tcp", parseAuthority(config.Location), config.TlsConfig)
+
+ default:
+ err = ErrBadScheme
+ }
+ if err != nil {
+ goto Error
+ }
+
+ ws, err = NewClient(config, client)
+ if err != nil {
+ client.Close()
+ goto Error
+ }
+ return
+
+Error:
+ return nil, &DialError{config, err}
+}
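
The Dial example that follows uses a plain ws:// URL. For completeness, here is a hedged sketch (not part of this diff) of how a caller might dial a wss:// endpoint through DialConfig with an explicit TLS configuration; the URLs and TLS settings are placeholders.

package main

import (
	"crypto/tls"
	"log"

	"golang.org/x/net/websocket"
)

func main() {
	// NewConfig parses the server and origin URLs and selects hybi version 13.
	config, err := websocket.NewConfig("wss://chat.example.com/ws", "https://chat.example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// DialConfig consults TlsConfig when the scheme is "wss" (see tls.Dial above).
	config.TlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
	ws, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()
	if _, err := ws.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}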
diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go
new file mode 100644
index 000000000..72bb9d48e
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/exampledial_test.go
@@ -0,0 +1,31 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket_test
+
+import (
+ "fmt"
+ "log"
+
+ "golang.org/x/net/websocket"
+)
+
+// This example demonstrates a trivial client.
+func ExampleDial() {
+ origin := "http://localhost/"
+ url := "ws://localhost:12345/ws"
+ ws, err := websocket.Dial(url, "", origin)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := ws.Write([]byte("hello, world!\n")); err != nil {
+ log.Fatal(err)
+ }
+ var msg = make([]byte, 512)
+ var n int
+ if n, err = ws.Read(msg); err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("Received: %s.\n", msg[:n])
+}
diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go
new file mode 100644
index 000000000..f22a98fcd
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go
@@ -0,0 +1,26 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket_test
+
+import (
+ "io"
+ "net/http"
+
+ "golang.org/x/net/websocket"
+)
+
+// Echo the data received on the WebSocket.
+func EchoServer(ws *websocket.Conn) {
+ io.Copy(ws, ws)
+}
+
+// This example demonstrates a trivial echo server.
+func ExampleHandler() {
+ http.Handle("/echo", websocket.Handler(EchoServer))
+ err := http.ListenAndServe(":12345", nil)
+ if err != nil {
+ panic("ListenAndServe: " + err.Error())
+ }
+}
diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 000000000..8cffdd16c
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements the protocol of the hybi draft.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in the hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for a hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates a new frame reader based on its frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection and creates a new reader for the frame.
+// See Section 5.2, Base Framing Protocol, for details.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
+
+// A hybiFrameWriter is a writer for a hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+ // The server MUST NOT mask any frames it sends.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(ioutil.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking the hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes the IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
+
+// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A hybiServerHandshaker performs a server handshake using the hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+ // You need to choose a Protocol in the Handshake func in Server.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking the hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
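
As getNonceAccept above shows, the accept token is simply base64(SHA-1(nonce + websocketGUID)). A standalone sketch (not part of this diff) using the sample nonce from the hybi draft reproduces the value the handshake tests below expect:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	const guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" // websocketGUID
	nonce := "dGhlIHNhbXBsZSBub25jZQ=="                 // sample Sec-WebSocket-Key
	h := sha1.New()
	h.Write([]byte(nonce))
	h.Write([]byte(guid))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Prints: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}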
diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go
new file mode 100644
index 000000000..9504aa2d3
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/hybi_test.go
@@ -0,0 +1,608 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+// Test the getNonceAccept function with values in
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+func TestSecWebSocketAccept(t *testing.T) {
+ nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==")
+ expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")
+ accept, err := getNonceAccept(nonce)
+ if err != nil {
+ t.Errorf("getNonceAccept: returned error %v", err)
+ return
+ }
+ if !bytes.Equal(expected, accept) {
+ t.Errorf("getNonceAccept: expected %q got %q", expected, accept)
+ }
+}
+
+func TestHybiClientHandshake(t *testing.T) {
+ type test struct {
+ url, host string
+ }
+ tests := []test{
+ {"ws://server.example.com/chat", "server.example.com"},
+ {"ws://127.0.0.1/chat", "127.0.0.1"},
+ }
+ if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil {
+ tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"})
+ }
+
+ for _, tt := range tests {
+ var b bytes.Buffer
+ bw := bufio.NewWriter(&b)
+ br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols
+Upgrade: websocket
+Connection: Upgrade
+Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
+Sec-WebSocket-Protocol: chat
+
+`))
+ var err error
+ var config Config
+ config.Location, err = url.ParseRequestURI(tt.url)
+ if err != nil {
+ t.Fatal("location url", err)
+ }
+ config.Origin, err = url.ParseRequestURI("http://example.com")
+ if err != nil {
+ t.Fatal("origin url", err)
+ }
+ config.Protocol = append(config.Protocol, "chat")
+ config.Protocol = append(config.Protocol, "superchat")
+ config.Version = ProtocolVersionHybi13
+ config.handshakeData = map[string]string{
+ "key": "dGhlIHNhbXBsZSBub25jZQ==",
+ }
+ if err := hybiClientHandshake(&config, br, bw); err != nil {
+ t.Fatal("handshake", err)
+ }
+ req, err := http.ReadRequest(bufio.NewReader(&b))
+ if err != nil {
+ t.Fatal("read request", err)
+ }
+ if req.Method != "GET" {
+ t.Errorf("request method expected GET, but got %s", req.Method)
+ }
+ if req.URL.Path != "/chat" {
+ t.Errorf("request path expected /chat, but got %s", req.URL.Path)
+ }
+ if req.Proto != "HTTP/1.1" {
+ t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto)
+ }
+ if req.Host != tt.host {
+ t.Errorf("request host expected %s, but got %s", tt.host, req.Host)
+ }
+ var expectedHeader = map[string]string{
+ "Connection": "Upgrade",
+ "Upgrade": "websocket",
+ "Sec-Websocket-Key": config.handshakeData["key"],
+ "Origin": config.Origin.String(),
+ "Sec-Websocket-Protocol": "chat, superchat",
+ "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13),
+ }
+ for k, v := range expectedHeader {
+ if req.Header.Get(k) != v {
+ t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k))
+ }
+ }
+ }
+}
+
+func TestHybiClientHandshakeWithHeader(t *testing.T) {
+ b := bytes.NewBuffer([]byte{})
+ bw := bufio.NewWriter(b)
+ br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols
+Upgrade: websocket
+Connection: Upgrade
+Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
+Sec-WebSocket-Protocol: chat
+
+`))
+ var err error
+ config := new(Config)
+ config.Location, err = url.ParseRequestURI("ws://server.example.com/chat")
+ if err != nil {
+ t.Fatal("location url", err)
+ }
+ config.Origin, err = url.ParseRequestURI("http://example.com")
+ if err != nil {
+ t.Fatal("origin url", err)
+ }
+ config.Protocol = append(config.Protocol, "chat")
+ config.Protocol = append(config.Protocol, "superchat")
+ config.Version = ProtocolVersionHybi13
+ config.Header = http.Header(make(map[string][]string))
+ config.Header.Add("User-Agent", "test")
+
+ config.handshakeData = map[string]string{
+ "key": "dGhlIHNhbXBsZSBub25jZQ==",
+ }
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ t.Errorf("handshake failed: %v", err)
+ }
+ req, err := http.ReadRequest(bufio.NewReader(b))
+ if err != nil {
+ t.Fatalf("read request: %v", err)
+ }
+ if req.Method != "GET" {
+ t.Errorf("request method expected GET, but got %q", req.Method)
+ }
+ if req.URL.Path != "/chat" {
+ t.Errorf("request path expected /chat, but got %q", req.URL.Path)
+ }
+ if req.Proto != "HTTP/1.1" {
+ t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto)
+ }
+ if req.Host != "server.example.com" {
+ t.Errorf("request Host expected server.example.com, but got %v", req.Host)
+ }
+ var expectedHeader = map[string]string{
+ "Connection": "Upgrade",
+ "Upgrade": "websocket",
+ "Sec-Websocket-Key": config.handshakeData["key"],
+ "Origin": config.Origin.String(),
+ "Sec-Websocket-Protocol": "chat, superchat",
+ "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13),
+ "User-Agent": "test",
+ }
+ for k, v := range expectedHeader {
+ if req.Header.Get(k) != v {
+ t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k)))
+ }
+ }
+}
+
+func TestHybiServerHandshake(t *testing.T) {
+ config := new(Config)
+ handshaker := &hybiServerHandshaker{Config: config}
+ br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
+Host: server.example.com
+Upgrade: websocket
+Connection: Upgrade
+Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+Origin: http://example.com
+Sec-WebSocket-Protocol: chat, superchat
+Sec-WebSocket-Version: 13
+
+`))
+ req, err := http.ReadRequest(br)
+ if err != nil {
+ t.Fatal("request", err)
+ }
+ code, err := handshaker.ReadHandshake(br, req)
+ if err != nil {
+ t.Errorf("handshake failed: %v", err)
+ }
+ if code != http.StatusSwitchingProtocols {
+ t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
+ }
+ expectedProtocols := []string{"chat", "superchat"}
+ if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) {
+ t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol)
+ }
+ b := bytes.NewBuffer([]byte{})
+ bw := bufio.NewWriter(b)
+
+ config.Protocol = config.Protocol[:1]
+
+ err = handshaker.AcceptHandshake(bw)
+ if err != nil {
+ t.Errorf("handshake response failed: %v", err)
+ }
+ expectedResponse := strings.Join([]string{
+ "HTTP/1.1 101 Switching Protocols",
+ "Upgrade: websocket",
+ "Connection: Upgrade",
+ "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
+ "Sec-WebSocket-Protocol: chat",
+ "", ""}, "\r\n")
+
+ if b.String() != expectedResponse {
+ t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
+ }
+}
+
+func TestHybiServerHandshakeNoSubProtocol(t *testing.T) {
+ config := new(Config)
+ handshaker := &hybiServerHandshaker{Config: config}
+ br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
+Host: server.example.com
+Upgrade: websocket
+Connection: Upgrade
+Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+Origin: http://example.com
+Sec-WebSocket-Version: 13
+
+`))
+ req, err := http.ReadRequest(br)
+ if err != nil {
+ t.Fatal("request", err)
+ }
+ code, err := handshaker.ReadHandshake(br, req)
+ if err != nil {
+ t.Errorf("handshake failed: %v", err)
+ }
+ if code != http.StatusSwitchingProtocols {
+ t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
+ }
+ if len(config.Protocol) != 0 {
+ t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol))
+ }
+ b := bytes.NewBuffer([]byte{})
+ bw := bufio.NewWriter(b)
+
+ err = handshaker.AcceptHandshake(bw)
+ if err != nil {
+ t.Errorf("handshake response failed: %v", err)
+ }
+ expectedResponse := strings.Join([]string{
+ "HTTP/1.1 101 Switching Protocols",
+ "Upgrade: websocket",
+ "Connection: Upgrade",
+ "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
+ "", ""}, "\r\n")
+
+ if b.String() != expectedResponse {
+ t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
+ }
+}
+
+func TestHybiServerHandshakeHybiBadVersion(t *testing.T) {
+ config := new(Config)
+ handshaker := &hybiServerHandshaker{Config: config}
+ br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
+Host: server.example.com
+Upgrade: websocket
+Connection: Upgrade
+Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+Sec-WebSocket-Origin: http://example.com
+Sec-WebSocket-Protocol: chat, superchat
+Sec-WebSocket-Version: 9
+
+`))
+ req, err := http.ReadRequest(br)
+ if err != nil {
+ t.Fatal("request", err)
+ }
+ code, err := handshaker.ReadHandshake(br, req)
+ if err != ErrBadWebSocketVersion {
+ t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err)
+ }
+ if code != http.StatusBadRequest {
+ t.Errorf("status expected %q but got %q", http.StatusBadRequest, code)
+ }
+}
+
+func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) {
+ b := bytes.NewBuffer([]byte{})
+ frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false}
+ w, _ := frameWriterFactory.NewFrameWriter(TextFrame)
+ w.(*hybiFrameWriter).header = frameHeader
+ _, err := w.Write(testPayload)
+ w.Close()
+ if err != nil {
+ t.Errorf("Write error %q", err)
+ }
+ var expectedFrame []byte
+ expectedFrame = append(expectedFrame, testHeader...)
+ expectedFrame = append(expectedFrame, testMaskedPayload...)
+ if !bytes.Equal(expectedFrame, b.Bytes()) {
+ t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes())
+ }
+ frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)}
+ r, err := frameReaderFactory.NewFrameReader()
+ if err != nil {
+ t.Errorf("Read error %q", err)
+ }
+ if header := r.HeaderReader(); header == nil {
+ t.Errorf("no header")
+ } else {
+ actualHeader := make([]byte, r.Len())
+ n, err := header.Read(actualHeader)
+ if err != nil {
+ t.Errorf("Read header error %q", err)
+ } else {
+ if n < len(testHeader) {
+ t.Errorf("header too short %q got %q", testHeader, actualHeader[:n])
+ }
+ if !bytes.Equal(testHeader, actualHeader[:n]) {
+ t.Errorf("header expected %q got %q", testHeader, actualHeader[:n])
+ }
+ }
+ }
+ if trailer := r.TrailerReader(); trailer != nil {
+ t.Errorf("unexpected trailer %q", trailer)
+ }
+ frame := r.(*hybiFrameReader)
+ if frameHeader.Fin != frame.header.Fin ||
+ frameHeader.OpCode != frame.header.OpCode ||
+ len(testPayload) != int(frame.header.Length) {
+ t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame)
+ }
+ payload := make([]byte, len(testPayload))
+ _, err = r.Read(payload)
+ if err != nil && err != io.EOF {
+ t.Errorf("read %v", err)
+ }
+ if !bytes.Equal(testPayload, payload) {
+ t.Errorf("payload %q vs %q", testPayload, payload)
+ }
+}
+
+func TestHybiShortTextFrame(t *testing.T) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}
+ payload := []byte("hello")
+ testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader)
+
+ payload = make([]byte, 125)
+ testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader)
+}
+
+func TestHybiShortMaskedTextFrame(t *testing.T) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame,
+ MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}}
+ payload := []byte("hello")
+ maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3}
+ header := []byte{0x81, 0x85}
+ header = append(header, frameHeader.MaskingKey...)
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader)
+}
+
+func TestHybiShortBinaryFrame(t *testing.T) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame}
+ payload := []byte("hello")
+ testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader)
+
+ payload = make([]byte, 125)
+ testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader)
+}
+
+func TestHybiControlFrame(t *testing.T) {
+ payload := []byte("hello")
+
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame}
+ testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader)
+
+ frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame}
+ testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader)
+
+ frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame}
+ testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader)
+
+ frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame}
+ testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader)
+
+ frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame}
+ payload = []byte{0x03, 0xe8} // 1000
+ testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader)
+}
+
+func TestHybiLongFrame(t *testing.T) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}
+ payload := make([]byte, 126)
+ testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader)
+
+ payload = make([]byte, 65535)
+ testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader)
+
+ payload = make([]byte, 65536)
+ testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader)
+}
+
+func TestHybiClientRead(t *testing.T) {
+ wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',
+ 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping
+ 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}
+ br := bufio.NewReader(bytes.NewBuffer(wireData))
+ bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
+ conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
+
+ msg := make([]byte, 512)
+ n, err := conn.Read(msg)
+ if err != nil {
+ t.Errorf("read 1st frame, error %q", err)
+ }
+ if n != 5 {
+ t.Errorf("read 1st frame, expect 5, got %d", n)
+ }
+ if !bytes.Equal(wireData[2:7], msg[:n]) {
+ t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n])
+ }
+ n, err = conn.Read(msg)
+ if err != nil {
+ t.Errorf("read 2nd frame, error %q", err)
+ }
+ if n != 5 {
+ t.Errorf("read 2nd frame, expect 5, got %d", n)
+ }
+ if !bytes.Equal(wireData[16:21], msg[:n]) {
+ t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n])
+ }
+ n, err = conn.Read(msg)
+ if err == nil {
+ t.Errorf("read not EOF")
+ }
+ if n != 0 {
+ t.Errorf("expect read 0, got %d", n)
+ }
+}
+
+func TestHybiShortRead(t *testing.T) {
+ wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',
+ 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping
+ 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}
+ br := bufio.NewReader(bytes.NewBuffer(wireData))
+ bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
+ conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
+
+ step := 0
+ pos := 0
+ expectedPos := []int{2, 5, 16, 19}
+ expectedLen := []int{3, 2, 3, 2}
+ for {
+ msg := make([]byte, 3)
+ n, err := conn.Read(msg)
+ if step >= len(expectedPos) {
+ if err == nil {
+ t.Errorf("read not EOF")
+ }
+ if n != 0 {
+ t.Errorf("expect read 0, got %d", n)
+ }
+ return
+ }
+ pos = expectedPos[step]
+ endPos := pos + expectedLen[step]
+ if err != nil {
+ t.Errorf("read from %d, got error %q", pos, err)
+ return
+ }
+ if n != endPos-pos {
+ t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n)
+ }
+ if !bytes.Equal(wireData[pos:endPos], msg[:n]) {
+ t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n])
+ }
+ step++
+ }
+}
+
+func TestHybiServerRead(t *testing.T) {
+ wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,
+ 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello
+ 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20,
+ 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello
+ 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24,
+ 0x9a, 0xec, 0xc6, 0x48, 0x89, // world
+ }
+ br := bufio.NewReader(bytes.NewBuffer(wireData))
+ bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
+ conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request))
+
+ expected := [][]byte{[]byte("hello"), []byte("world")}
+
+ msg := make([]byte, 512)
+ n, err := conn.Read(msg)
+ if err != nil {
+ t.Errorf("read 1st frame, error %q", err)
+ }
+ if n != 5 {
+ t.Errorf("read 1st frame, expect 5, got %d", n)
+ }
+ if !bytes.Equal(expected[0], msg[:n]) {
+ t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n])
+ }
+
+ n, err = conn.Read(msg)
+ if err != nil {
+ t.Errorf("read 2nd frame, error %q", err)
+ }
+ if n != 5 {
+ t.Errorf("read 2nd frame, expect 5, got %d", n)
+ }
+ if !bytes.Equal(expected[1], msg[:n]) {
+ t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n])
+ }
+
+ n, err = conn.Read(msg)
+ if err == nil {
+ t.Errorf("read not EOF")
+ }
+ if n != 0 {
+ t.Errorf("expect read 0, got %d", n)
+ }
+}
+
+func TestHybiServerReadWithoutMasking(t *testing.T) {
+ wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'}
+ br := bufio.NewReader(bytes.NewBuffer(wireData))
+ bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
+ conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request))
+ // server MUST close the connection upon receiving a non-masked frame.
+ msg := make([]byte, 512)
+ _, err := conn.Read(msg)
+ if err != io.EOF {
+ t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err)
+ }
+}
+
+func TestHybiClientReadWithMasking(t *testing.T) {
+ wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,
+ 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello
+ }
+ br := bufio.NewReader(bytes.NewBuffer(wireData))
+ bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
+ conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
+
+ // client MUST close the connection upon receiving a masked frame.
+ msg := make([]byte, 512)
+ _, err := conn.Read(msg)
+ if err != io.EOF {
+ t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err)
+ }
+}
+
+// Test that hybiServerHandshaker supports Firefox's implementation and
+// checks that the Connection request header includes (but is not necessarily
+// equal to) "upgrade".
+func TestHybiServerFirefoxHandshake(t *testing.T) {
+ config := new(Config)
+ handshaker := &hybiServerHandshaker{Config: config}
+ br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
+Host: server.example.com
+Upgrade: websocket
+Connection: keep-alive, upgrade
+Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+Origin: http://example.com
+Sec-WebSocket-Protocol: chat, superchat
+Sec-WebSocket-Version: 13
+
+`))
+ req, err := http.ReadRequest(br)
+ if err != nil {
+ t.Fatal("request", err)
+ }
+ code, err := handshaker.ReadHandshake(br, req)
+ if err != nil {
+ t.Errorf("handshake failed: %v", err)
+ }
+ if code != http.StatusSwitchingProtocols {
+ t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
+ }
+ b := bytes.NewBuffer([]byte{})
+ bw := bufio.NewWriter(b)
+
+ config.Protocol = []string{"chat"}
+
+ err = handshaker.AcceptHandshake(bw)
+ if err != nil {
+ t.Errorf("handshake response failed: %v", err)
+ }
+ expectedResponse := strings.Join([]string{
+ "HTTP/1.1 101 Switching Protocols",
+ "Upgrade: websocket",
+ "Connection: Upgrade",
+ "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
+ "Sec-WebSocket-Protocol: chat",
+ "", ""}, "\r\n")
+
+ if b.String() != expectedResponse {
+ t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
+ }
+}
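
The masked frames in the server-read tests above are produced by XORing each payload byte with the 4-byte masking key, cycling through the key, exactly as hybiFrameReader.Read and hybiFrameWriter.Write do. A small standalone sketch (not part of this diff) reproduces the masked "hello" bytes used in those tests:

package main

import "fmt"

func main() {
	key := []byte{0xcc, 0x55, 0x80, 0x20} // masking key from the tests above
	payload := []byte("hello")
	masked := make([]byte, len(payload))
	for i, b := range payload {
		masked[i] = b ^ key[i%4] // same XOR applied on read and write
	}
	fmt.Printf("% x\n", masked)
	// Prints: a4 30 ec 4c a3
}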
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 000000000..0895dea19
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+ var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+ code, err := hs.ReadHandshake(buf.Reader, req)
+ if err == ErrBadWebSocketVersion {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if handshake != nil {
+ err = handshake(config, req)
+ if err != nil {
+ code = http.StatusForbidden
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ }
+ err = hs.AcceptHandshake(buf.Writer)
+ if err != nil {
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ conn = hs.NewServerConn(buf, rwc, req)
+ return
+}
+
+// Server represents a WebSocket server.
+type Server struct {
+ // Config is a WebSocket configuration for new WebSocket connections.
+ Config
+
+ // Handshake is an optional function called during the WebSocket handshake.
+ // For example, you can use it to check (or deliberately skip checking) the
+ // Origin header, or to select config.Protocol.
+ Handshake func(*Config, *http.Request) error
+
+ // Handler handles a WebSocket connection.
+ Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket connection.
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+ rwc, buf, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ panic("Hijack failed: " + err.Error())
+ }
+ // The server should abort the WebSocket connection if it finds
+ // the client did not send a handshake that matches the protocol
+ // specification.
+ defer rwc.Close()
+ conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+ if err != nil {
+ return
+ }
+ if conn == nil {
+ panic("unexpected nil conn")
+ }
+ s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// By default it checks that the Origin header is a valid URL.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket connection.
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s := Server{Handler: h, Handshake: checkOrigin}
+ s.serveWebSocket(w, req)
+}
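
As the Handler comment above notes, a Server with its own Handshake func can accept clients that send no Origin header. The following is a hedged sketch, not part of this diff; the path and listen address are placeholders.

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	srv := websocket.Server{
		// Accept any (or missing) Origin; a real Handshake func could also
		// select config.Protocol here.
		Handshake: func(config *websocket.Config, req *http.Request) error {
			return nil
		},
		Handler: websocket.Handler(func(ws *websocket.Conn) {
			io.Copy(ws, ws) // echo
		}),
	}
	http.Handle("/echo", srv)
	log.Fatal(http.ListenAndServe(":12345", nil))
}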
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 000000000..9412191de
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,411 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+const (
+ ProtocolVersionHybi13 = 13
+ ProtocolVersionHybi = ProtocolVersionHybi13
+ SupportedProtocolVersion = "13"
+
+ ContinuationFrame = 0
+ TextFrame = 1
+ BinaryFrame = 2
+ CloseFrame = 8
+ PingFrame = 9
+ PongFrame = 10
+ UnknownFrame = 255
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
+ ErrBadScheme = &ProtocolError{"bad scheme"}
+ ErrBadStatus = &ProtocolError{"bad status"}
+ ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
+ ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
+ ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+ ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+ ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
+ ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
+ ErrBadFrame = &ProtocolError{"bad frame"}
+ ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
+ ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
+ ErrBadRequestMethod = &ProtocolError{"bad method"}
+ ErrNotSupported = &ProtocolError{"not supported"}
+)
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration
+type Config struct {
+ // A WebSocket server address.
+ Location *url.URL
+
+ // A WebSocket client origin.
+ Origin *url.URL
+
+ // WebSocket subprotocols.
+ Protocol []string
+
+ // WebSocket protocol version.
+ Version int
+
+ // TLS config for secure WebSocket (wss).
+ TlsConfig *tls.Config
+
+ // Additional header fields to be sent in WebSocket opening handshake.
+ Header http.Header
+
+ handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle WebSocket server side handshake.
+type serverHandshaker interface {
+ // ReadHandshake reads the handshake request message from the client.
+ // It returns an HTTP response code and an error, if any.
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+ // AcceptHandshake accepts the client handshake request and sends the
+ // handshake response back to the client.
+ AcceptHandshake(buf *bufio.Writer) (err error)
+
+ // NewServerConn creates a new WebSocket connection.
+ NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+ // Reader reads the payload of the frame.
+ io.Reader
+
+ // PayloadType returns the payload type.
+ PayloadType() byte
+
+ // HeaderReader returns a reader to read the header of the frame.
+ HeaderReader() io.Reader
+
+ // TrailerReader returns a reader to read the trailer of the frame.
+ // If it returns nil, there is no trailer in the frame.
+ TrailerReader() io.Reader
+
+ // Len returns the total length of the frame, including header and trailer.
+ Len() int
+}
+
+// frameReaderFactory is an interface to create a new frame reader.
+type frameReaderFactory interface {
+ NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+ // Writer writes the payload of the frame.
+ io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create a new frame writer.
+type frameWriterFactory interface {
+ NewFrameWriter(payloadType byte) (w frameWriter, err error)
+}
+
+type frameHandler interface {
+ HandleFrame(frame frameReader) (r frameReader, err error)
+ WriteClose(status int) (err error)
+}
+
+// Conn represents a WebSocket connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ config *Config
+ request *http.Request
+
+ buf *bufio.ReadWriter
+ rwc io.ReadWriteCloser
+
+ rio sync.Mutex
+ frameReaderFactory
+ frameReader
+
+ wio sync.Mutex
+ frameWriterFactory
+
+ frameHandler
+ PayloadType byte
+ defaultCloseStatus int
+}
+
+// Read implements the io.Reader interface:
+// it reads data of a frame from the WebSocket connection.
+// If msg is not large enough for the frame data, it fills msg and the next Read
+// will read the rest of the frame data.
+// It reads Text frames and Binary frames.
+func (ws *Conn) Read(msg []byte) (n int, err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+again:
+ if ws.frameReader == nil {
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return 0, err
+ }
+ ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return 0, err
+ }
+ if ws.frameReader == nil {
+ goto again
+ }
+ }
+ n, err = ws.frameReader.Read(msg)
+ if err == io.EOF {
+ if trailer := ws.frameReader.TrailerReader(); trailer != nil {
+ io.Copy(ioutil.Discard, trailer)
+ }
+ ws.frameReader = nil
+ goto again
+ }
+ return n, err
+}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// Close implements the io.Closer interface.
+func (ws *Conn) Close() error {
+ err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
+ err1 := ws.rwc.Close()
+ if err != nil {
+ return err
+ }
+ return err1
+}
+
+func (ws *Conn) IsClientConn() bool { return ws.request == nil }
+func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
+// LocalAddr returns the WebSocket Origin for a client connection, or
+// the WebSocket location for a server connection.
+func (ws *Conn) LocalAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Origin}
+ }
+ return &Addr{ws.config.Location}
+}
+
+// RemoteAddr returns the WebSocket location for a client connection, or
+// the WebSocket Origin for a server connection.
+func (ws *Conn) RemoteAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Location}
+ }
+ return &Addr{ws.config.Origin}
+}
+
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
+
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetReadDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetWriteDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// Config returns the WebSocket config.
+func (ws *Conn) Config() *Config { return ws.config }
+
+// Request returns the http request upgraded to the WebSocket.
+// It is nil for client side.
+func (ws *Conn) Request() *http.Request { return ws.request }
+
+// Codec represents a symmetric pair of functions that implement a codec.
+type Codec struct {
+ Marshal func(v interface{}) (data []byte, payloadType byte, err error)
+ Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
+}
+
+// Send sends v, marshaled by cd.Marshal, as a single frame to ws.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ w.Close()
+ return err
+}
+
+// Receive receives a single frame from ws, unmarshals it with cd.Unmarshal, and stores the result in v.
+func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+ if ws.frameReader != nil {
+ _, err = io.Copy(ioutil.Discard, ws.frameReader)
+ if err != nil {
+ return err
+ }
+ ws.frameReader = nil
+ }
+again:
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return err
+ }
+ frame, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return err
+ }
+ if frame == nil {
+ goto again
+ }
+ payloadType := frame.PayloadType()
+ data, err := ioutil.ReadAll(frame)
+ if err != nil {
+ return err
+ }
+ return cd.Unmarshal(data, payloadType, v)
+}
+
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
+/*
+Message is a codec to send/receive text/binary data in a frame on a WebSocket connection.
+To send/receive text frame, use string type.
+To send/receive binary frame, use []byte type.
+
+Trivial usage:
+
+ import "websocket"
+
+ // receive text frame
+ var message string
+ websocket.Message.Receive(ws, &message)
+
+ // send text frame
+ message = "hello"
+ websocket.Message.Send(ws, message)
+
+ // receive binary frame
+ var data []byte
+ websocket.Message.Receive(ws, &data)
+
+ // send binary frame
+ data = []byte{0, 1, 2}
+ websocket.Message.Send(ws, data)
+
+*/
+var Message = Codec{marshal, unmarshal}
+
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
+func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ return json.Unmarshal(msg, v)
+}
+
+/*
+JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
+
+Trivial usage:
+
+ import "websocket"
+
+ type T struct {
+ Msg string
+ Count int
+ }
+
+ // receive JSON type T
+ var data T
+ websocket.JSON.Receive(ws, &data)
+
+ // send JSON type T
+ websocket.JSON.Send(ws, data)
+*/
+var JSON = Codec{jsonMarshal, jsonUnmarshal}
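
Aside for readers of this vendored file: the Handler, Dial, and JSON codec defined above are typically wired together as in the following sketch. The /event path, the Event type, and the addresses are illustrative assumptions, not part of this diff.

    package main

    import (
        "log"
        "net"
        "net/http"

        "golang.org/x/net/websocket"
    )

    // Event is a hypothetical message type used only for this sketch.
    type Event struct {
        Name string
        Seq  int
    }

    func main() {
        // Server side: echo every JSON-encoded event back to the sender.
        http.Handle("/event", websocket.Handler(func(ws *websocket.Conn) {
            defer ws.Close()
            for {
                var ev Event
                if err := websocket.JSON.Receive(ws, &ev); err != nil {
                    return // client closed the connection or sent bad data
                }
                if err := websocket.JSON.Send(ws, ev); err != nil {
                    return
                }
            }
        }))

        // Listen first so the client below cannot race the server startup.
        ln, err := net.Listen("tcp", "127.0.0.1:8080")
        if err != nil {
            log.Fatal(err)
        }
        go http.Serve(ln, nil)

        // Client side: Dial performs the opening handshake, then each codec
        // Send/Receive is carried as a single WebSocket frame.
        ws, err := websocket.Dial("ws://127.0.0.1:8080/event", "", "http://127.0.0.1/")
        if err != nil {
            log.Fatal(err)
        }
        defer ws.Close()
        if err := websocket.JSON.Send(ws, Event{Name: "ping", Seq: 1}); err != nil {
            log.Fatal(err)
        }
        var reply Event
        if err := websocket.JSON.Receive(ws, &reply); err != nil {
            log.Fatal(err)
        }
        log.Printf("echoed: %+v", reply)
    }
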
diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go
new file mode 100644
index 000000000..05b7e5356
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket_test.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+var serverAddr string
+var once sync.Once
+
+func echoServer(ws *Conn) {
+ defer ws.Close()
+ io.Copy(ws, ws)
+}
+
+type Count struct {
+ S string
+ N int
+}
+
+func countServer(ws *Conn) {
+ defer ws.Close()
+ for {
+ var count Count
+ err := JSON.Receive(ws, &count)
+ if err != nil {
+ return
+ }
+ count.N++
+ count.S = strings.Repeat(count.S, count.N)
+ err = JSON.Send(ws, count)
+ if err != nil {
+ return
+ }
+ }
+}
+
+type testCtrlAndDataHandler struct {
+ hybiFrameHandler
+}
+
+func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) {
+ h.hybiFrameHandler.conn.wio.Lock()
+ defer h.hybiFrameHandler.conn.wio.Unlock()
+ w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err := w.Write(b)
+ w.Close()
+ return n, err
+}
+
+func ctrlAndDataServer(ws *Conn) {
+ defer ws.Close()
+ h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}}
+ ws.frameHandler = h
+
+ go func() {
+ for i := 0; ; i++ {
+ var b []byte
+ if i%2 != 0 { // with or without payload
+ b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i))
+ }
+ if _, err := h.WritePing(b); err != nil {
+ break
+ }
+ if _, err := h.WritePong(b); err != nil { // unsolicited pong
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ }()
+
+ b := make([]byte, 128)
+ for {
+ n, err := ws.Read(b)
+ if err != nil {
+ break
+ }
+ if _, err := ws.Write(b[:n]); err != nil {
+ break
+ }
+ }
+}
+
+func subProtocolHandshake(config *Config, req *http.Request) error {
+ for _, proto := range config.Protocol {
+ if proto == "chat" {
+ config.Protocol = []string{proto}
+ return nil
+ }
+ }
+ return ErrBadWebSocketProtocol
+}
+
+func subProtoServer(ws *Conn) {
+ for _, proto := range ws.Config().Protocol {
+ io.WriteString(ws, proto)
+ }
+}
+
+func startServer() {
+ http.Handle("/echo", Handler(echoServer))
+ http.Handle("/count", Handler(countServer))
+ http.Handle("/ctrldata", Handler(ctrlAndDataServer))
+ subproto := Server{
+ Handshake: subProtocolHandshake,
+ Handler: Handler(subProtoServer),
+ }
+ http.Handle("/subproto", subproto)
+ server := httptest.NewServer(nil)
+ serverAddr = server.Listener.Addr().String()
+ log.Print("Test WebSocket server listening on ", serverAddr)
+}
+
+func newConfig(t *testing.T, path string) *Config {
+ config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost")
+ return config
+}
+
+func TestEcho(t *testing.T) {
+ once.Do(startServer)
+
+ // websocket.Dial()
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+ conn, err := NewClient(newConfig(t, "/echo"), client)
+ if err != nil {
+ t.Errorf("WebSocket handshake error: %v", err)
+ return
+ }
+
+ msg := []byte("hello, world\n")
+ if _, err := conn.Write(msg); err != nil {
+ t.Errorf("Write: %v", err)
+ }
+ var actual_msg = make([]byte, 512)
+ n, err := conn.Read(actual_msg)
+ if err != nil {
+ t.Errorf("Read: %v", err)
+ }
+ actual_msg = actual_msg[0:n]
+ if !bytes.Equal(msg, actual_msg) {
+ t.Errorf("Echo: expected %q got %q", msg, actual_msg)
+ }
+ conn.Close()
+}
+
+func TestAddr(t *testing.T) {
+ once.Do(startServer)
+
+ // websocket.Dial()
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+ conn, err := NewClient(newConfig(t, "/echo"), client)
+ if err != nil {
+ t.Errorf("WebSocket handshake error: %v", err)
+ return
+ }
+
+ ra := conn.RemoteAddr().String()
+ if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") {
+ t.Errorf("Bad remote addr: %v", ra)
+ }
+ la := conn.LocalAddr().String()
+ if !strings.HasPrefix(la, "http://") {
+ t.Errorf("Bad local addr: %v", la)
+ }
+ conn.Close()
+}
+
+func TestCount(t *testing.T) {
+ once.Do(startServer)
+
+ // websocket.Dial()
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+ conn, err := NewClient(newConfig(t, "/count"), client)
+ if err != nil {
+ t.Errorf("WebSocket handshake error: %v", err)
+ return
+ }
+
+ var count Count
+ count.S = "hello"
+ if err := JSON.Send(conn, count); err != nil {
+ t.Errorf("Write: %v", err)
+ }
+ if err := JSON.Receive(conn, &count); err != nil {
+ t.Errorf("Read: %v", err)
+ }
+ if count.N != 1 {
+ t.Errorf("count: expected %d got %d", 1, count.N)
+ }
+ if count.S != "hello" {
+ t.Errorf("count: expected %q got %q", "hello", count.S)
+ }
+ if err := JSON.Send(conn, count); err != nil {
+ t.Errorf("Write: %v", err)
+ }
+ if err := JSON.Receive(conn, &count); err != nil {
+ t.Errorf("Read: %v", err)
+ }
+ if count.N != 2 {
+ t.Errorf("count: expected %d got %d", 2, count.N)
+ }
+ if count.S != "hellohello" {
+ t.Errorf("count: expected %q got %q", "hellohello", count.S)
+ }
+ conn.Close()
+}
+
+func TestWithQuery(t *testing.T) {
+ once.Do(startServer)
+
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+
+ config := newConfig(t, "/echo")
+ config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr))
+ if err != nil {
+ t.Fatal("location url", err)
+ }
+
+ ws, err := NewClient(config, client)
+ if err != nil {
+ t.Errorf("WebSocket handshake: %v", err)
+ return
+ }
+ ws.Close()
+}
+
+func testWithProtocol(t *testing.T, subproto []string) (string, error) {
+ once.Do(startServer)
+
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+
+ config := newConfig(t, "/subproto")
+ config.Protocol = subproto
+
+ ws, err := NewClient(config, client)
+ if err != nil {
+ return "", err
+ }
+ msg := make([]byte, 16)
+ n, err := ws.Read(msg)
+ if err != nil {
+ return "", err
+ }
+ ws.Close()
+ return string(msg[:n]), nil
+}
+
+func TestWithProtocol(t *testing.T) {
+ proto, err := testWithProtocol(t, []string{"chat"})
+ if err != nil {
+ t.Errorf("SubProto: unexpected error: %v", err)
+ }
+ if proto != "chat" {
+ t.Errorf("SubProto: expected %q, got %q", "chat", proto)
+ }
+}
+
+func TestWithTwoProtocol(t *testing.T) {
+ proto, err := testWithProtocol(t, []string{"test", "chat"})
+ if err != nil {
+ t.Errorf("SubProto: unexpected error: %v", err)
+ }
+ if proto != "chat" {
+ t.Errorf("SubProto: expected %q, got %q", "chat", proto)
+ }
+}
+
+func TestWithBadProtocol(t *testing.T) {
+ _, err := testWithProtocol(t, []string{"test"})
+ if err != ErrBadStatus {
+ t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err)
+ }
+}
+
+func TestHTTP(t *testing.T) {
+ once.Do(startServer)
+
+ // If the client did not send a handshake that matches the protocol
+ // specification, the server MUST return an HTTP response with an
+ // appropriate error code (such as 400 Bad Request)
+ resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
+ if err != nil {
+ t.Errorf("Get: error %#v", err)
+ return
+ }
+ if resp == nil {
+ t.Error("Get: resp is null")
+ return
+ }
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode)
+ }
+}
+
+func TestTrailingSpaces(t *testing.T) {
+ // http://code.google.com/p/go/issues/detail?id=955
+ // The last runs of this create keys with trailing spaces that should not be
+ // generated by the client.
+ once.Do(startServer)
+ config := newConfig(t, "/echo")
+ for i := 0; i < 30; i++ {
+ // body
+ ws, err := DialConfig(config)
+ if err != nil {
+ t.Errorf("Dial #%d failed: %v", i, err)
+ break
+ }
+ ws.Close()
+ }
+}
+
+func TestDialConfigBadVersion(t *testing.T) {
+ once.Do(startServer)
+ config := newConfig(t, "/echo")
+ config.Version = 1234
+
+ _, err := DialConfig(config)
+
+ if dialerr, ok := err.(*DialError); ok {
+ if dialerr.Err != ErrBadProtocolVersion {
+ t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err)
+ }
+ }
+}
+
+func TestSmallBuffer(t *testing.T) {
+ // http://code.google.com/p/go/issues/detail?id=1145
+ // Read should be able to handle reading a fragment of a frame.
+ once.Do(startServer)
+
+ // websocket.Dial()
+ client, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+ conn, err := NewClient(newConfig(t, "/echo"), client)
+ if err != nil {
+ t.Errorf("WebSocket handshake error: %v", err)
+ return
+ }
+
+ msg := []byte("hello, world\n")
+ if _, err := conn.Write(msg); err != nil {
+ t.Errorf("Write: %v", err)
+ }
+ var small_msg = make([]byte, 8)
+ n, err := conn.Read(small_msg)
+ if err != nil {
+ t.Errorf("Read: %v", err)
+ }
+ if !bytes.Equal(msg[:len(small_msg)], small_msg) {
+ t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg)
+ }
+ var second_msg = make([]byte, len(msg))
+ n, err = conn.Read(second_msg)
+ if err != nil {
+ t.Errorf("Read: %v", err)
+ }
+ second_msg = second_msg[0:n]
+ if !bytes.Equal(msg[len(small_msg):], second_msg) {
+ t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg)
+ }
+ conn.Close()
+}
+
+var parseAuthorityTests = []struct {
+ in *url.URL
+ out string
+}{
+ {
+ &url.URL{
+ Scheme: "ws",
+ Host: "www.google.com",
+ },
+ "www.google.com:80",
+ },
+ {
+ &url.URL{
+ Scheme: "wss",
+ Host: "www.google.com",
+ },
+ "www.google.com:443",
+ },
+ {
+ &url.URL{
+ Scheme: "ws",
+ Host: "www.google.com:80",
+ },
+ "www.google.com:80",
+ },
+ {
+ &url.URL{
+ Scheme: "wss",
+ Host: "www.google.com:443",
+ },
+ "www.google.com:443",
+ },
+ // some invalid ones for parseAuthority. parseAuthority doesn't
+ // concern itself with the scheme unless it actually knows about it
+ {
+ &url.URL{
+ Scheme: "http",
+ Host: "www.google.com",
+ },
+ "www.google.com",
+ },
+ {
+ &url.URL{
+ Scheme: "http",
+ Host: "www.google.com:80",
+ },
+ "www.google.com:80",
+ },
+ {
+ &url.URL{
+ Scheme: "asdf",
+ Host: "127.0.0.1",
+ },
+ "127.0.0.1",
+ },
+ {
+ &url.URL{
+ Scheme: "asdf",
+ Host: "www.google.com",
+ },
+ "www.google.com",
+ },
+}
+
+func TestParseAuthority(t *testing.T) {
+ for _, tt := range parseAuthorityTests {
+ out := parseAuthority(tt.in)
+ if out != tt.out {
+ t.Errorf("got %v; want %v", out, tt.out)
+ }
+ }
+}
+
+type closerConn struct {
+ net.Conn
+ closed int // count of the number of times Close was called
+}
+
+func (c *closerConn) Close() error {
+ c.closed++
+ return c.Conn.Close()
+}
+
+func TestClose(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("see golang.org/issue/11454")
+ }
+
+ once.Do(startServer)
+
+ conn, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+
+ cc := closerConn{Conn: conn}
+
+ client, err := NewClient(newConfig(t, "/echo"), &cc)
+ if err != nil {
+ t.Fatalf("WebSocket handshake: %v", err)
+ }
+
+ // set the deadline to ten minutes ago, which will have expired by the time
+ // client.Close sends the close status frame.
+ conn.SetDeadline(time.Now().Add(-10 * time.Minute))
+
+ if err := client.Close(); err == nil {
+ t.Errorf("ws.Close(): expected error, got %v", err)
+ }
+ if cc.closed < 1 {
+ t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed)
+ }
+}
+
+var originTests = []struct {
+ req *http.Request
+ origin *url.URL
+}{
+ {
+ req: &http.Request{
+ Header: http.Header{
+ "Origin": []string{"http://www.example.com"},
+ },
+ },
+ origin: &url.URL{
+ Scheme: "http",
+ Host: "www.example.com",
+ },
+ },
+ {
+ req: &http.Request{},
+ },
+}
+
+func TestOrigin(t *testing.T) {
+ conf := newConfig(t, "/echo")
+ conf.Version = ProtocolVersionHybi13
+ for i, tt := range originTests {
+ origin, err := Origin(conf, tt.req)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(origin, tt.origin) {
+ t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin)
+ continue
+ }
+ }
+}
+
+func TestCtrlAndData(t *testing.T) {
+ once.Do(startServer)
+
+ c, err := net.Dial("tcp", serverAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ws, err := NewClient(newConfig(t, "/ctrldata"), c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ws.Close()
+
+ h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}}
+ ws.frameHandler = h
+
+ b := make([]byte, 128)
+ for i := 0; i < 2; i++ {
+ data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i))
+ if _, err := ws.Write(data); err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ var ctrl []byte
+ if i%2 != 0 { // with or without payload
+ ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i))
+ }
+ if _, err := h.WritePing(ctrl); err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ n, err := ws.Read(b)
+ if err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ if !bytes.Equal(b[:n], data) {
+ t.Fatalf("#%d: got %v; want %v", i, b[:n], data)
+ }
+ }
+}
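
As a sketch only: the Server.Handshake hook exercised by the subprotocol test above can also be used to enforce an Origin check before the upgrade completes. The allowed host app.example.com and the /ws path are assumptions chosen for illustration.

    package main

    import (
        "errors"
        "io"
        "log"
        "net/http"

        "golang.org/x/net/websocket"
    )

    func main() {
        srv := websocket.Server{
            // Handshake runs before the upgrade is accepted; returning an
            // error rejects the request (this package replies 403 Forbidden).
            Handshake: func(config *websocket.Config, req *http.Request) error {
                origin, err := websocket.Origin(config, req)
                if err != nil || origin == nil {
                    return errors.New("missing or unparsable Origin header")
                }
                if origin.Host != "app.example.com" { // assumed allowed origin
                    return errors.New("origin not allowed")
                }
                return nil
            },
            // Plain echo handler, as in the tests above.
            Handler: websocket.Handler(func(ws *websocket.Conn) {
                defer ws.Close()
                io.Copy(ws, ws)
            }),
        }
        http.Handle("/ws", srv)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
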
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go
new file mode 100644
index 000000000..8d2187872
--- /dev/null
+++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go
@@ -0,0 +1,88 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xsrftoken provides methods for generating and validating secure XSRF tokens.
+package xsrftoken // import "golang.org/x/net/xsrftoken"
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Timeout is the duration for which XSRF tokens are valid.
+// It is exported so clients may set cookie timeouts that match generated tokens.
+const Timeout = 24 * time.Hour
+
+// clean sanitizes a string for inclusion in a token by replacing all ":"s.
+func clean(s string) string {
+ return strings.Replace(s, ":", "_", -1)
+}
+
+// Generate returns a URL-safe secure XSRF token that expires in 24 hours.
+//
+// key is a secret key for your application.
+// userID is a unique identifier for the user.
+// actionID is the action the user is taking (e.g. POSTing to a particular path).
+func Generate(key, userID, actionID string) string {
+ return generateTokenAtTime(key, userID, actionID, time.Now())
+}
+
+// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from the given time.
+func generateTokenAtTime(key, userID, actionID string, now time.Time) string {
+ // Round time up and convert to milliseconds.
+ milliTime := (now.UnixNano() + 1e6 - 1) / 1e6
+
+ h := hmac.New(sha1.New, []byte(key))
+ fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime)
+
+ // Get the padded base64 string, then remove the padding.
+ tok := string(h.Sum(nil))
+ tok = base64.URLEncoding.EncodeToString([]byte(tok))
+ tok = strings.TrimRight(tok, "=")
+
+ return fmt.Sprintf("%s:%d", tok, milliTime)
+}
+
+// Valid reports whether a token is a valid, unexpired token returned by Generate.
+func Valid(token, key, userID, actionID string) bool {
+ return validTokenAtTime(token, key, userID, actionID, time.Now())
+}
+
+// validTokenAtTime reports whether a token is valid at the given time.
+func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {
+ // Extract the issue time of the token.
+ sep := strings.LastIndex(token, ":")
+ if sep < 0 {
+ return false
+ }
+ millis, err := strconv.ParseInt(token[sep+1:], 10, 64)
+ if err != nil {
+ return false
+ }
+ issueTime := time.Unix(0, millis*1e6)
+
+ // Check that the token is not expired.
+ if now.Sub(issueTime) >= Timeout {
+ return false
+ }
+
+ // Check that the token is not from the future.
+ // Allow 1 minute grace period in case the token is being verified on a
+ // machine whose clock is behind the machine that issued the token.
+ if issueTime.After(now.Add(1 * time.Minute)) {
+ return false
+ }
+
+ expected := generateTokenAtTime(key, userID, actionID, issueTime)
+
+ // Check that the token matches the expected value.
+ // Use constant time comparison to avoid timing attacks.
+ return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1
+}
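
For orientation, a minimal sketch of how Generate and Valid are typically paired around a form post. The secret key, user ID, and /form action below are placeholder assumptions, not values used anywhere in this repository.

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "golang.org/x/net/xsrftoken"
    )

    const xsrfKey = "replace-with-a-long-random-secret" // placeholder secret
    const formAction = "POST /form"                     // actionID tying tokens to this endpoint

    func main() {
        http.HandleFunc("/form", func(w http.ResponseWriter, r *http.Request) {
            userID := "user-42" // normally read from the authenticated session

            if r.Method == http.MethodPost {
                // Reject the post unless it carries a token minted for this
                // user and action within the last Timeout (24 hours).
                if !xsrftoken.Valid(r.FormValue("xsrf"), xsrfKey, userID, formAction) {
                    http.Error(w, "invalid or expired XSRF token", http.StatusForbidden)
                    return
                }
                fmt.Fprintln(w, "form accepted")
                return
            }

            // Embed a fresh token in the form that will be posted back.
            tok := xsrftoken.Generate(xsrfKey, userID, formAction)
            fmt.Fprintf(w, `<form method="POST" action="/form">
      <input type="hidden" name="xsrf" value="%s">
      <button>Submit</button>
    </form>`, tok)
        })
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
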
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
new file mode 100644
index 000000000..9933f8671
--- /dev/null
+++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xsrftoken
+
+import (
+ "encoding/base64"
+ "testing"
+ "time"
+)
+
+const (
+ key = "quay"
+ userID = "12345678"
+ actionID = "POST /form"
+)
+
+var (
+ now = time.Now()
+ oneMinuteFromNow = now.Add(1 * time.Minute)
+)
+
+func TestValidToken(t *testing.T) {
+ tok := generateTokenAtTime(key, userID, actionID, now)
+ if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) {
+ t.Error("One second later: Expected token to be valid")
+ }
+ if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) {
+ t.Error("Just before timeout: Expected token to be valid")
+ }
+ if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) {
+ t.Error("One minute in the past: Expected token to be valid")
+ }
+}
+
+// TestSeparatorReplacement tests that separators are being correctly substituted
+func TestSeparatorReplacement(t *testing.T) {
+ tok := generateTokenAtTime("foo:bar", "baz", "wah", now)
+ tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now)
+ if tok == tok2 {
+ t.Errorf("Expected generated tokens to be different")
+ }
+}
+
+func TestInvalidToken(t *testing.T) {
+ invalidTokenTests := []struct {
+ name, key, userID, actionID string
+ t time.Time
+ }{
+ {"Bad key", "foobar", userID, actionID, oneMinuteFromNow},
+ {"Bad userID", key, "foobar", actionID, oneMinuteFromNow},
+ {"Bad actionID", key, userID, "foobar", oneMinuteFromNow},
+ {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)},
+ {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)},
+ }
+
+ tok := generateTokenAtTime(key, userID, actionID, now)
+ for _, itt := range invalidTokenTests {
+ if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) {
+ t.Errorf("%v: Expected token to be invalid", itt.name)
+ }
+ }
+}
+
+// TestValidateBadData primarily tests that no unexpected panics are triggered
+// during parsing
+func TestValidateBadData(t *testing.T) {
+ badDataTests := []struct {
+ name, tok string
+ }{
+ {"Invalid Base64", "ASDab24(@)$*=="},
+ {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))},
+ {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))},
+ {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)},
+ }
+
+ for _, bdt := range badDataTests {
+ if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) {
+ t.Errorf("%v: Expected token to be invalid", bdt.name)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/time/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTING.md b/vendor/golang.org/x/time/CONTRIBUTING.md
new file mode 100644
index 000000000..88dff59bc
--- /dev/null
+++ b/vendor/golang.org/x/time/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/time/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/time/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/time/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/time/README b/vendor/golang.org/x/time/README
new file mode 100644
index 000000000..144e347b4
--- /dev/null
+++ b/vendor/golang.org/x/time/README
@@ -0,0 +1 @@
+This repository provides supplementary Go time packages.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
new file mode 100644
index 000000000..feab629bb
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -0,0 +1,370 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rate provides a rate limiter.
+package rate
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Limit defines the maximum frequency of some events.
+// Limit is represented as a number of events per second.
+// A zero Limit allows no events.
+type Limit float64
+
+// Inf is the infinite rate limit; it allows all events (even if burst is zero).
+const Inf = Limit(math.MaxFloat64)
+
+// Every converts a minimum time interval between events to a Limit.
+func Every(interval time.Duration) Limit {
+ if interval <= 0 {
+ return Inf
+ }
+ return 1 / Limit(interval.Seconds())
+}
+
+// A Limiter controls how frequently events are allowed to happen.
+// It implements a "token bucket" of size b, initially full and refilled
+// at rate r tokens per second.
+// Informally, in any large enough time interval, the Limiter limits the
+// rate to r tokens per second, with a maximum burst size of b events.
+// As a special case, if r == Inf (the infinite rate), b is ignored.
+// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
+//
+// The zero value is a valid Limiter, but it will reject all events.
+// Use NewLimiter to create non-zero Limiters.
+//
+// Limiter has three main methods, Allow, Reserve, and Wait.
+// Most callers should use Wait.
+//
+// Each of the three methods consumes a single token.
+// They differ in their behavior when no token is available.
+// If no token is available, Allow returns false.
+// If no token is available, Reserve returns a reservation for a future token
+// and the amount of time the caller must wait before using it.
+// If no token is available, Wait blocks until one can be obtained
+// or its associated context.Context is canceled.
+//
+// The methods AllowN, ReserveN, and WaitN consume n tokens.
+type Limiter struct {
+ limit Limit
+ burst int
+
+ mu sync.Mutex
+ tokens float64
+ // last is the last time the limiter's tokens field was updated
+ last time.Time
+ // lastEvent is the latest time of a rate-limited event (past or future)
+ lastEvent time.Time
+}
+
+// Limit returns the maximum overall event rate.
+func (lim *Limiter) Limit() Limit {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+ return lim.limit
+}
+
+// Burst returns the maximum burst size. Burst is the maximum number of tokens
+// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
+// Burst values allow more events to happen at once.
+// A zero Burst allows no events, unless limit == Inf.
+func (lim *Limiter) Burst() int {
+ return lim.burst
+}
+
+// NewLimiter returns a new Limiter that allows events up to rate r and permits
+// bursts of at most b tokens.
+func NewLimiter(r Limit, b int) *Limiter {
+ return &Limiter{
+ limit: r,
+ burst: b,
+ }
+}
+
+// Allow is shorthand for AllowN(time.Now(), 1).
+func (lim *Limiter) Allow() bool {
+ return lim.AllowN(time.Now(), 1)
+}
+
+// AllowN reports whether n events may happen at time now.
+// Use this method if you intend to drop / skip events that exceed the rate limit.
+// Otherwise use Reserve or Wait.
+func (lim *Limiter) AllowN(now time.Time, n int) bool {
+ return lim.reserveN(now, n, 0).ok
+}
+
+// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
+// A Reservation may be canceled, which may enable the Limiter to permit additional events.
+type Reservation struct {
+ ok bool
+ lim *Limiter
+ tokens int
+ timeToAct time.Time
+ // This is the Limit at reservation time; it can change later.
+ limit Limit
+}
+
+// OK returns whether the limiter can provide the requested number of tokens
+// within the maximum wait time. If OK is false, Delay returns InfDuration, and
+// Cancel does nothing.
+func (r *Reservation) OK() bool {
+ return r.ok
+}
+
+// Delay is shorthand for DelayFrom(time.Now()).
+func (r *Reservation) Delay() time.Duration {
+ return r.DelayFrom(time.Now())
+}
+
+// InfDuration is the duration returned by Delay when a Reservation is not OK.
+const InfDuration = time.Duration(1<<63 - 1)
+
+// DelayFrom returns the duration for which the reservation holder must wait
+// before taking the reserved action. Zero duration means act immediately.
+// InfDuration means the limiter cannot grant the tokens requested in this
+// Reservation within the maximum wait time.
+func (r *Reservation) DelayFrom(now time.Time) time.Duration {
+ if !r.ok {
+ return InfDuration
+ }
+ delay := r.timeToAct.Sub(now)
+ if delay < 0 {
+ return 0
+ }
+ return delay
+}
+
+// Cancel is shorthand for CancelAt(time.Now()).
+func (r *Reservation) Cancel() {
+ r.CancelAt(time.Now())
+ return
+}
+
+// CancelAt indicates that the reservation holder will not perform the reserved action
+// and reverses the effects of this Reservation on the rate limit as much as possible,
+// considering that other reservations may have already been made.
+func (r *Reservation) CancelAt(now time.Time) {
+ if !r.ok {
+ return
+ }
+
+ r.lim.mu.Lock()
+ defer r.lim.mu.Unlock()
+
+ if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
+ return
+ }
+
+ // calculate tokens to restore
+ // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
+ // after r was obtained. These tokens should not be restored.
+ restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
+ if restoreTokens <= 0 {
+ return
+ }
+ // advance time to now
+ now, _, tokens := r.lim.advance(now)
+ // calculate new number of tokens
+ tokens += restoreTokens
+ if burst := float64(r.lim.burst); tokens > burst {
+ tokens = burst
+ }
+ // update state
+ r.lim.last = now
+ r.lim.tokens = tokens
+ if r.timeToAct == r.lim.lastEvent {
+ prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
+ if !prevEvent.Before(now) {
+ r.lim.lastEvent = prevEvent
+ }
+ }
+
+ return
+}
+
+// Reserve is shorthand for ReserveN(time.Now(), 1).
+func (lim *Limiter) Reserve() *Reservation {
+ return lim.ReserveN(time.Now(), 1)
+}
+
+// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
+// The Limiter takes this Reservation into account when allowing future events.
+// The returned Reservation's OK method reports false if n exceeds the Limiter's burst size.
+// Usage example:
+// r := lim.ReserveN(time.Now(), 1)
+// if !r.OK() {
+// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
+// }
+// time.Sleep(r.Delay())
+// Act()
+// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
+// If you need to respect a deadline or cancel the delay, use Wait instead.
+// To drop or skip events exceeding rate limit, use Allow instead.
+func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
+ r := lim.reserveN(now, n, InfDuration)
+ return &r
+}
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) (err error) {
+ return lim.WaitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+// The burst limit is ignored if the rate limit is Inf.
+func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
+ if n > lim.burst && lim.limit != Inf {
+ return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
+ }
+ // Check if ctx is already cancelled
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ // Determine wait limit
+ now := time.Now()
+ waitLimit := InfDuration
+ if deadline, ok := ctx.Deadline(); ok {
+ waitLimit = deadline.Sub(now)
+ }
+ // Reserve
+ r := lim.reserveN(now, n, waitLimit)
+ if !r.ok {
+ return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
+ }
+ // Wait
+ t := time.NewTimer(r.DelayFrom(now))
+ defer t.Stop()
+ select {
+ case <-t.C:
+ // We can proceed.
+ return nil
+ case <-ctx.Done():
+ // Context was canceled before we could proceed. Cancel the
+ // reservation, which may permit other events to proceed sooner.
+ r.Cancel()
+ return ctx.Err()
+ }
+}
+
+// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
+func (lim *Limiter) SetLimit(newLimit Limit) {
+ lim.SetLimitAt(time.Now(), newLimit)
+}
+
+// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
+// or underutilized by callers that reserved (using Reserve or Wait) but did not yet act
+// before SetLimitAt was called.
+func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+
+ now, _, tokens := lim.advance(now)
+
+ lim.last = now
+ lim.tokens = tokens
+ lim.limit = newLimit
+}
+
+// reserveN is a helper method for AllowN, ReserveN, and WaitN.
+// maxFutureReserve specifies the maximum reservation wait duration allowed.
+// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
+func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
+ lim.mu.Lock()
+
+ if lim.limit == Inf {
+ lim.mu.Unlock()
+ return Reservation{
+ ok: true,
+ lim: lim,
+ tokens: n,
+ timeToAct: now,
+ }
+ }
+
+ now, last, tokens := lim.advance(now)
+
+ // Calculate the remaining number of tokens resulting from the request.
+ tokens -= float64(n)
+
+ // Calculate the wait duration
+ var waitDuration time.Duration
+ if tokens < 0 {
+ waitDuration = lim.limit.durationFromTokens(-tokens)
+ }
+
+ // Decide result
+ ok := n <= lim.burst && waitDuration <= maxFutureReserve
+
+ // Prepare reservation
+ r := Reservation{
+ ok: ok,
+ lim: lim,
+ limit: lim.limit,
+ }
+ if ok {
+ r.tokens = n
+ r.timeToAct = now.Add(waitDuration)
+ }
+
+ // Update state
+ if ok {
+ lim.last = now
+ lim.tokens = tokens
+ lim.lastEvent = r.timeToAct
+ } else {
+ lim.last = last
+ }
+
+ lim.mu.Unlock()
+ return r
+}
+
+// advance calculates and returns an updated state for lim resulting from the passage of time.
+// lim is not changed.
+func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
+ last := lim.last
+ if now.Before(last) {
+ last = now
+ }
+
+ // Avoid making delta overflow below when last is very old.
+ maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
+ elapsed := now.Sub(last)
+ if elapsed > maxElapsed {
+ elapsed = maxElapsed
+ }
+
+ // Calculate the new number of tokens, due to time that passed.
+ delta := lim.limit.tokensFromDuration(elapsed)
+ tokens := lim.tokens + delta
+ if burst := float64(lim.burst); tokens > burst {
+ tokens = burst
+ }
+
+ return now, last, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+ seconds := tokens / float64(limit)
+ return time.Nanosecond * time.Duration(1e9*seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+ return d.Seconds() * float64(limit)
+}
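
A short sketch contrasting the Allow and Wait patterns described in the Limiter documentation above; the 10-per-second rate, burst of 5, and loop counts are arbitrary illustration values.

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
        "golang.org/x/time/rate"
    )

    func main() {
        // Token bucket: refills at 10 tokens per second, holds at most 5.
        lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 5)

        // Allow: drop work that exceeds the limit (per-request throttling).
        for i := 0; i < 8; i++ {
            if lim.Allow() {
                fmt.Println("request", i, "accepted")
            } else {
                fmt.Println("request", i, "rejected")
            }
        }

        // Wait: pace the caller to the limit instead of dropping work.
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()
        for i := 0; i < 5; i++ {
            if err := lim.Wait(ctx); err != nil {
                fmt.Println("stopping:", err) // context expired before a token was available
                return
            }
            fmt.Println("paced work item", i)
        }
    }
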
diff --git a/vendor/golang.org/x/time/rate/rate_test.go b/vendor/golang.org/x/time/rate/rate_test.go
new file mode 100644
index 000000000..cf45d92ef
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate_test.go
@@ -0,0 +1,445 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rate
+
+import (
+ "math"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+func TestLimit(t *testing.T) {
+ if Limit(10) == Inf {
+ t.Errorf("Limit(10) == Inf should be false")
+ }
+}
+
+func closeEnough(a, b Limit) bool {
+ return (math.Abs(float64(a)/float64(b)) - 1.0) < 1e-9
+}
+
+func TestEvery(t *testing.T) {
+ cases := []struct {
+ interval time.Duration
+ lim Limit
+ }{
+ {0, Inf},
+ {-1, Inf},
+ {1 * time.Nanosecond, Limit(1e9)},
+ {1 * time.Microsecond, Limit(1e6)},
+ {1 * time.Millisecond, Limit(1e3)},
+ {10 * time.Millisecond, Limit(100)},
+ {100 * time.Millisecond, Limit(10)},
+ {1 * time.Second, Limit(1)},
+ {2 * time.Second, Limit(0.5)},
+ {time.Duration(2.5 * float64(time.Second)), Limit(0.4)},
+ {4 * time.Second, Limit(0.25)},
+ {10 * time.Second, Limit(0.1)},
+ {time.Duration(math.MaxInt64), Limit(1e9 / float64(math.MaxInt64))},
+ }
+ for _, tc := range cases {
+ lim := Every(tc.interval)
+ if !closeEnough(lim, tc.lim) {
+ t.Errorf("Every(%v) = %v want %v", tc.interval, lim, tc.lim)
+ }
+ }
+}
+
+const (
+ d = 100 * time.Millisecond
+)
+
+var (
+ t0 = time.Now()
+ t1 = t0.Add(time.Duration(1) * d)
+ t2 = t0.Add(time.Duration(2) * d)
+ t3 = t0.Add(time.Duration(3) * d)
+ t4 = t0.Add(time.Duration(4) * d)
+ t5 = t0.Add(time.Duration(5) * d)
+ t9 = t0.Add(time.Duration(9) * d)
+)
+
+type allow struct {
+ t time.Time
+ n int
+ ok bool
+}
+
+func run(t *testing.T, lim *Limiter, allows []allow) {
+ for i, allow := range allows {
+ ok := lim.AllowN(allow.t, allow.n)
+ if ok != allow.ok {
+ t.Errorf("step %d: lim.AllowN(%v, %v) = %v want %v",
+ i, allow.t, allow.n, ok, allow.ok)
+ }
+ }
+}
+
+func TestLimiterBurst1(t *testing.T) {
+ run(t, NewLimiter(10, 1), []allow{
+ {t0, 1, true},
+ {t0, 1, false},
+ {t0, 1, false},
+ {t1, 1, true},
+ {t1, 1, false},
+ {t1, 1, false},
+ {t2, 2, false}, // burst size is 1, so n=2 always fails
+ {t2, 1, true},
+ {t2, 1, false},
+ })
+}
+
+func TestLimiterBurst3(t *testing.T) {
+ run(t, NewLimiter(10, 3), []allow{
+ {t0, 2, true},
+ {t0, 2, false},
+ {t0, 1, true},
+ {t0, 1, false},
+ {t1, 4, false},
+ {t2, 1, true},
+ {t3, 1, true},
+ {t4, 1, true},
+ {t4, 1, true},
+ {t4, 1, false},
+ {t4, 1, false},
+ {t9, 3, true},
+ {t9, 0, true},
+ })
+}
+
+func TestLimiterJumpBackwards(t *testing.T) {
+ run(t, NewLimiter(10, 3), []allow{
+ {t1, 1, true}, // start at t1
+ {t0, 1, true}, // jump back to t0, two tokens remain
+ {t0, 1, true},
+ {t0, 1, false},
+ {t0, 1, false},
+ {t1, 1, true}, // got a token
+ {t1, 1, false},
+ {t1, 1, false},
+ {t2, 1, true}, // got another token
+ {t2, 1, false},
+ {t2, 1, false},
+ })
+}
+
+func TestSimultaneousRequests(t *testing.T) {
+ const (
+ limit = 1
+ burst = 5
+ numRequests = 15
+ )
+ var (
+ wg sync.WaitGroup
+ numOK = uint32(0)
+ )
+
+ // Very slow replenishing bucket.
+ lim := NewLimiter(limit, burst)
+
+ // Tries to take a token, atomically updates the counter and decreases the wait
+ // group counter.
+ f := func() {
+ defer wg.Done()
+ if ok := lim.Allow(); ok {
+ atomic.AddUint32(&numOK, 1)
+ }
+ }
+
+ wg.Add(numRequests)
+ for i := 0; i < numRequests; i++ {
+ go f()
+ }
+ wg.Wait()
+ if numOK != burst {
+ t.Errorf("numOK = %d, want %d", numOK, burst)
+ }
+}
+
+func TestLongRunningQPS(t *testing.T) {
+ if runtime.GOOS == "openbsd" {
+ t.Skip("low resolution time.Sleep invalidates test (golang.org/issue/14183)")
+ return
+ }
+
+ // The test runs for a few seconds executing many requests and then checks
+ // that the overall number of requests is reasonable.
+ const (
+ limit = 100
+ burst = 100
+ )
+ var numOK = int32(0)
+
+ lim := NewLimiter(limit, burst)
+
+ var wg sync.WaitGroup
+ f := func() {
+ if ok := lim.Allow(); ok {
+ atomic.AddInt32(&numOK, 1)
+ }
+ wg.Done()
+ }
+
+ start := time.Now()
+ end := start.Add(5 * time.Second)
+ for time.Now().Before(end) {
+ wg.Add(1)
+ go f()
+
+ // This will still offer ~500 requests per second, but won't consume
+ // an outrageous amount of CPU.
+ time.Sleep(2 * time.Millisecond)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ ideal := burst + (limit * float64(elapsed) / float64(time.Second))
+
+ // We should never get more requests than allowed.
+ if want := int32(ideal + 1); numOK > want {
+ t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal)
+ }
+ // We should get very close to the number of requests allowed.
+ if want := int32(0.999 * ideal); numOK < want {
+ t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal)
+ }
+}
+
+type request struct {
+ t time.Time
+ n int
+ act time.Time
+ ok bool
+}
+
+// dFromDuration converts a duration to a multiple of the global constant d
+func dFromDuration(dur time.Duration) int {
+ // Adding a millisecond to be swallowed by the integer division
+ // because we don't care about small inaccuracies
+ return int((dur + time.Millisecond) / d)
+}
+
+// dSince returns multiples of d since t0
+func dSince(t time.Time) int {
+ return dFromDuration(t.Sub(t0))
+}
+
+func runReserve(t *testing.T, lim *Limiter, req request) *Reservation {
+ return runReserveMax(t, lim, req, InfDuration)
+}
+
+func runReserveMax(t *testing.T, lim *Limiter, req request, maxReserve time.Duration) *Reservation {
+ r := lim.reserveN(req.t, req.n, maxReserve)
+ if r.ok && (dSince(r.timeToAct) != dSince(req.act)) || r.ok != req.ok {
+ t.Errorf("lim.reserveN(t%d, %v, %v) = (t%d, %v) want (t%d, %v)",
+ dSince(req.t), req.n, maxReserve, dSince(r.timeToAct), r.ok, dSince(req.act), req.ok)
+ }
+ return &r
+}
+
+func TestSimpleReserve(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ runReserve(t, lim, request{t0, 2, t2, true})
+ runReserve(t, lim, request{t3, 2, t4, true})
+}
+
+func TestMix(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst
+ runReserve(t, lim, request{t0, 2, t0, true})
+ run(t, lim, []allow{{t1, 2, false}}) // not enough tokens - don't allow
+ runReserve(t, lim, request{t1, 2, t2, true})
+ run(t, lim, []allow{{t1, 1, false}}) // negative tokens - don't allow
+ run(t, lim, []allow{{t3, 1, true}})
+}
+
+func TestCancelInvalid(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 3, t3, false})
+ r.CancelAt(t0) // should have no effect
+ runReserve(t, lim, request{t0, 2, t2, true}) // did not get extra tokens
+}
+
+func TestCancelLast(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 2, t2, true})
+ r.CancelAt(t1) // got 2 tokens back
+ runReserve(t, lim, request{t1, 2, t2, true})
+}
+
+func TestCancelTooLate(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 2, t2, true})
+ r.CancelAt(t3) // too late to cancel - should have no effect
+ runReserve(t, lim, request{t3, 2, t4, true})
+}
+
+func TestCancel0Tokens(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 1, t1, true})
+ runReserve(t, lim, request{t0, 1, t2, true})
+ r.CancelAt(t0) // got 0 tokens back
+ runReserve(t, lim, request{t0, 1, t3, true})
+}
+
+func TestCancel1Token(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 2, t2, true})
+ runReserve(t, lim, request{t0, 1, t3, true})
+ r.CancelAt(t2) // got 1 token back
+ runReserve(t, lim, request{t2, 2, t4, true})
+}
+
+func TestCancelMulti(t *testing.T) {
+ lim := NewLimiter(10, 4)
+
+ runReserve(t, lim, request{t0, 4, t0, true})
+ rA := runReserve(t, lim, request{t0, 3, t3, true})
+ runReserve(t, lim, request{t0, 1, t4, true})
+ rC := runReserve(t, lim, request{t0, 1, t5, true})
+ rC.CancelAt(t1) // get 1 token back
+ rA.CancelAt(t1) // get 2 tokens back, as if C was never reserved
+ runReserve(t, lim, request{t1, 3, t5, true})
+}
+
+func TestReserveJumpBack(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t1, 2, t1, true}) // start at t1
+ runReserve(t, lim, request{t0, 1, t1, true}) // should violate Limit,Burst
+ runReserve(t, lim, request{t2, 2, t3, true})
+}
+
+func TestReserveJumpBackCancel(t *testing.T) {
+ lim := NewLimiter(10, 2)
+
+ runReserve(t, lim, request{t1, 2, t1, true}) // start at t1
+ r := runReserve(t, lim, request{t1, 2, t3, true})
+ runReserve(t, lim, request{t1, 1, t4, true})
+ r.CancelAt(t0) // cancel at t0, get 1 token back
+ runReserve(t, lim, request{t1, 2, t4, true}) // should violate Limit,Burst
+}
+
+func TestReserveSetLimit(t *testing.T) {
+ lim := NewLimiter(5, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ runReserve(t, lim, request{t0, 2, t4, true})
+ lim.SetLimitAt(t2, 10)
+ runReserve(t, lim, request{t2, 1, t4, true}) // violates Limit and Burst
+}
+
+func TestReserveSetLimitCancel(t *testing.T) {
+ lim := NewLimiter(5, 2)
+
+ runReserve(t, lim, request{t0, 2, t0, true})
+ r := runReserve(t, lim, request{t0, 2, t4, true})
+ lim.SetLimitAt(t2, 10)
+ r.CancelAt(t2) // 2 tokens back
+ runReserve(t, lim, request{t2, 2, t3, true})
+}
+
+func TestReserveMax(t *testing.T) {
+ lim := NewLimiter(10, 2)
+ maxT := d
+
+ runReserveMax(t, lim, request{t0, 2, t0, true}, maxT)
+ runReserveMax(t, lim, request{t0, 1, t1, true}, maxT) // reserve for close future
+ runReserveMax(t, lim, request{t0, 1, t2, false}, maxT) // time to act too far in the future
+}
+
+type wait struct {
+ name string
+ ctx context.Context
+ n int
+ delay int // in multiples of d
+ nilErr bool
+}
+
+func runWait(t *testing.T, lim *Limiter, w wait) {
+ start := time.Now()
+ err := lim.WaitN(w.ctx, w.n)
+ delay := time.Now().Sub(start)
+ if (w.nilErr && err != nil) || (!w.nilErr && err == nil) || w.delay != dFromDuration(delay) {
+ errString := "<nil>"
+ if !w.nilErr {
+ errString = "<non-nil error>"
+ }
+ t.Errorf("lim.WaitN(%v, lim, %v) = %v with delay %v ; want %v with delay %v",
+ w.name, w.n, err, delay, errString, d*time.Duration(w.delay))
+ }
+}
+
+func TestWaitSimple(t *testing.T) {
+ lim := NewLimiter(10, 3)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ runWait(t, lim, wait{"already-cancelled", ctx, 1, 0, false})
+
+ runWait(t, lim, wait{"exceed-burst-error", context.Background(), 4, 0, false})
+
+ runWait(t, lim, wait{"act-now", context.Background(), 2, 0, true})
+ runWait(t, lim, wait{"act-later", context.Background(), 3, 2, true})
+}
+
+func TestWaitCancel(t *testing.T) {
+ lim := NewLimiter(10, 3)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) // after this lim.tokens = 1
+ go func() {
+ time.Sleep(d)
+ cancel()
+ }()
+ runWait(t, lim, wait{"will-cancel", ctx, 3, 1, false})
+ // should get 3 tokens back, and have lim.tokens = 2
+ t.Logf("tokens:%v last:%v lastEvent:%v", lim.tokens, lim.last, lim.lastEvent)
+ runWait(t, lim, wait{"act-now-after-cancel", context.Background(), 2, 0, true})
+}
+
+func TestWaitTimeout(t *testing.T) {
+ lim := NewLimiter(10, 3)
+
+ ctx, cancel := context.WithTimeout(context.Background(), d)
+ defer cancel()
+ runWait(t, lim, wait{"act-now", ctx, 2, 0, true})
+ runWait(t, lim, wait{"w-timeout-err", ctx, 3, 0, false})
+}
+
+func TestWaitInf(t *testing.T) {
+ lim := NewLimiter(Inf, 0)
+
+ runWait(t, lim, wait{"exceed-burst-no-error", context.Background(), 3, 0, true})
+}
+
+func BenchmarkAllowN(b *testing.B) {
+ lim := NewLimiter(Every(1*time.Second), 1)
+ now := time.Now()
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ lim.AllowN(now, 1)
+ }
+ })
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc
new file mode 100644
index 000000000..730e569b0
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc
@@ -0,0 +1 @@
+'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file
diff --git a/vendor/gopkg.in/square/go-jose.v1/.gitignore b/vendor/gopkg.in/square/go-jose.v1/.gitignore
new file mode 100644
index 000000000..5b4d73b68
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/.gitignore
@@ -0,0 +1,7 @@
+*~
+.*.swp
+*.out
+*.test
+*.pem
+*.cov
+jose-util/jose-util
diff --git a/vendor/gopkg.in/square/go-jose.v1/.travis.yml b/vendor/gopkg.in/square/go-jose.v1/.travis.yml
new file mode 100644
index 000000000..c38cd007d
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/.travis.yml
@@ -0,0 +1,45 @@
+language: go
+
+sudo: false
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+- 1.3
+- 1.4
+- 1.5
+- 1.6
+- 1.7
+- tip
+
+go_import_path: gopkg.in/square/go-jose.v1
+
+before_script:
+- export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+# Install encrypted gitcookies to get around bandwidth-limits
+# that are causing Travis-CI builds to fail. For more info, see
+# https://github.com/golang/go/issues/12933
+- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
+- bash .gitcookies.sh || true
+- go get github.com/wadey/gocovmerge
+- go get github.com/mattn/goveralls
+- go get golang.org/x/tools/cmd/cover || true
+- go get code.google.com/p/go.tools/cmd/cover || true
+- pip install cram --user `whoami`
+
+script:
+- go test . -v -covermode=count -coverprofile=profile.cov
+- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
+- go test ./json -v # no coverage for forked encoding/json package
+- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t
+- cd ..
+
+after_success:
+- gocovmerge *.cov */*.cov > merged.coverprofile
+- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci
+
diff --git a/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md
new file mode 100644
index 000000000..97e61dbb6
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md
@@ -0,0 +1,10 @@
+Serious about security
+======================
+
+Square recognizes the important contributions the security research community
+can make. We therefore encourage reporting security issues with the code
+contained in this repository.
+
+If you believe you have discovered a security vulnerability, please follow the
+guidelines at <https://hackerone.com/square-open-source>.
+
diff --git a/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md
new file mode 100644
index 000000000..61b183651
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+[Individual Contributor License Agreement][1].
+
+ [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
diff --git a/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/gopkg.in/square/go-jose.v1/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/gopkg.in/square/go-jose.v1/README.md
new file mode 100644
index 000000000..60293ffa2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/README.md
@@ -0,0 +1,212 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE)
+[![release](https://img.shields.io/github/release/square/go-jose.svg?style=flat)](https://github.com/square/go-jose/releases)
+[![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose)
+[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. For the moment, it mainly focuses on encryption
+and signing based on the JSON Web Encryption and JSON Web Signature standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516)
+standard (RFC 7516) and
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515)
+standard (RFC 7515). Tables of supported algorithms are shown below.
+The library supports both the compact and full serialization formats, and has
+optional support for multiple recipients. It also comes with a small
+command-line utility
+([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages. If you do not like this behavior, you can use the
+`std_json` build tag to disable it (though we do not recommend doing so).
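+
+For example, a consumer that prefers the standard library behavior could opt
+out when building its own project (a minimal sketch; the exact invocation
+depends on your build setup):
+
+    go build -tags std_json ./...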
+
+### Versions
+
+We use [gopkg.in](https://gopkg.in) for versioning.
+
+[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version:
+
+ import "gopkg.in/square/go-jose.v1"
+
+The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain
+backwards compatible. We're currently sketching out ideas for a new version, to
+clean up the interface a bit. If you have ideas or feature requests [please let
+us know](https://github.com/square/go-jose/issues/64)!
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the
+[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The
+[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants)
+has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES<sup>1</sup>
+ Direct encryption | dir<sup>1</sup>
+
+<sup>1. Not supported in multi-recipient mode</sup>
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Note that if you are creating a new encrypter or signer with a
+JsonWebKey, the key id of the JsonWebKey (if present) will be added to any
+resulting messages.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
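+
+As a minimal sketch of the key id behavior noted above (the key id
+`"example-key-id"` is just a placeholder), a signer could be constructed from
+a JsonWebKey like so:
+
+```Go
+// Wrap an RSA private key in a JsonWebKey carrying a key id. Messages
+// produced by this signer should then include the key id in their headers.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+ panic(err)
+}
+
+jwk := &JsonWebKey{Key: privateKey, KeyID: "example-key-id"}
+
+signer, err := NewSigner(RS256, jwk)
+if err != nil {
+ panic(err)
+}
+```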
+
+## Examples
+
+Encryption/decryption example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+ panic(err)
+}
+
+// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
+// indicate that the selected algorithm(s) are not currently supported.
+publicKey := &privateKey.PublicKey
+encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+if err != nil {
+ panic(err)
+}
+
+// Encrypt a sample plaintext. Calling the encrypter returns an encrypted
+// JWE object, which can then be serialized for output afterwards. An error
+// would indicate a problem in an underlying cryptographic primitive.
+var plaintext = []byte("Lorem ipsum dolor sit amet")
+object, err := encrypter.Encrypt(plaintext)
+if err != nil {
+ panic(err)
+}
+
+// Serialize the encrypted object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, encrypted JWE object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseEncrypted(serialized)
+if err != nil {
+ panic(err)
+}
+
+// Now we can decrypt and get back our original plaintext. An error here
+// would indicate the the message failed to decrypt, e.g. because the auth
+// tag was broken or the message was tampered with.
+decrypted, err := object.Decrypt(privateKey)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println(string(decrypted))
+// output: Lorem ipsum dolor sit amet
+```
+
+Signing/verification example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+ panic(err)
+}
+
+// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
+signer, err := NewSigner(PS512, privateKey)
+if err != nil {
+ panic(err)
+}
+
+// Sign a sample payload. Calling the signer returns a protected JWS object,
+// which can then be serialized for output afterwards. An error would
+// indicate a problem in an underlying cryptographic primitive.
+var payload = []byte("Lorem ipsum dolor sit amet")
+object, err := signer.Sign(payload)
+if err != nil {
+ panic(err)
+}
+
+// Serialize the signed object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, protected JWS object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseSigned(serialized)
+if err != nil {
+ panic(err)
+}
+
+// Now we can verify the signature on the payload. An error here would
+// indicate the the message failed to verify, e.g. because the signature was
+// broken or the message was tampered with.
+output, err := object.Verify(&privateKey.PublicKey)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println(string(output))
+// output: Lorem ipsum dolor sit amet
+```
+
+More examples can be found in the [Godoc
+reference](https://godoc.org/github.com/square/go-jose) for this package. The
+[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might
+be useful as an example.
diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go
new file mode 100644
index 000000000..cd36c21da
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go
@@ -0,0 +1,520 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "gopkg.in/square/go-jose.v1/cipher"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: &JsonWebKey{
+ Key: &privateKey.PublicKey,
+ },
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: &JsonWebKey{
+ Key: &privateKey.PublicKey,
+ },
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13); the recover()
+ // exists only to prevent crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ headers := rawHeader{
+ Epk: &JsonWebKey{
+ Key: &priv.PublicKey,
+ },
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ if headers.Epk == nil {
+ return nil, errors.New("square/go-jose: missing epk header")
+ }
+
+ publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("square/go-jose: invalid public key in epk header")
+ }
+
+ apuData := headers.Apu.bytes()
+ apvData := headers.Apv.bytes()
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ switch KeyAlgorithm(headers.Alg) {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.Enc), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(headers.Alg, keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("square/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go
new file mode 100644
index 000000000..018ad2e2d
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go
@@ -0,0 +1,468 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "math/big"
+ "testing"
+)
+
+func TestVectorsRSA(t *testing.T) {
+ // Sources:
+ // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm
+ // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt
+ priv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: fromHexInt(`
+ a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8
+ ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c
+ bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd
+ bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93
+ ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`),
+ E: 65537,
+ },
+ D: fromHexInt(`
+ 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195
+ 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d
+ 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6
+ 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb
+ 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`),
+ Primes: []*big.Int{
+ fromHexInt(`
+ d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262
+ 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c
+ 2f26a471dcad212eac7ca39d`),
+ fromHexInt(`
+ cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3
+ 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af
+ 72bfe9a030e860b0288b5d77`),
+ },
+ }
+
+ input := fromHexBytes(
+ "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34")
+
+ expectedPKCS := fromHexBytes(`
+ 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808
+ 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d
+ 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb
+ 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b
+ cd23b528ceab3c31`)
+
+ expectedOAEP := fromHexBytes(`
+ 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4
+ 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618
+ c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6
+ 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5
+ 210035d47ac72e8a`)
+
+ // Mock random reader
+ randReader = bytes.NewReader(fromHexBytes(`
+ 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3
+ a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5
+ 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98
+ 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`))
+ defer resetRandReader()
+
+ // RSA-PKCS1v1.5 encrypt
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ encryptedPKCS, err := enc.encrypt(input, RSA1_5)
+ if err != nil {
+ t.Error("Encryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 {
+ t.Error("Output does not match expected value (PKCS1v1.5)")
+ }
+
+ // RSA-OAEP encrypt
+ encryptedOAEP, err := enc.encrypt(input, RSA_OAEP)
+ if err != nil {
+ t.Error("Encryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 {
+ t.Error("Output does not match expected value (OAEP)")
+ }
+
+ // Need fake cipher for PKCS1v1.5 decrypt
+ resetRandReader()
+ aes := newAESGCM(len(input))
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ // RSA-PKCS1v1.5 decrypt
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen)
+ if err != nil {
+ t.Error("Decryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(input, decryptedPKCS) != 0 {
+ t.Error("Output does not match expected value (PKCS1v1.5)")
+ }
+
+ // RSA-OAEP decrypt
+ decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen)
+ if err != nil {
+ t.Error("decryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(input, decryptedOAEP) != 0 {
+ t.Error("output does not match expected value (OAEP)")
+ }
+}
+
+func TestInvalidAlgorithmsRSA(t *testing.T) {
+ _, err := newRSARecipient("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = newRSASigner("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &rsaTestKey.PublicKey
+ _, err = enc.encryptKey([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ err = enc.verifyPayload([]byte{}, []byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = rsaTestKey
+ _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16})
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = dec.signPayload([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+}
+
+type failingKeyGenerator struct{}
+
+func (ctx failingKeyGenerator) keySize() int {
+ return 0
+}
+
+func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ return nil, rawHeader{}, errors.New("failed to generate key")
+}
+
+func TestPKCSKeyGeneratorFailure(t *testing.T) {
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = rsaTestKey
+ generator := failingKeyGenerator{}
+ _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator)
+ if err != ErrCryptoFailure {
+ t.Error("should return error when key generation fails")
+ }
+}
+
+func TestInvalidAlgorithmsEC(t *testing.T) {
+ _, err := newECDHRecipient("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = newECDSASigner("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ enc := new(ecEncrypterVerifier)
+ enc.publicKey = &ecTestKey256.PublicKey
+ _, err = enc.encryptKey([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+}
+
+func TestInvalidECKeyGen(t *testing.T) {
+ gen := ecKeyGenerator{
+ size: 16,
+ algID: "A128GCM",
+ publicKey: &ecTestKey256.PublicKey,
+ }
+
+ if gen.keySize() != 16 {
+ t.Error("ec key generator reported incorrect key size")
+ }
+
+ _, _, err := gen.genKey()
+ if err != nil {
+ t.Error("ec key generator failed to generate key", err)
+ }
+}
+
+func TestInvalidECDecrypt(t *testing.T) {
+ dec := ecDecrypterSigner{
+ privateKey: ecTestKey256,
+ }
+
+ generator := randomKeyGenerator{size: 16}
+
+ // Missing epk header
+ headers := rawHeader{
+ Alg: string(ECDH_ES),
+ }
+
+ _, err := dec.decryptKey(headers, nil, generator)
+ if err == nil {
+ t.Error("ec decrypter accepted object with missing epk header")
+ }
+
+ // Invalid epk header
+ headers.Epk = &JsonWebKey{}
+
+ _, err = dec.decryptKey(headers, nil, generator)
+ if err == nil {
+ t.Error("ec decrypter accepted object with invalid epk header")
+ }
+}
+
+func TestDecryptWithIncorrectSize(t *testing.T) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ payload := make([]byte, 254)
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err == nil {
+ t.Error("Invalid payload size should return error")
+ }
+
+ payload = make([]byte, 257)
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err == nil {
+ t.Error("Invalid payload size should return error")
+ }
+}
+
+func TestPKCSDecryptNeverFails(t *testing.T) {
+ // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent
+ // side-channel timing attacks (Bleichenbacher attack in particular).
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ for i := 1; i < 50; i++ {
+ payload := make([]byte, 256)
+ _, err := io.ReadFull(rand.Reader, payload)
+ if err != nil {
+ t.Error("Unable to get random data:", err)
+ return
+ }
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err != nil {
+ t.Error("PKCS1v1.5 decrypt should never fail:", err)
+ return
+ }
+ }
+}
+
+func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(32)
+
+ b.StopTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ plaintext := make([]byte, 32)
+ _, err = io.ReadFull(rand.Reader, plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ ciphertext, err := enc.encrypt(plaintext, RSA1_5)
+ if err != nil {
+ panic(err)
+ }
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ b.StartTimer()
+ _, err = dec.decrypt(ciphertext, RSA1_5, keygen)
+ b.StopTimer()
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ b.StopTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ plaintext := make([]byte, 16)
+ _, err = io.ReadFull(rand.Reader, plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ ciphertext, err := enc.encrypt(plaintext, RSA1_5)
+ if err != nil {
+ panic(err)
+ }
+
+ // Do some simple scrambling
+ ciphertext[128] ^= 0xFF
+
+ b.StartTimer()
+ _, err = dec.decrypt(ciphertext, RSA1_5, keygen)
+ b.StopTimer()
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func TestInvalidEllipticCurve(t *testing.T) {
+ signer256 := ecDecrypterSigner{privateKey: ecTestKey256}
+ signer384 := ecDecrypterSigner{privateKey: ecTestKey384}
+ signer521 := ecDecrypterSigner{privateKey: ecTestKey521}
+
+ _, err := signer256.signPayload([]byte{}, ES384)
+ if err == nil {
+ t.Error("should not generate ES384 signature with P-256 key")
+ }
+ _, err = signer256.signPayload([]byte{}, ES512)
+ if err == nil {
+ t.Error("should not generate ES512 signature with P-256 key")
+ }
+ _, err = signer384.signPayload([]byte{}, ES256)
+ if err == nil {
+ t.Error("should not generate ES256 signature with P-384 key")
+ }
+ _, err = signer384.signPayload([]byte{}, ES512)
+ if err == nil {
+ t.Error("should not generate ES512 signature with P-384 key")
+ }
+ _, err = signer521.signPayload([]byte{}, ES256)
+ if err == nil {
+ t.Error("should not generate ES256 signature with P-521 key")
+ }
+ _, err = signer521.signPayload([]byte{}, ES384)
+ if err == nil {
+ t.Error("should not generate ES384 signature with P-521 key")
+ }
+}
+
+func TestInvalidECPublicKey(t *testing.T) {
+ // Invalid key
+ invalid := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("MTEx"),
+ Y: fromBase64Int("MTEx"),
+ },
+ D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="),
+ }
+
+ headers := rawHeader{
+ Alg: string(ECDH_ES),
+ Epk: &JsonWebKey{
+ Key: &invalid.PublicKey,
+ },
+ }
+
+ dec := ecDecrypterSigner{
+ privateKey: ecTestKey256,
+ }
+
+ _, err := dec.decryptKey(headers, nil, randomKeyGenerator{size: 16})
+ if err == nil {
+ t.Fatal("decrypter accepted JWS with invalid ECDH public key")
+ }
+}
+
+func TestInvalidAlgorithmEC(t *testing.T) {
+ err := ecEncrypterVerifier{publicKey: &ecTestKey256.PublicKey}.verifyPayload([]byte{}, []byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Fatal("should not accept invalid/unsupported algorithm")
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
new file mode 100644
index 000000000..126b85ce2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go
new file mode 100644
index 000000000..40bcb20fa
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go
@@ -0,0 +1,498 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestInvalidInputs(t *testing.T) {
+ key := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }
+
+ nonce := []byte{
+ 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145}
+
+ aead, _ := NewCBCHMAC(key, aes.NewCipher)
+ ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad"))
+
+ // Changed AAD, must fail
+ _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID"))
+ if err == nil {
+ t.Error("must detect invalid aad")
+ }
+
+ // Empty ciphertext, must fail
+ _, err = aead.Open(nil, nonce, []byte{}, []byte("aad"))
+ if err == nil {
+ t.Error("must detect invalid/empty ciphertext")
+ }
+
+ // Corrupt ciphertext, must fail
+ corrupt := make([]byte, len(ciphertext))
+ copy(corrupt, ciphertext)
+ corrupt[0] ^= 0xFF
+
+ _, err = aead.Open(nil, nonce, corrupt, []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt ciphertext")
+ }
+
+ // Corrupt authtag, must fail
+ copy(corrupt, ciphertext)
+ corrupt[len(ciphertext)-1] ^= 0xFF
+
+ _, err = aead.Open(nil, nonce, corrupt, []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt authtag")
+ }
+
+ // Truncated data, must fail
+ _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt authtag")
+ }
+}
+
+func TestVectorsAESCBC128(t *testing.T) {
+ // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2
+ plaintext := []byte{
+ 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32,
+ 112, 114, 111, 115, 112, 101, 114, 46}
+
+ aad := []byte{
+ 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69,
+ 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105,
+ 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85,
+ 50, 73, 110, 48}
+
+ expectedCiphertext := []byte{
+ 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6,
+ 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143,
+ 112, 56, 102}
+
+ expectedAuthtag := []byte{
+ 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100,
+ 191}
+
+ key := []byte{
+ 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206,
+ 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207}
+
+ nonce := []byte{
+ 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101}
+
+ enc, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ t.Error("Unable to encrypt:", err)
+ return
+ }
+ out := enc.Seal(nil, nonce, plaintext, aad)
+
+ if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 {
+ t.Error("Ciphertext did not match")
+ }
+ if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 {
+ t.Error("Auth tag did not match")
+ }
+}
+
+func TestVectorsAESCBC256(t *testing.T) {
+ // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4
+ plaintext := []byte{
+ 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20,
+ 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62,
+ 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x61, 0x6c, 0x6c, 0x20, 0x69,
+ 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f,
+ 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65}
+
+ aad := []byte{
+ 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63,
+ 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20,
+ 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73}
+
+ expectedCiphertext := []byte{
+ 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd,
+ 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd,
+ 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2,
+ 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b,
+ 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1,
+ 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3,
+ 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e,
+ 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b,
+ 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6}
+
+ expectedAuthtag := []byte{
+ 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf,
+ 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5}
+
+ key := []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f}
+
+ nonce := []byte{
+ 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04}
+
+ enc, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ t.Error("Unable to encrypt:", err)
+ return
+ }
+ out := enc.Seal(nil, nonce, plaintext, aad)
+
+ if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 {
+ t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext)
+ }
+ if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 {
+ t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag)
+ }
+}
+
+func TestAESCBCRoundtrip(t *testing.T) {
+ key128 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ key192 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7}
+
+ key256 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ RunRoundtrip(t, key128, nonce)
+ RunRoundtrip(t, key192, nonce)
+ RunRoundtrip(t, key256, nonce)
+}
+
+func RunRoundtrip(t *testing.T, key, nonce []byte) {
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ if aead.NonceSize() != len(nonce) {
+ panic("invalid nonce")
+ }
+
+ // Test pre-existing data in dst buffer
+ dst := []byte{15, 15, 15, 15}
+ plaintext := []byte{0, 0, 0, 0}
+ aad := []byte{4, 3, 2, 1}
+
+ result := aead.Seal(dst, nonce, plaintext, aad)
+ if bytes.Compare(dst, result[:4]) != 0 {
+ t.Error("Existing data in dst not preserved")
+ }
+
+ // Test pre-existing (empty) dst buffer with sufficient capacity
+ dst = make([]byte, 256)[:0]
+ result, err = aead.Open(dst, nonce, result[4:], aad)
+ if err != nil {
+ panic(err)
+ }
+
+ if bytes.Compare(result, plaintext) != 0 {
+ t.Error("Plaintext does not match output")
+ }
+}
+
+func TestAESCBCOverhead(t *testing.T) {
+ aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ if aead.Overhead() != 32 {
+ t.Error("CBC-HMAC reports incorrect overhead value")
+ }
+}
+
+func TestPadding(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ slice := make([]byte, i)
+ padded := padBuffer(slice, 16)
+ if len(padded)%16 != 0 {
+ t.Error("failed to pad slice properly", i)
+ return
+ }
+ unpadded, err := unpadBuffer(padded, 16)
+ if err != nil || len(unpadded) != i {
+ t.Error("failed to unpad slice properly", i)
+ return
+ }
+ }
+}
+
+func TestInvalidKey(t *testing.T) {
+ key := make([]byte, 30)
+ _, err := NewCBCHMAC(key, aes.NewCipher)
+ if err == nil {
+ t.Error("should not be able to instantiate CBC-HMAC with invalid key")
+ }
+}
+
+func TestTruncatedCiphertext(t *testing.T) {
+ key := make([]byte, 32)
+ nonce := make([]byte, 16)
+ data := make([]byte, 32)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ ctx := aead.(*cbcAEAD)
+ ct := aead.Seal(nil, nonce, data, nil)
+
+ // Truncated ciphertext, but with correct auth tag
+ truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], uint64(len(ct))-2)
+ copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes]))
+
+ // Open should fail
+ _, err = aead.Open(nil, nonce, truncated, nil)
+ if err == nil {
+ t.Error("open on truncated ciphertext should fail")
+ }
+}
+
+func TestInvalidPaddingOpen(t *testing.T) {
+ key := make([]byte, 32)
+ nonce := make([]byte, 16)
+
+ // Plaintext with invalid padding
+ plaintext := padBuffer(make([]byte, 28), aes.BlockSize)
+ plaintext[len(plaintext)-1] = 0xFF
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ block, _ := aes.NewCipher(key)
+ cbc := cipher.NewCBCEncrypter(block, nonce)
+ buffer := append([]byte{}, plaintext...)
+ cbc.CryptBlocks(buffer, buffer)
+
+ aead, _ := NewCBCHMAC(key, aes.NewCipher)
+ ctx := aead.(*cbcAEAD)
+
+ // Mutated ciphertext, but with correct auth tag
+ size := uint64(len(buffer))
+ ciphertext, tail := resize(buffer, size+(uint64(len(key))/2))
+ copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size]))
+
+ // Open should fail (b/c of invalid padding, even though tag matches)
+ _, err := aead.Open(nil, nonce, ciphertext, nil)
+ if err == nil || !strings.Contains(err.Error(), "invalid padding") {
+ t.Error("no or unexpected error on open with invalid padding:", err)
+ }
+}
+
+func TestInvalidPadding(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ slice := make([]byte, i)
+ padded := padBuffer(slice, 16)
+ if len(padded)%16 != 0 {
+ t.Error("failed to pad slice properly", i)
+ return
+ }
+
+ paddingBytes := 16 - (i % 16)
+
+ // Mutate padding for testing
+ for j := 1; j <= paddingBytes; j++ {
+ mutated := make([]byte, len(padded))
+ copy(mutated, padded)
+ mutated[len(mutated)-j] ^= 0xFF
+
+ _, err := unpadBuffer(mutated, 16)
+ if err == nil {
+ t.Error("unpad on invalid padding should fail", i)
+ return
+ }
+ }
+
+ // Test truncated padding
+ _, err := unpadBuffer(padded[:len(padded)-1], 16)
+ if err == nil {
+ t.Error("unpad on truncated padding should fail", i)
+ return
+ }
+ }
+}
+
+func TestZeroLengthPadding(t *testing.T) {
+ data := make([]byte, 16)
+ data, err := unpadBuffer(data, 16)
+ if err == nil {
+ t.Error("padding with 0x00 should never be valid")
+ }
+}
+
+func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) {
+ key := make([]byte, keySize*2)
+ nonce := make([]byte, 16)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ chunk := make([]byte, chunkSize)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ b.SetBytes(int64(chunkSize))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ aead.Seal(nil, nonce, chunk, nil)
+ }
+}
+
+func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) {
+ key := make([]byte, keySize*2)
+ nonce := make([]byte, 16)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ chunk := make([]byte, chunkSize)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ out := aead.Seal(nil, nonce, chunk, nil)
+
+ b.SetBytes(int64(chunkSize))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ aead.Open(nil, nonce, out, nil)
+ }
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 1024)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 65536)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 1048576)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 67108864)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 1024)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 65536)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 1048576)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 67108864)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 65536)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 1048576)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 67108864)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 1024)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 65536)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 1048576)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 67108864)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 65536)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 1048576)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 67108864)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 1024)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 65536)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 1048576)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 67108864)
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
new file mode 100644
index 000000000..f62c3bdba
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
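
Note on the file above: NewConcatKDF exposes the Concat KDF as an io.Reader, so callers derive an arbitrary number of key bytes simply by reading from it. A minimal usage sketch, assuming the vendored package is imported by its gopkg.in path; the input values are illustrative placeholders, not a test vector:

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() is available
	"fmt"
	"io"

	josecipher "gopkg.in/square/go-jose.v1/cipher"
)

func main() {
	// Illustrative inputs: a shared secret z plus the length-prefixed context
	// fields the JWA Concat KDF expects.
	z := make([]byte, 32)
	algID := []byte{0, 0, 0, 7, 'A', '1', '2', '8', 'G', 'C', 'M'}
	ptyUInfo := []byte{0, 0, 0, 0}
	ptyVInfo := []byte{0, 0, 0, 0}
	supPubInfo := []byte{0, 0, 0, 128} // desired output size in bits

	kdf := josecipher.NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, nil)

	// Reads never fail; internal caching lets callers read in arbitrary chunk sizes.
	key := make([]byte, 16)
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte key: %x\n", len(key), key)
}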
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go
new file mode 100644
index 000000000..48219b3e1
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go
@@ -0,0 +1,150 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "testing"
+)
+
+// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt
+func TestVectorConcatKDF(t *testing.T) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77}
+
+ ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101}
+ ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98}
+
+ supPubInfo := []byte{0, 0, 0, 128}
+ supPrivInfo := []byte{}
+
+ expected := []byte{
+ 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26}
+
+ ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ out0 := make([]byte, 9)
+ out1 := make([]byte, 7)
+
+ read0, err := ckdf.Read(out0)
+ if err != nil {
+ t.Error("error when reading from concat kdf reader", err)
+ return
+ }
+
+ read1, err := ckdf.Read(out1)
+ if err != nil {
+ t.Error("error when reading from concat kdf reader", err)
+ return
+ }
+
+ if read0+read1 != len(out0)+len(out1) {
+ t.Error("did not receive enough bytes from concat kdf reader")
+ return
+ }
+
+ out := []byte{}
+ out = append(out, out0...)
+ out = append(out, out1...)
+
+ if bytes.Compare(out, expected) != 0 {
+ t.Error("did not receive expected output from concat kdf reader")
+ return
+ }
+}
+
+func TestCache(t *testing.T) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}
+
+ ptyUInfo := []byte{1, 2, 3, 4}
+ ptyVInfo := []byte{4, 3, 2, 1}
+
+ supPubInfo := []byte{}
+ supPrivInfo := []byte{}
+
+ outputs := [][]byte{}
+
+ // Read the same amount of data in different chunk sizes
+ chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512}
+
+ for _, c := range chunkSizes {
+ out := make([]byte, 1024)
+ reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ for i := 0; i < 1024; i += c {
+ _, _ = reader.Read(out[i : i+c])
+ }
+
+ outputs = append(outputs, out)
+ }
+
+ for i := range outputs {
+ if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 {
+ t.Error("not all outputs from KDF matched")
+ }
+ }
+}
+
+func benchmarkKDF(b *testing.B, total int) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}
+
+ ptyUInfo := []byte{1, 2, 3, 4}
+ ptyVInfo := []byte{4, 3, 2, 1}
+
+ supPubInfo := []byte{}
+ supPrivInfo := []byte{}
+
+ out := make([]byte, total)
+ reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ b.ResetTimer()
+ b.SetBytes(int64(total))
+ for i := 0; i < b.N; i++ {
+ _, _ = reader.Read(out)
+ }
+}
+
+func BenchmarkConcatKDF_1k(b *testing.B) {
+ benchmarkKDF(b, 1024)
+}
+
+func BenchmarkConcatKDF_64k(b *testing.B) {
+ benchmarkKDF(b, 65536)
+}
+
+func BenchmarkConcatKDF_1MB(b *testing.B) {
+ benchmarkKDF(b, 1048576)
+}
+
+func BenchmarkConcatKDF_64MB(b *testing.B) {
+ benchmarkKDF(b, 67108864)
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go
new file mode 100644
index 000000000..f23d49e1f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go
@@ -0,0 +1,62 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+ return key
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
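
Note on the file above: DeriveECDHES computes the ECDH shared point and feeds it through the Concat KDF to produce a fixed-size key, so both parties derive the same key as long as they agree on the algorithm name, the apu/apv values, and the output size. A minimal sketch with freshly generated P-256 keys, purely for illustration:

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v1/cipher"
)

func main() {
	// Two P-256 key pairs; in JWE the first would be the sender's ephemeral
	// key and the second the recipient's static key (illustrative only).
	ephemeral, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	static, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Each side combines its private key with the other side's public key;
	// both arrive at the same 16-byte content-encryption key for "A128GCM".
	senderKey := josecipher.DeriveECDHES("A128GCM", nil, nil, ephemeral, &static.PublicKey, 16)
	recipientKey := josecipher.DeriveECDHES("A128GCM", nil, nil, static, &ephemeral.PublicKey, 16)

	fmt.Println(len(senderKey), bytes.Equal(senderKey, recipientKey)) // 16 true
}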
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go
new file mode 100644
index 000000000..ca2c508dd
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go
@@ -0,0 +1,115 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/base64"
+ "math/big"
+ "testing"
+)
+
+// Example keys from JWA, Appendix C
+var aliceKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="),
+ Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="),
+ },
+ D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="),
+}
+
+var bobKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="),
+ Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="),
+ },
+ D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="),
+}
+
+// Build big int from base64-encoded string. Strips whitespace (for testing).
+func fromBase64Int(data string) *big.Int {
+ val, err := base64.URLEncoding.DecodeString(data)
+ if err != nil {
+ panic("Invalid test data")
+ }
+ return new(big.Int).SetBytes(val)
+}
+
+func TestVectorECDHES(t *testing.T) {
+ apuData := []byte("Alice")
+ apvData := []byte("Bob")
+
+ expected := []byte{
+ 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26}
+
+ output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16)
+
+ if bytes.Compare(output, expected) != 0 {
+ t.Error("output did not match what we expect, got", output, "wanted", expected)
+ }
+}
+
+func TestInvalidECPublicKey(t *testing.T) {
+ defer func() { recover() }()
+
+ // Invalid key
+ invalid := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("MTEx"),
+ Y: fromBase64Int("MTEx"),
+ },
+ D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="),
+ }
+
+ DeriveECDHES("A128GCM", []byte{}, []byte{}, bobKey, &invalid.PublicKey, 16)
+ t.Fatal("should panic if public key was invalid")
+}
+
+func BenchmarkECDHES_128(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16)
+ }
+}
+
+func BenchmarkECDHES_192(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24)
+ }
+}
+
+func BenchmarkECDHES_256(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go
new file mode 100644
index 000000000..1d36d5015
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("square/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
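
Note on the file above: KeyWrap and KeyUnwrap implement the NIST AES key wrap construction; the wrapped output is eight bytes longer than the input, and unwrapping fails unless the integrity check block matches. A minimal roundtrip sketch, assuming the vendored package is imported by its gopkg.in path and using all-zero keys purely for illustration:

package main

import (
	"bytes"
	"crypto/aes"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v1/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key (all zeroes, illustrative only)
	cek := make([]byte, 32) // content-encryption key to wrap; must be a multiple of 8 bytes

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}

	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}

	// The wrapped key carries an extra 8-byte integrity block.
	fmt.Println(len(wrapped), bytes.Equal(cek, unwrapped)) // 40 true
}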
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go
new file mode 100644
index 000000000..ceecf812b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go
@@ -0,0 +1,133 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/aes"
+ "encoding/hex"
+ "testing"
+)
+
+func TestAesKeyWrap(t *testing.T) {
+ // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf
+ kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF")
+
+ expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5")
+
+ kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617")
+ cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF")
+
+ expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D")
+
+ kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F")
+ cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607")
+
+ expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1")
+
+ block0, _ := aes.NewCipher(kek0)
+ block1, _ := aes.NewCipher(kek1)
+ block2, _ := aes.NewCipher(kek2)
+
+ out0, _ := KeyWrap(block0, cek0)
+ out1, _ := KeyWrap(block1, cek1)
+ out2, _ := KeyWrap(block2, cek2)
+
+ if bytes.Compare(out0, expected0) != 0 {
+ t.Error("output 0 not as expected, got", out0, "wanted", expected0)
+ }
+
+ if bytes.Compare(out1, expected1) != 0 {
+ t.Error("output 1 not as expected, got", out1, "wanted", expected1)
+ }
+
+ if bytes.Compare(out2, expected2) != 0 {
+ t.Error("output 2 not as expected, got", out2, "wanted", expected2)
+ }
+
+ unwrap0, _ := KeyUnwrap(block0, out0)
+ unwrap1, _ := KeyUnwrap(block1, out1)
+ unwrap2, _ := KeyUnwrap(block2, out2)
+
+ if bytes.Compare(unwrap0, cek0) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0)
+ }
+
+ if bytes.Compare(unwrap1, cek1) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1)
+ }
+
+ if bytes.Compare(unwrap2, cek2) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2)
+ }
+}
+
+func TestAesKeyWrapInvalid(t *testing.T) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+
+ // Invalid unwrap input (bit flipped)
+ input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5")
+
+ block, _ := aes.NewCipher(kek)
+
+ _, err := KeyUnwrap(block, input0)
+ if err == nil {
+ t.Error("key unwrap failed to detect invalid input")
+ }
+
+ // Invalid unwrap input (truncated)
+ input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF")
+
+ _, err = KeyUnwrap(block, input1)
+ if err == nil {
+ t.Error("key unwrap failed to detect truncated input")
+ }
+
+ // Invalid wrap input (not multiple of 8)
+ input2, _ := hex.DecodeString("0123456789ABCD")
+
+ _, err = KeyWrap(block, input2)
+ if err == nil {
+ t.Error("key wrap accepted invalid input")
+ }
+
+}
+
+func BenchmarkAesKeyWrap(b *testing.B) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
+
+ block, _ := aes.NewCipher(kek)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ KeyWrap(block, key)
+ }
+}
+
+func BenchmarkAesKeyUnwrap(b *testing.B) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5")
+
+ block, _ := aes.NewCipher(kek)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ KeyUnwrap(block, input)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/gopkg.in/square/go-jose.v1/crypter.go
new file mode 100644
index 000000000..b3bdaec80
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/crypter.go
@@ -0,0 +1,416 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JsonWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
+ SetCompression(alg CompressionAlgorithm)
+}
+
+// MultiEncrypter represents an encrypter which supports multiple recipients.
+type MultiEncrypter interface {
+ Encrypt(plaintext []byte) (*JsonWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
+ SetCompression(alg CompressionAlgorithm)
+ AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// SetCompression sets a compression algorithm to be applied before encryption.
+func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) {
+ ctx.compressionAlg = compressionAlg
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ compressionAlg: NONE,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := encryptionKey.(type) {
+ case *JsonWebKey:
+ keyID = encryptionKey.KeyID
+ rawKey = encryptionKey.Key
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch alg {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: rawKey.([]byte),
+ }
+ recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte))
+ if keyID != "" {
+ recipient.keyID = keyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipient}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ typeOf := reflect.TypeOf(rawKey)
+ if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: rawKey.(*ecdsa.PublicKey),
+ }
+ recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey))
+ if keyID != "" {
+ recipient.keyID = keyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipient}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.AddRecipient(alg, encryptionKey)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ compressionAlg: NONE,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) {
+ var recipient recipientKeyInfo
+
+ switch alg {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg)
+ }
+
+ recipient, err = makeJWERecipient(alg, encryptionKey)
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipient)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case *JsonWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ if err == nil && encryptionKey.KeyID != "" {
+ recipient.keyID = encryptionKey.KeyID
+ }
+ return recipient, err
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+ }
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case *JsonWebKey:
+ return newDecrypter(decryptionKey.Key)
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) {
+ obj := &JsonWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{
+ Enc: ctx.contentAlg,
+ }
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ recipient.header.Alg = string(info.keyAlg)
+ if info.keyID != "" {
+ recipient.header.Kid = info.keyID
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.Zip = ctx.compressionAlg
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+// Decrypt and validate the object and return the plaintext. Note that this
+// function does not support multi-recipient, if you desire multi-recipient
+// decryption use DecryptMulti instead.
+func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ if len(headers.Crit) > 0 {
+ return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.Enc)
+ if cipher == nil {
+ return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if obj.protected.Zip != "" {
+ plaintext, err = decompress(obj.protected.Zip, plaintext)
+ }
+
+ return plaintext, err
+}
+
+// DecryptMulti decrypts and validates the object and returns the plaintexts,
+// with support for multiple recipients. It returns the index of the recipient
+// for which the decryption was successful, the merged headers for that recipient,
+// and the plaintext.
+func (obj JsonWebEncryption) DecryptMulti(decryptionKey interface{}) (int, JoseHeader, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ if len(globalHeaders.Crit) > 0 {
+ return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return -1, JoseHeader{}, nil, err
+ }
+
+ cipher := getContentCipher(globalHeaders.Enc)
+ if cipher == nil {
+ return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(globalHeaders.Enc))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil || err != nil {
+ return -1, JoseHeader{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if obj.protected.Zip != "" {
+ plaintext, err = decompress(obj.protected.Zip, plaintext)
+ }
+
+ return index, headers.sanitized(), plaintext, err
+}
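
Note on the file above: the encrypter API is the package's main entry point. NewEncrypter builds a single-recipient encrypter, Encrypt produces a JsonWebEncryption object, and Decrypt on a parsed object recovers the plaintext. A minimal end-to-end sketch, assuming the vendored package is imported by its gopkg.in path and using a freshly generated RSA key purely for illustration:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v1"
)

func main() {
	// Freshly generated RSA key; real callers would load the recipient's key instead.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Encrypt to one recipient with RSA-OAEP key wrapping and AES-128-GCM content encryption.
	enc, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, &priv.PublicKey)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}

	// Compact serialization is the usual dotted JWE wire form.
	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(serialized)
	if err != nil {
		panic(err)
	}

	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // Lorem ipsum dolor sit amet
}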
diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/gopkg.in/square/go-jose.v1/crypter_test.go
new file mode 100644
index 000000000..431f65378
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/crypter_test.go
@@ -0,0 +1,785 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+ "io"
+ "testing"
+)
+
+// We generate only a single RSA and EC key for testing, speeds up tests.
+var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048)
+
+var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+
+func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error {
+ enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey)
+ if err != nil {
+ return fmt.Errorf("error on new encrypter: %s", err)
+ }
+
+ enc.SetCompression(compressionAlg)
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := enc.EncryptWithAuthData(input, aad)
+ if err != nil {
+ return fmt.Errorf("error in encrypt: %s", err)
+ }
+
+ msg, err := serializer(obj)
+ if err != nil {
+ return fmt.Errorf("error in serializer: %s", err)
+ }
+
+ parsed, err := ParseEncrypted(msg)
+ if err != nil {
+ return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg)
+ }
+
+ // (Maybe) mangle object
+ skip := corrupter(parsed)
+ if skip {
+ return fmt.Errorf("corrupter indicated message should be skipped")
+ }
+
+ if bytes.Compare(parsed.GetAuthData(), aad) != 0 {
+ return fmt.Errorf("auth data in parsed object does not match")
+ }
+
+ output, err := parsed.Decrypt(decryptionKey)
+ if err != nil {
+ return fmt.Errorf("error on decrypt: %s", err)
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input)
+ }
+
+ return nil
+}
+
+func TestRoundtripsJWE(t *testing.T) {
+ // Test matrix
+ keyAlgs := []KeyAlgorithm{
+ DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW,
+ RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+ zipAlgs := []CompressionAlgorithm{NONE, DEFLATE}
+
+ serializers := []func(*JsonWebEncryption) (string, error){
+ func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ // Note: can't use AAD with compact serialization
+ aads := [][]byte{
+ nil,
+ []byte("Ut enim ad minim veniam"),
+ }
+
+ // Test all different configurations
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for _, zip := range zipAlgs {
+ for i, serializer := range serializers {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec)
+ if err != nil {
+ t.Error(err, alg, enc, zip, i)
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestRoundtripsJWECorrupted(t *testing.T) {
+ // Test matrix
+ keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+ zipAlgs := []CompressionAlgorithm{NONE, DEFLATE}
+
+ serializers := []func(*JsonWebEncryption) (string, error){
+ func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ bitflip := func(slice []byte) bool {
+ if len(slice) > 0 {
+ slice[0] ^= 0xFF
+ return false
+ }
+ return true
+ }
+
+ corrupters := []func(*JsonWebEncryption) bool{
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid ciphertext
+ return bitflip(obj.ciphertext)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid auth tag
+ return bitflip(obj.tag)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid AAD
+ return bitflip(obj.aad)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Mess with encrypted key
+ return bitflip(obj.recipients[0].encryptedKey)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Mess with GCM-KW auth tag
+ return bitflip(obj.protected.Tag.bytes())
+ },
+ }
+
+ // Note: can't use AAD with compact serialization
+ aads := [][]byte{
+ nil,
+ []byte("Ut enim ad minim veniam"),
+ }
+
+ // Test all different configurations
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for _, zip := range zipAlgs {
+ for i, serializer := range serializers {
+ for j, corrupter := range corrupters {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec)
+ if err == nil {
+ t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestEncrypterWithJWKAndKeyID(t *testing.T) {
+ enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{
+ KeyID: "test-id",
+ Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
+
+ serialized1, _ := ciphertext.CompactSerialize()
+ serialized2 := ciphertext.FullSerialize()
+
+ parsed1, _ := ParseEncrypted(serialized1)
+ parsed2, _ := ParseEncrypted(serialized2)
+
+ if parsed1.Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID)
+ }
+ if parsed2.Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID)
+ }
+}
+
+func TestEncrypterWithBrokenRand(t *testing.T) {
+ keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+
+ serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ // Break rand reader
+ readers := []func() io.Reader{
+ // Totally broken
+ func() io.Reader { return bytes.NewReader([]byte{}) },
+ // Not enough bytes
+ func() io.Reader { return io.LimitReader(rand.Reader, 20) },
+ }
+
+ defer resetRandReader()
+
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for i, getReader := range readers {
+ randReader = getReader()
+ err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec)
+ if err == nil {
+ t.Error("encrypter should fail if rand is broken", i)
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestNewEncrypterErrors(t *testing.T) {
+ _, err := NewEncrypter("XYZ", "XYZ", nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid cipher")
+ }
+
+ _, err = NewMultiEncrypter("XYZ")
+ if err == nil {
+ t.Error("was able to instantiate multi-encrypter with invalid cipher")
+ }
+
+ _, err = NewEncrypter(DIRECT, A128GCM, nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid direct key")
+ }
+
+ _, err = NewEncrypter(ECDH_ES, A128GCM, nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid EC key")
+ }
+}
+
+func TestMultiRecipientJWE(t *testing.T) {
+ enc, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+ err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey)
+ if err != nil {
+ t.Fatal("error when adding RSA recipient", err)
+ }
+
+ sharedKey := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }
+
+ err = enc.AddRecipient(A256GCMKW, sharedKey)
+ if err != nil {
+ t.Fatal("error when adding AES recipient: ", err)
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := enc.Encrypt(input)
+ if err != nil {
+ t.Fatal("error in encrypt: ", err)
+ }
+
+ msg := obj.FullSerialize()
+
+ parsed, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Fatal("error in parse: ", err)
+ }
+
+ i, _, output, err := parsed.DecryptMulti(rsaTestKey)
+ if err != nil {
+ t.Fatal("error on decrypt with RSA: ", err)
+ }
+
+ if i != 0 {
+ t.Fatal("recipient index should be 0 for RSA key")
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ t.Fatal("Decrypted output does not match input: ", output, input)
+ }
+
+ i, _, output, err = parsed.DecryptMulti(sharedKey)
+ if err != nil {
+ t.Fatal("error on decrypt with AES: ", err)
+ }
+
+ if i != 1 {
+ t.Fatal("recipient index should be 1 for shared key")
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ t.Fatal("Decrypted output does not match input", output, input)
+ }
+}
+
+func TestMultiRecipientErrors(t *testing.T) {
+ enc, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ _, err = enc.Encrypt(input)
+ if err == nil {
+ t.Error("should fail when encrypting to zero recipients")
+ }
+
+ err = enc.AddRecipient(DIRECT, nil)
+ if err == nil {
+ t.Error("should reject DIRECT mode when encrypting to multiple recipients")
+ }
+
+ err = enc.AddRecipient(ECDH_ES, nil)
+ if err == nil {
+ t.Error("should reject ECDH_ES mode when encrypting to multiple recipients")
+ }
+
+ err = enc.AddRecipient(RSA1_5, nil)
+ if err == nil {
+ t.Error("should reject invalid recipient key")
+ }
+}
+
+type testKey struct {
+ enc, dec interface{}
+}
+
+func symmetricTestKey(size int) []testKey {
+ key, _, _ := randomKeyGenerator{size: size}.genKey()
+
+ return []testKey{
+ testKey{
+ enc: key,
+ dec: key,
+ },
+ testKey{
+ enc: &JsonWebKey{KeyID: "test", Key: key},
+ dec: &JsonWebKey{KeyID: "test", Key: key},
+ },
+ }
+}
+
+func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey {
+ switch keyAlg {
+ case DIRECT:
+ return symmetricTestKey(getContentCipher(encAlg).keySize())
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ return []testKey{
+ testKey{
+ dec: ecTestKey256,
+ enc: &ecTestKey256.PublicKey,
+ },
+ testKey{
+ dec: ecTestKey384,
+ enc: &ecTestKey384.PublicKey,
+ },
+ testKey{
+ dec: ecTestKey521,
+ enc: &ecTestKey521.PublicKey,
+ },
+ testKey{
+ dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256},
+ enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey},
+ },
+ }
+ case A128GCMKW, A128KW:
+ return symmetricTestKey(16)
+ case A192GCMKW, A192KW:
+ return symmetricTestKey(24)
+ case A256GCMKW, A256KW:
+ return symmetricTestKey(32)
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ return []testKey{testKey{
+ dec: rsaTestKey,
+ enc: &rsaTestKey.PublicKey,
+ }}
+ }
+
+ panic("Must update test case")
+}
+
+func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) {
+ serializer := func(obj *JsonWebEncryption) (string, error) {
+ return obj.CompactSerialize()
+ }
+
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+var (
+ chunks = map[string][]byte{
+ "1B": make([]byte, 1),
+ "64B": make([]byte, 64),
+ "1KB": make([]byte, 1024),
+ "64KB": make([]byte, 65536),
+ "1MB": make([]byte, 1048576),
+ "64MB": make([]byte, 67108864),
+ }
+
+ symKey, _, _ = randomKeyGenerator{size: 32}.genKey()
+
+ encrypters = map[string]Encrypter{
+ "OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey),
+ "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey),
+ "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey),
+ "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey),
+ "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey),
+ "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey),
+ "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey),
+ "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey),
+ "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey),
+ "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey),
+ "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey),
+ "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey),
+ "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey),
+ }
+)
+
+func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) }
+func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) }
+func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) }
+func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) }
+func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) }
+func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) }
+
+func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { benchEncrypt("1B", "PKCSAndGCM", b) }
+func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) }
+func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) }
+func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) }
+func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) }
+func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) }
+
+func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) }
+func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) }
+func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) }
+func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) }
+func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) }
+func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) }
+
+func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) }
+func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) }
+func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) }
+func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) }
+func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) }
+func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) }
+
+func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) }
+func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) }
+func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) }
+func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) }
+func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) }
+func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) }
+
+func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) }
+func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { benchEncrypt("64B", "DirectCBC128", b) }
+func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) }
+func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) }
+func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) }
+func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) }
+
+func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) }
+func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) }
+func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) }
+func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) }
+func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) }
+func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) }
+
+func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", b) }
+func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) }
+func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) }
+func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) }
+func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) }
+func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) }
+
+func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) }
+func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) }
+func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) }
+func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) }
+func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) }
+func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) }
+
+func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) }
+func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", "AESKWAndCBC256", b) }
+func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) }
+func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) }
+func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) }
+func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) }
+
+func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("1B", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("64B", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("1KB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("64KB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("1MB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchEncrypt("64MB", "ECDHOnP256AndGCM128", b)
+}
+
+func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("1B", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("64B", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("1KB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("64KB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("1MB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchEncrypt("64MB", "ECDHOnP384AndGCM128", b)
+}
+
+func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("1B", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("64B", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("1KB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("64KB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("1MB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchEncrypt("64MB", "ECDHOnP521AndGCM128", b)
+}
+
+func benchEncrypt(chunkKey, primKey string, b *testing.B) {
+ data, ok := chunks[chunkKey]
+ if !ok {
+ b.Fatalf("unknown chunk size %s", chunkKey)
+ }
+
+ enc, ok := encrypters[primKey]
+ if !ok {
+ b.Fatalf("unknown encrypter %s", primKey)
+ }
+
+ b.SetBytes(int64(len(data)))
+ for i := 0; i < b.N; i++ {
+ enc.Encrypt(data)
+ }
+}
+
+var (
+ decryptionKeys = map[string]interface{}{
+ "OAEPAndGCM": rsaTestKey,
+ "PKCSAndGCM": rsaTestKey,
+ "OAEPAndCBC": rsaTestKey,
+ "PKCSAndCBC": rsaTestKey,
+
+ "DirectGCM128": symKey,
+ "DirectCBC128": symKey,
+ "DirectGCM256": symKey,
+ "DirectCBC256": symKey,
+
+ "AESKWAndGCM128": symKey,
+ "AESKWAndCBC256": symKey,
+
+ "ECDHOnP256AndGCM128": ecTestKey256,
+ "ECDHOnP384AndGCM128": ecTestKey384,
+ "ECDHOnP521AndGCM128": ecTestKey521,
+ }
+)
+
+func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) }
+func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) }
+func BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) }
+func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) }
+func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) }
+func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) }
+
+func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) }
+func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) }
+func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) }
+func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) }
+func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) }
+func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) }
+
+func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) }
+func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) }
+func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) }
+func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) }
+func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) }
+func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) }
+
+func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) }
+func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) }
+func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) }
+func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) }
+func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", b) }
+func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) }
+
+func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) }
+func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { benchDecrypt("64B", "DirectGCM128", b) }
+func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) }
+func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) }
+func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) }
+func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) }
+
+func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) }
+func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) }
+func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) }
+func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) }
+func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) }
+func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) }
+
+func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) }
+func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) }
+func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { benchDecrypt("1KB", "DirectGCM256", b) }
+func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) }
+func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) }
+func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) }
+
+func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) }
+func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) }
+func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) }
+func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) }
+func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) }
+func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) }
+
+func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) }
+func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) }
+func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) }
+func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) }
+func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) }
+func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) }
+
+func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) }
+func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) }
+func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) }
+func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) }
+func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) }
+func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) }
+
+func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("1B", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("64B", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("1KB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("64KB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("1MB", "ECDHOnP256AndGCM128", b)
+}
+func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) {
+ benchDecrypt("64MB", "ECDHOnP256AndGCM128", b)
+}
+
+func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("1B", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("64B", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("1KB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("64KB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("1MB", "ECDHOnP384AndGCM128", b)
+}
+func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) {
+ benchDecrypt("64MB", "ECDHOnP384AndGCM128", b)
+}
+
+func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("1B", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("64B", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("1KB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("64KB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("1MB", "ECDHOnP521AndGCM128", b)
+}
+func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) {
+ benchDecrypt("64MB", "ECDHOnP521AndGCM128", b)
+}
+
+func benchDecrypt(chunkKey, primKey string, b *testing.B) {
+ chunk, ok := chunks[chunkKey]
+ if !ok {
+ b.Fatalf("unknown chunk size %s", chunkKey)
+ }
+
+ enc, ok := encrypters[primKey]
+ if !ok {
+ b.Fatalf("unknown encrypter %s", primKey)
+ }
+
+ dec, ok := decryptionKeys[primKey]
+ if !ok {
+ b.Fatalf("unknown decryption key %s", primKey)
+ }
+
+ data, err := enc.Encrypt(chunk)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.SetBytes(int64(len(chunk)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ data.Decrypt(dec)
+ }
+}
+
+func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter {
+ enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey)
+ if err != nil {
+ panic(err)
+ }
+ return enc
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/gopkg.in/square/go-jose.v1/doc.go
new file mode 100644
index 000000000..b4cd1e989
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/doc.go
@@ -0,0 +1,26 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. For the moment, it mainly focuses on
+encryption and signing based on the JSON Web Encryption and JSON Web Signature
+standards. The library supports both the compact and full serialization
+formats, and has optional support for multiple recipients.
+
+*/
+package jose // import "gopkg.in/square/go-jose.v1"
diff --git a/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/gopkg.in/square/go-jose.v1/doc_test.go
new file mode 100644
index 000000000..50468295d
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/doc_test.go
@@ -0,0 +1,226 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+)
+
+// Dummy encrypter for use in examples
+var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{})
+
+func Example_jWE() {
+ // Generate a public/private key pair to use for this example. The library
+ // also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+ // that can be used to load keys from PEM/DER-encoded data.
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
+ // indicate that the selected algorithm(s) are not currently supported.
+ publicKey := &privateKey.PublicKey
+ encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Encrypt a sample plaintext. Calling the encrypter returns an encrypted
+ // JWE object, which can then be serialized for output afterwards. An error
+ // would indicate a problem in an underlying cryptographic primitive.
+ var plaintext = []byte("Lorem ipsum dolor sit amet")
+ object, err := encrypter.Encrypt(plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ // Serialize the encrypted object using the full serialization format.
+ // Alternatively you can also use the compact format here by calling
+ // object.CompactSerialize() instead.
+ serialized := object.FullSerialize()
+
+ // Parse the serialized, encrypted JWE object. An error would indicate that
+ // the given input did not represent a valid message.
+ object, err = ParseEncrypted(serialized)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now we can decrypt and get back our original plaintext. An error here
+	// would indicate that the message failed to decrypt, e.g. because the auth
+ // tag was broken or the message was tampered with.
+ decrypted, err := object.Decrypt(privateKey)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf(string(decrypted))
+ // output: Lorem ipsum dolor sit amet
+}
+
+func Example_jWS() {
+ // Generate a public/private key pair to use for this example. The library
+ // also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+ // that can be used to load keys from PEM/DER-encoded data.
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
+ signer, err := NewSigner(PS512, privateKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Sign a sample payload. Calling the signer returns a protected JWS object,
+ // which can then be serialized for output afterwards. An error would
+ // indicate a problem in an underlying cryptographic primitive.
+ var payload = []byte("Lorem ipsum dolor sit amet")
+ object, err := signer.Sign(payload)
+ if err != nil {
+ panic(err)
+ }
+
+ // Serialize the encrypted object using the full serialization format.
+ // Alternatively you can also use the compact format here by calling
+ // object.CompactSerialize() instead.
+ serialized := object.FullSerialize()
+
+ // Parse the serialized, protected JWS object. An error would indicate that
+ // the given input did not represent a valid message.
+ object, err = ParseSigned(serialized)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now we can verify the signature on the payload. An error here would
+	// indicate that the message failed to verify, e.g. because the signature was
+ // broken or the message was tampered with.
+ output, err := object.Verify(&privateKey.PublicKey)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf(string(output))
+ // output: Lorem ipsum dolor sit amet
+}
+
+func ExampleNewEncrypter_publicKey() {
+ var publicKey *rsa.PublicKey
+
+ // Instantiate an encrypter using RSA-OAEP with AES128-GCM.
+ NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+
+ // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC.
+ NewEncrypter(RSA1_5, A128CBC_HS256, publicKey)
+}
+
+func ExampleNewEncrypter_symmetric() {
+ var sharedKey []byte
+
+ // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap.
+ NewEncrypter(A128GCMKW, A128GCM, sharedKey)
+
+ // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping.
+ NewEncrypter(DIRECT, A256GCM, sharedKey)
+}
+
+func ExampleNewSigner_publicKey() {
+ var rsaPrivateKey *rsa.PrivateKey
+ var ecdsaPrivateKey *ecdsa.PrivateKey
+
+ // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256.
+ NewSigner(RS256, rsaPrivateKey)
+
+ // Instantiate a signer using ECDSA with SHA-384.
+ NewSigner(ES384, ecdsaPrivateKey)
+}
+
+func ExampleNewSigner_symmetric() {
+ var sharedKey []byte
+
+	// Instantiate a signer using HMAC-SHA256.
+ NewSigner(HS256, sharedKey)
+
+	// Instantiate a signer using HMAC-SHA512.
+ NewSigner(HS512, sharedKey)
+}
+
+func ExampleNewMultiEncrypter() {
+ var publicKey *rsa.PublicKey
+ var sharedKey []byte
+
+ // Instantiate an encrypter using AES-GCM.
+ encrypter, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+	// Add a recipient using a shared key with AES-GCM key wrap
+ err = encrypter.AddRecipient(A128GCMKW, sharedKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Add a recipient using an RSA public key with RSA-OAEP
+ err = encrypter.AddRecipient(RSA_OAEP, publicKey)
+ if err != nil {
+ panic(err)
+ }
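+
+	// On the receiving side, each recipient would parse the serialized message
+	// with ParseEncrypted and decrypt it with DecryptMulti using their own key.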
+}
+
+func ExampleNewMultiSigner() {
+ var privateKey *rsa.PrivateKey
+ var sharedKey []byte
+
+ // Instantiate a signer for multiple recipients.
+ signer := NewMultiSigner()
+
+ // Add a recipient using a shared key with HMAC-SHA256
+ err := signer.AddRecipient(HS256, sharedKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Add a recipient using an RSA private key with RSASSA-PSS with SHA384
+ err = signer.AddRecipient(PS384, privateKey)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleEncrypter_encrypt() {
+ // Encrypt a plaintext in order to get an encrypted JWE object.
+ var plaintext = []byte("This is a secret message")
+
+ encrypter.Encrypt(plaintext)
+}
+
+func ExampleEncrypter_encryptWithAuthData() {
+ // Encrypt a plaintext in order to get an encrypted JWE object. Also attach
+ // some additional authenticated data (AAD) to the object. Note that objects
+ // with attached AAD can only be represented using full serialization.
+ var plaintext = []byte("This is a secret message")
+ var aad = []byte("This is authenticated, but public data")
+
+ encrypter.EncryptWithAuthData(plaintext, aad)
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/gopkg.in/square/go-jose.v1/encoding.go
new file mode 100644
index 000000000..dde0a42db
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/encoding.go
@@ -0,0 +1,193 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "regexp"
+ "strings"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+var stripWhitespaceRegex = regexp.MustCompile("\\s")
+
+// Url-safe base64 encode that strips padding
+func base64URLEncode(data []byte) string {
+ var result = base64.URLEncoding.EncodeToString(data)
+ return strings.TrimRight(result, "=")
+}
+
+// Url-safe base64 decoder that adds padding
+func base64URLDecode(data string) ([]byte, error) {
+ var missing = (4 - len(data)%4) % 4
+ data += strings.Repeat("=", missing)
+ return base64.URLEncoding.DecodeString(data)
+}
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/square/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ return stripWhitespaceRegex.ReplaceAllString(data, "")
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Compress with DEFLATE
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// Decompress with DEFLATE
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ _, err := io.Copy(output, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64URLDecode(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64URLEncode(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/gopkg.in/square/go-jose.v1/encoding_test.go
new file mode 100644
index 000000000..e2f8d979c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/encoding_test.go
@@ -0,0 +1,173 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestBase64URLEncode(t *testing.T) {
+ // Test arrays with various sizes
+ if base64URLEncode([]byte{}) != "" {
+ t.Error("failed to encode empty array")
+ }
+
+ if base64URLEncode([]byte{0}) != "AA" {
+ t.Error("failed to encode [0x00]")
+ }
+
+ if base64URLEncode([]byte{0, 1}) != "AAE" {
+ t.Error("failed to encode [0x00, 0x01]")
+ }
+
+ if base64URLEncode([]byte{0, 1, 2}) != "AAEC" {
+ t.Error("failed to encode [0x00, 0x01, 0x02]")
+ }
+
+ if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" {
+ t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]")
+ }
+}
+
+func TestBase64URLDecode(t *testing.T) {
+ // Test arrays with various sizes
+ val, err := base64URLDecode("")
+ if err != nil || !bytes.Equal(val, []byte{}) {
+ t.Error("failed to decode empty array")
+ }
+
+ val, err = base64URLDecode("AA")
+ if err != nil || !bytes.Equal(val, []byte{0}) {
+ t.Error("failed to decode [0x00]")
+ }
+
+ val, err = base64URLDecode("AAE")
+ if err != nil || !bytes.Equal(val, []byte{0, 1}) {
+ t.Error("failed to decode [0x00, 0x01]")
+ }
+
+ val, err = base64URLDecode("AAEC")
+ if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) {
+ t.Error("failed to decode [0x00, 0x01, 0x02]")
+ }
+
+ val, err = base64URLDecode("AAECAw")
+ if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) {
+ t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]")
+ }
+}
+
+func TestDeflateRoundtrip(t *testing.T) {
+ original := []byte("Lorem ipsum dolor sit amet")
+
+ compressed, err := deflate(original)
+ if err != nil {
+ panic(err)
+ }
+
+ output, err := inflate(compressed)
+ if err != nil {
+ panic(err)
+ }
+
+ if bytes.Compare(output, original) != 0 {
+ t.Error("Input and output do not match")
+ }
+}
+
+func TestInvalidCompression(t *testing.T) {
+ _, err := compress("XYZ", []byte{})
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ _, err = decompress("XYZ", []byte{})
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ _, err = decompress(DEFLATE, []byte{1, 2, 3, 4})
+ if err == nil {
+ t.Error("should not accept invalid data")
+ }
+}
+
+func TestByteBufferTrim(t *testing.T) {
+ buf := newBufferFromInt(1)
+ if !bytes.Equal(buf.data, []byte{1}) {
+ t.Error("Byte buffer for integer '1' should contain [0x01]")
+ }
+
+ buf = newBufferFromInt(65537)
+ if !bytes.Equal(buf.data, []byte{1, 0, 1}) {
+ t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]")
+ }
+}
+
+func TestFixedSizeBuffer(t *testing.T) {
+ data0 := []byte{}
+ data1 := []byte{1}
+ data2 := []byte{1, 2}
+ data3 := []byte{1, 2, 3}
+ data4 := []byte{1, 2, 3, 4}
+
+ buf0 := newFixedSizeBuffer(data0, 4)
+ buf1 := newFixedSizeBuffer(data1, 4)
+ buf2 := newFixedSizeBuffer(data2, 4)
+ buf3 := newFixedSizeBuffer(data3, 4)
+ buf4 := newFixedSizeBuffer(data4, 4)
+
+ if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) {
+ t.Error("Invalid padded buffer for buf0")
+ }
+ if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) {
+ t.Error("Invalid padded buffer for buf1")
+ }
+ if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) {
+ t.Error("Invalid padded buffer for buf2")
+ }
+ if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) {
+ t.Error("Invalid padded buffer for buf3")
+ }
+ if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) {
+ t.Error("Invalid padded buffer for buf4")
+ }
+}
+
+func TestSerializeJSONRejectsNil(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil || !strings.Contains(r.(string), "nil pointer") {
+ t.Error("serialize function should not accept nil pointer")
+ }
+ }()
+
+ mustSerializeJSON(nil)
+}
+
+func TestFixedSizeBufferTooLarge(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ t.Error("should not be able to create fixed size buffer with oversized data")
+ }
+ }()
+
+ newFixedSizeBuffer(make([]byte, 2), 1)
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md b/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md
new file mode 100644
index 000000000..6cfe6a718
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md
@@ -0,0 +1,59 @@
+# JOSE CLI
+
+The `jose-util` command line utility allows for encryption, decryption, signing
+and verification of JOSE messages. Its main purpose is to facilitate dealing
+with JOSE messages when testing or debugging.
+
+## Usage
+
+The utility includes the subcommands `encrypt`, `decrypt`, `sign`, `verify` and
+`expand`. Examples for each command can be found below.
+
+Algorithms are selected via the `--alg` and `--enc` flags, which influence the
+`alg` and `enc` headers respectively. For JWE, `--alg` specifies the key
+management algorithm (e.g. `RSA-OAEP`) and `--enc` specifies the content
+encryption algorithm (e.g. `A128GCM`). For JWS, `--alg` specifies the
+signature algorithm (e.g. `PS256`).
+
+Input and output files can be specified via the `--in` and `--out` flags.
+Either flag can be omitted, in which case `jose-util` uses stdin/stdout for
+input/output respectively. By default each command will output a compact
+message, but it's possible to get the full serialization by supplying the
+`--full` flag.
+
+Keys are specified via the `--key` flag. Supported key types are naked RSA/EC
+keys and X.509 certificates with embedded RSA/EC keys. Keys must be in PEM
+or DER formats.
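+
+For example, a throwaway RSA key pair for testing can be generated with
+OpenSSL (a sketch; any PEM- or DER-encoded key works):
+
+    openssl genrsa -out private-key.pem 2048
+    openssl rsa -in private-key.pem -pubout -out public-key.pem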
+
+## Examples
+
+### Encrypt
+
+Takes a plaintext as input, encrypts, and prints the encrypted message.
+
+ jose-util encrypt -k public-key.pem --alg RSA-OAEP --enc A128GCM
+
+### Decrypt
+
+Takes an encrypted message (JWE) as input, decrypts, and prints the plaintext.
+
+ jose-util decrypt -k private-key.pem
+
+### Sign
+
+Takes a payload as input, signs it, and prints the signed message with the embedded payload.
+
+ jose-util sign -k private-key.pem --alg PS256
+
+### Verify
+
+Reads a signed message (JWS), verifies it, and extracts the payload.
+
+ jose-util verify -k public-key.pem
+
+### Expand
+
+Expands a compact message to the full serialization format.
+
+ jose-util expand --format JWE # Expands a compact JWE to full format
+ jose-util expand --format JWS # Expands a compact JWS to full format
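+
+### Full serialization
+
+Any of the above commands can output the full JSON serialization instead of
+the compact form by adding the `--full` flag (a sketch, reusing the keys from
+the signing example above):
+
+    jose-util sign -k private-key.pem --alg PS256 --full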
diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t b/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t
new file mode 100644
index 000000000..c0d747bb0
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t
@@ -0,0 +1,94 @@
+Set up test keys.
+
+ $ cat > rsa.pub <<EOF
+ > -----BEGIN PUBLIC KEY-----
+ > MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAslWybuiNYR7uOgKuvaBw
+ > qVk8saEutKhOAaW+3hWF65gJei+ZV8QFfYDxs9ZaRZlWAUMtncQPnw7ZQlXO9ogN
+ > 5cMcN50C6qMOOZzghK7danalhF5lUETC4Hk3Eisbi/PR3IfVyXaRmqL6X66MKj/J
+ > AKyD9NFIDVy52K8A198Jojnrw2+XXQW72U68fZtvlyl/BTBWQ9Re5JSTpEcVmpCR
+ > 8FrFc0RPMBm+G5dRs08vvhZNiTT2JACO5V+J5ZrgP3s5hnGFcQFZgDnXLInDUdoi
+ > 1MuCjaAU0ta8/08pHMijNix5kFofdPEB954MiZ9k4kQ5/utt02I9x2ssHqw71ojj
+ > vwIDAQAB
+ > -----END PUBLIC KEY-----
+ > EOF
+
+ $ cat > rsa.key <<EOF
+ > -----BEGIN RSA PRIVATE KEY-----
+ > MIIEogIBAAKCAQEAslWybuiNYR7uOgKuvaBwqVk8saEutKhOAaW+3hWF65gJei+Z
+ > V8QFfYDxs9ZaRZlWAUMtncQPnw7ZQlXO9ogN5cMcN50C6qMOOZzghK7danalhF5l
+ > UETC4Hk3Eisbi/PR3IfVyXaRmqL6X66MKj/JAKyD9NFIDVy52K8A198Jojnrw2+X
+ > XQW72U68fZtvlyl/BTBWQ9Re5JSTpEcVmpCR8FrFc0RPMBm+G5dRs08vvhZNiTT2
+ > JACO5V+J5ZrgP3s5hnGFcQFZgDnXLInDUdoi1MuCjaAU0ta8/08pHMijNix5kFof
+ > dPEB954MiZ9k4kQ5/utt02I9x2ssHqw71ojjvwIDAQABAoIBABrYDYDmXom1BzUS
+ > PE1s/ihvt1QhqA8nmn5i/aUeZkc9XofW7GUqq4zlwPxKEtKRL0IHY7Fw1s0hhhCX
+ > LA0uE7F3OiMg7lR1cOm5NI6kZ83jyCxxrRx1DUSO2nxQotfhPsDMbaDiyS4WxEts
+ > 0cp2SYJhdYd/jTH9uDfmt+DGwQN7Jixio1Dj3vwB7krDY+mdre4SFY7Gbk9VxkDg
+ > LgCLMoq52m+wYufP8CTgpKFpMb2/yJrbLhuJxYZrJ3qd/oYo/91k6v7xlBKEOkwD
+ > 2veGk9Dqi8YPNxaRktTEjnZb6ybhezat93+VVxq4Oem3wMwou1SfXrSUKtgM/p2H
+ > vfw/76ECgYEA2fNL9tC8u9M0wjA+kvvtDG96qO6O66Hksssy6RWInD+Iqk3MtHQt
+ > LeoCjvX+zERqwOb6SI6empk5pZ9E3/9vJ0dBqkxx3nqn4M/nRWnExGgngJsL959t
+ > f50cdxva8y1RjNhT4kCwTrupX/TP8lAG8SfG1Alo2VFR8iWd8hDQcTECgYEA0Xfj
+ > EgqAsVh4U0s3lFxKjOepEyp0G1Imty5J16SvcOEAD1Mrmz94aSSp0bYhXNVdbf7n
+ > Rk77htWC7SE29fGjOzZRS76wxj/SJHF+rktHB2Zt23k1jBeZ4uLMPMnGLY/BJ099
+ > 5DTGo0yU0rrPbyXosx+ukfQLAHFuggX4RNeM5+8CgYB7M1J/hGMLcUpjcs4MXCgV
+ > XXbiw2c6v1r9zmtK4odEe42PZ0cNwpY/XAZyNZAAe7Q0stxL44K4NWEmxC80x7lX
+ > ZKozz96WOpNnO16qGC3IMHAT/JD5Or+04WTT14Ue7UEp8qcIQDTpbJ9DxKk/eglS
+ > jH+SIHeKULOXw7fSu7p4IQKBgBnyVchIUMSnBtCagpn4DKwDjif3nEY+GNmb/D2g
+ > ArNiy5UaYk5qwEmV5ws5GkzbiSU07AUDh5ieHgetk5dHhUayZcOSLWeBRFCLVnvU
+ > i0nZYEZNb1qZGdDG8zGcdNXz9qMd76Qy/WAA/nZT+Zn1AiweAovFxQ8a/etRPf2Z
+ > DbU1AoGAHpCgP7B/4GTBe49H0AQueQHBn4RIkgqMy9xiMeR+U+U0vaY0TlfLhnX+
+ > 5PkNfkPXohXlfL7pxwZNYa6FZhCAubzvhKCdUASivkoGaIEk6g1VTVYS/eDVQ4CA
+ > slfl+elXtLq/l1kQ8C14jlHrQzSXx4PQvjDEnAmaHSJNz4mP9Fg=
+ > -----END RSA PRIVATE KEY-----
+ > EOF
+
+ $ cat > ec.pub <<EOF
+ > -----BEGIN PUBLIC KEY-----
+ > MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE9yoUEAgxTd9svwe9oPqjhcP+f2jcdTL2
+ > Wq8Aw2v9ht1dBy00tFRPNrCxFCkvMcJFhSPoDUV5NL7zfh3/psiSNYziGPrWEJYf
+ > gmYihjSeoOf0ru1erpBrTflImPrMftCy
+ > -----END PUBLIC KEY-----
+ > EOF
+
+ $ cat > ec.key <<EOF
+ > -----BEGIN EC PRIVATE KEY-----
+ > MIGkAgEBBDDvoj/bM1HokUjYWO/IDFs26Jo0GIFtU3tMQQu7ZabKscDMK3dZA0mK
+ > v97ij7BBFbCgBwYFK4EEACKhZANiAAT3KhQQCDFN32y/B72g+qOFw/5/aNx1MvZa
+ > rwDDa/2G3V0HLTS0VE82sLEUKS8xwkWFI+gNRXk0vvN+Hf+myJI1jOIY+tYQlh+C
+ > ZiKGNJ6g5/Su7V6ukGtN+UiY+sx+0LI=
+ > -----END EC PRIVATE KEY-----
+ > EOF
+
+Encrypt and then decrypt a test message (RSA).
+
+ $ echo "Lorem ipsum dolor sit amet" |
+ > jose-util encrypt --alg RSA-OAEP --enc A128GCM --key rsa.pub |
+ > jose-util decrypt --key rsa.key
+ Lorem ipsum dolor sit amet
+
+Encrypt and then decrypt a test message (EC).
+
+ $ echo "Lorem ipsum dolor sit amet" |
+ > jose-util encrypt --alg ECDH-ES+A128KW --enc A128GCM --key ec.pub |
+ > jose-util decrypt --key ec.key
+ Lorem ipsum dolor sit amet
+
+Sign and verify a test message (RSA).
+
+ $ echo "Lorem ipsum dolor sit amet" |
+ > jose-util sign --alg PS256 --key rsa.key |
+ > jose-util verify --key rsa.pub
+ Lorem ipsum dolor sit amet
+
+Sign and verify a test message (EC).
+
+ $ echo "Lorem ipsum dolor sit amet" |
+ > jose-util sign --alg ES384 --key ec.key |
+ > jose-util verify --key ec.pub
+ Lorem ipsum dolor sit amet
+
+Expand a compact message to full format.
+
+ $ echo "eyJhbGciOiJFUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQK.QPU35XY913Im7ZEaN2yHykfbtPqjHZvYp-lV8OcTAJZs67bJFSdTSkQhQWE9ch6tvYrj_7py6HKaWVFLll_s_Rm6bmwq3JszsHrIvFFm1NydruYHhvAnx7rjYiqwOu0W" |
+ > jose-util expand --format JWS
+ {"payload":"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQK","protected":"eyJhbGciOiJFUzM4NCJ9","signature":"QPU35XY913Im7ZEaN2yHykfbtPqjHZvYp-lV8OcTAJZs67bJFSdTSkQhQWE9ch6tvYrj_7py6HKaWVFLll_s_Rm6bmwq3JszsHrIvFFm1NydruYHhvAnx7rjYiqwOu0W"}
diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go b/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go
new file mode 100644
index 000000000..7ae93ee76
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go
@@ -0,0 +1,189 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "gopkg.in/alecthomas/kingpin.v2"
+ "gopkg.in/square/go-jose.v1"
+)
+
+var (
+ app = kingpin.New("jose-util", "A command-line utility for dealing with JOSE objects.")
+
+ keyFile = app.Flag("key", "Path to key file (PEM or DER-encoded)").ExistingFile()
+ inFile = app.Flag("in", "Path to input file (stdin if missing)").ExistingFile()
+ outFile = app.Flag("out", "Path to output file (stdout if missing)").ExistingFile()
+
+ encryptCommand = app.Command("encrypt", "Encrypt a plaintext, output ciphertext.")
+ algFlag = encryptCommand.Flag("alg", "Key management algorithm (e.g. RSA-OAEP)").Required().String()
+ encFlag = encryptCommand.Flag("enc", "Content encryption algorithm (e.g. A128GCM)").Required().String()
+
+ decryptCommand = app.Command("decrypt", "Decrypt a ciphertext, output plaintext.")
+
+ signCommand = app.Command("sign", "Sign a payload, output signed message.")
+	sigAlgFlag = signCommand.Flag("alg", "Signature algorithm (e.g. PS256)").Required().String()
+
+ verifyCommand = app.Command("verify", "Verify a signed message, output payload.")
+
+ expandCommand = app.Command("expand", "Expand JOSE object to full serialization format.")
+ formatFlag = expandCommand.Flag("format", "Type of message to expand (JWS or JWE, defaults to JWE)").String()
+
+ full = app.Flag("full", "Use full serialization format (instead of compact)").Bool()
+)
+
+func main() {
+ app.Version("v1")
+
+ command := kingpin.MustParse(app.Parse(os.Args[1:]))
+
+ var keyBytes []byte
+ var err error
+ if command != "expand" {
+ keyBytes, err = ioutil.ReadFile(*keyFile)
+ exitOnError(err, "unable to read key file")
+ }
+
+ switch command {
+ case "encrypt":
+ pub, err := jose.LoadPublicKey(keyBytes)
+ exitOnError(err, "unable to read public key")
+
+ alg := jose.KeyAlgorithm(*algFlag)
+ enc := jose.ContentEncryption(*encFlag)
+
+ crypter, err := jose.NewEncrypter(alg, enc, pub)
+ exitOnError(err, "unable to instantiate encrypter")
+
+ obj, err := crypter.Encrypt(readInput(*inFile))
+ exitOnError(err, "unable to encrypt")
+
+ var msg string
+ if *full {
+ msg = obj.FullSerialize()
+ } else {
+ msg, err = obj.CompactSerialize()
+ exitOnError(err, "unable to serialize message")
+ }
+
+ writeOutput(*outFile, []byte(msg))
+ case "decrypt":
+ priv, err := jose.LoadPrivateKey(keyBytes)
+ exitOnError(err, "unable to read private key")
+
+ obj, err := jose.ParseEncrypted(string(readInput(*inFile)))
+ exitOnError(err, "unable to parse message")
+
+ plaintext, err := obj.Decrypt(priv)
+ exitOnError(err, "unable to decrypt message")
+
+ writeOutput(*outFile, plaintext)
+ case "sign":
+ signingKey, err := jose.LoadPrivateKey(keyBytes)
+ exitOnError(err, "unable to read private key")
+
+ alg := jose.SignatureAlgorithm(*sigAlgFlag)
+ signer, err := jose.NewSigner(alg, signingKey)
+ exitOnError(err, "unable to make signer")
+
+ obj, err := signer.Sign(readInput(*inFile))
+ exitOnError(err, "unable to sign")
+
+ var msg string
+ if *full {
+ msg = obj.FullSerialize()
+ } else {
+ msg, err = obj.CompactSerialize()
+ exitOnError(err, "unable to serialize message")
+ }
+
+ writeOutput(*outFile, []byte(msg))
+ case "verify":
+ verificationKey, err := jose.LoadPublicKey(keyBytes)
+ exitOnError(err, "unable to read private key")
+
+ obj, err := jose.ParseSigned(string(readInput(*inFile)))
+ exitOnError(err, "unable to parse message")
+
+ plaintext, err := obj.Verify(verificationKey)
+ exitOnError(err, "invalid signature")
+
+ writeOutput(*outFile, plaintext)
+ case "expand":
+ input := string(readInput(*inFile))
+
+ var serialized string
+ var err error
+ switch *formatFlag {
+ case "", "JWE":
+ var jwe *jose.JsonWebEncryption
+ jwe, err = jose.ParseEncrypted(input)
+ if err == nil {
+ serialized = jwe.FullSerialize()
+ }
+ case "JWS":
+ var jws *jose.JsonWebSignature
+ jws, err = jose.ParseSigned(input)
+ if err == nil {
+ serialized = jws.FullSerialize()
+ }
+ }
+
+ exitOnError(err, "unable to expand message")
+ writeOutput(*outFile, []byte(serialized))
+ writeOutput(*outFile, []byte("\n"))
+ }
+}
+
+// Exit and print error message if we encountered a problem
+func exitOnError(err error, msg string) {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", msg, err)
+ os.Exit(1)
+ }
+}
+
+// Read input from file or stdin
+func readInput(path string) []byte {
+ var bytes []byte
+ var err error
+
+ if path != "" {
+ bytes, err = ioutil.ReadFile(path)
+ } else {
+ bytes, err = ioutil.ReadAll(os.Stdin)
+ }
+
+ exitOnError(err, "unable to read input")
+ return bytes
+}
+
+// Write output to file or stdout
+func writeOutput(path string, data []byte) {
+ var err error
+
+ if path != "" {
+ err = ioutil.WriteFile(path, data, 0644)
+ } else {
+ _, err = os.Stdout.Write(data)
+ }
+
+ exitOnError(err, "unable to write output")
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/gopkg.in/square/go-jose.v1/json/README.md
new file mode 100644
index 000000000..86de5e558
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
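+
+For example, an object with a duplicate member name is rejected rather than
+silently accepted (a sketch, with this package imported as `json`; the exact
+error value is not shown here):
+
+    var v map[string]interface{}
+    err := json.Unmarshal([]byte(`{"kid":"a","kid":"b"}`), &v)
+    // err != nil: the duplicate "kid" member causes Unmarshal to reject the input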
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go
new file mode 100644
index 000000000..ed89d1156
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go
@@ -0,0 +1,223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Large data benchmark.
+// The JSON data is a summary of agl's changes in the
+// go, webkit, and chromium open source projects.
+// We benchmark converting between the JSON form
+// and in-memory data structures.
+
+package json
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+type codeResponse struct {
+ Tree *codeNode `json:"tree"`
+ Username string `json:"username"`
+}
+
+type codeNode struct {
+ Name string `json:"name"`
+ Kids []*codeNode `json:"kids"`
+ CLWeight float64 `json:"cl_weight"`
+ Touches int `json:"touches"`
+ MinT int64 `json:"min_t"`
+ MaxT int64 `json:"max_t"`
+ MeanT int64 `json:"mean_t"`
+}
+
+var codeJSON []byte
+var codeStruct codeResponse
+
+func codeInit() {
+ f, err := os.Open("testdata/code.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ gz, err := gzip.NewReader(f)
+ if err != nil {
+ panic(err)
+ }
+ data, err := ioutil.ReadAll(gz)
+ if err != nil {
+ panic(err)
+ }
+
+ codeJSON = data
+
+ if err := Unmarshal(codeJSON, &codeStruct); err != nil {
+ panic("unmarshal code.json: " + err.Error())
+ }
+
+ if data, err = Marshal(&codeStruct); err != nil {
+ panic("marshal code.json: " + err.Error())
+ }
+
+ if !bytes.Equal(data, codeJSON) {
+ println("different lengths", len(data), len(codeJSON))
+ for i := 0; i < len(data) && i < len(codeJSON); i++ {
+ if data[i] != codeJSON[i] {
+ println("re-marshal: changed at byte", i)
+ println("orig: ", string(codeJSON[i-10:i+10]))
+ println("new: ", string(data[i-10:i+10]))
+ break
+ }
+ }
+ panic("re-marshal code.json: different result")
+ }
+}
+
+func BenchmarkCodeEncoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ enc := NewEncoder(ioutil.Discard)
+ for i := 0; i < b.N; i++ {
+ if err := enc.Encode(&codeStruct); err != nil {
+ b.Fatal("Encode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeMarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ if _, err := Marshal(&codeStruct); err != nil {
+ b.Fatal("Marshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeDecoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ buf.Write(codeJSON)
+ // hide EOF
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ if err := dec.Decode(&r); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkDecoderStream(b *testing.B) {
+ b.StopTimer()
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if i%300000 == 0 {
+ buf.WriteString(ones)
+ }
+ x = nil
+ if err := dec.Decode(&x); err != nil || x != 1.0 {
+ b.Fatalf("Decode: %v after %d", err, i)
+ }
+ }
+}
+
+func BenchmarkCodeUnmarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ var r codeResponse
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeUnmarshalReuse(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalString(b *testing.B) {
+ data := []byte(`"hello, world"`)
+ var s string
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &s); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalFloat64(b *testing.B) {
+ var f float64
+ data := []byte(`3.14`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &f); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalInt64(b *testing.B) {
+ var x int64
+ data := []byte(`3`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &x); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkIssue10335(b *testing.B) {
+ b.ReportAllocs()
+ var s struct{}
+ j := []byte(`{"a":{ }}`)
+ for n := 0; n < b.N; n++ {
+ if err := Unmarshal(j, &s); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
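+
+// decoderStreamSketch is an illustrative sketch, not part of the upstream
+// file: it shows the streaming behaviour the benchmarks above rely on, namely
+// that a single Decoder keeps decoding successive JSON values from its
+// reader. The input literal is an assumption for illustration.
+func decoderStreamSketch() {
+	dec := NewDecoder(strings.NewReader("1\n\"two\"\n[3]\n"))
+	for {
+		var v interface{}
+		if err := dec.Decode(&v); err != nil {
+			break // io.EOF once the stream is exhausted
+		}
+		_ = v // 1.0, then "two", then []interface{}{3.0}
+	}
+}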
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/gopkg.in/square/go-jose.v1/json/decode.go
new file mode 100644
index 000000000..37457e5a8
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/decode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
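+
+// unmarshalSketch is an illustrative usage sketch, not part of the upstream
+// file: it exercises the struct and interface{} rules described in the
+// Unmarshal documentation above. The Animal type and the input literals are
+// assumptions for illustration.
+func unmarshalSketch() {
+	type Animal struct {
+		Name  string
+		Order string
+	}
+	var a Animal
+	if err := Unmarshal([]byte(`{"Name":"Platypus","Order":"Monotremata"}`), &a); err != nil {
+		panic(err)
+	}
+	// a.Name == "Platypus", a.Order == "Monotremata".
+
+	var v interface{}
+	if err := Unmarshal([]byte(`[1, "two", null]`), &v); err != nil {
+		panic(err)
+	}
+	// With an interface{} target, numbers decode as float64, arrays as
+	// []interface{}, and null as nil: []interface{}{float64(1), "two", nil}.
+}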
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
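+
+// invalidUnmarshalSketch is an illustrative sketch, not part of the upstream
+// file: it shows the two cases that produce an *InvalidUnmarshalError,
+// because the destination must be a non-nil pointer.
+func invalidUnmarshalSketch() {
+	var n int
+	err := Unmarshal([]byte(`1`), n) // not a pointer
+	_ = err                          // "json: Unmarshal(non-pointer int)"
+
+	err = Unmarshal([]byte(`1`), (*int)(nil)) // nil pointer
+	_ = err                                   // "json: Unmarshal(nil *int)"
+}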
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
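+
+// numberSketch is an illustrative sketch, not part of the upstream file: with
+// Decoder.UseNumber, numeric literals are kept as Number so the accessors
+// above can read them without float64 rounding. The input literal is an
+// assumption for illustration.
+func numberSketch() {
+	dec := NewDecoder(bytes.NewReader([]byte(`{"id": 9007199254740993}`)))
+	dec.UseNumber()
+	var v map[string]interface{}
+	if err := dec.Decode(&v); err != nil {
+		panic(err)
+	}
+	n := v["id"].(Number)
+	if id, err := n.Int64(); err == nil {
+		_ = id // exactly 9007199254740993, which float64 cannot represent
+	}
+}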
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ useNumber bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
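+
+// The sketch below is illustrative and not part of the upstream file: because
+// indirect allocates pointers as it walks and checks for Unmarshaler at each
+// step, a nil pointer field still has UnmarshalJSON called on a freshly
+// allocated value. sketchPet and its toy UnmarshalJSON are assumptions.
+type sketchPet struct{ Raw string }
+
+func (p *sketchPet) UnmarshalJSON(b []byte) error {
+	p.Raw = string(b) // toy implementation: keep the raw JSON text
+	return nil
+}
+
+func indirectSketch() {
+	var holder struct {
+		Pet *sketchPet
+	}
+	if err := Unmarshal([]byte(`{"Pet": {"name": "Rex"}}`), &holder); err != nil {
+		panic(err)
+	}
+	// holder.Pet was allocated by indirect, and holder.Pet.Raw now holds the
+	// raw object text for the "Pet" key.
+}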
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+		// Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
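+
+// unquoteSketch is an illustrative sketch, not part of the upstream file: it
+// shows the escape handling implemented by unquoteBytes, reachable through
+// Unmarshal into a string. Valid \u surrogate pairs are combined; invalid
+// ones are replaced with U+FFFD. The input literals are assumptions.
+func unquoteSketch() {
+	var s string
+	_ = Unmarshal([]byte(`"g-clef: \uD834\uDD1E"`), &s) // s == "g-clef: \U0001D11E"
+	_ = Unmarshal([]byte(`"lonely: \uD834x"`), &s)      // s == "lonely: \uFFFDx"
+}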
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go
new file mode 100644
index 000000000..32394654e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go
@@ -0,0 +1,1474 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "image"
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type T struct {
+ X string
+ Y int
+ Z int `json:"-"`
+}
+
+type U struct {
+ Alphabet string `json:"alpha"`
+}
+
+type V struct {
+ F1 interface{}
+ F2 int32
+ F3 Number
+}
+
+// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and
+// without UseNumber
+var ifaceNumAsFloat64 = map[string]interface{}{
+ "k1": float64(1),
+ "k2": "s",
+ "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)},
+}
+
+var ifaceNumAsNumber = map[string]interface{}{
+ "k1": Number("1"),
+ "k2": "s",
+ "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")},
+}
+
+type tx struct {
+ x int
+}
+
+// A type that can unmarshal itself.
+
+type unmarshaler struct {
+ T bool
+}
+
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
+ *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called.
+ return nil
+}
+
+type ustruct struct {
+ M unmarshaler
+}
+
+type unmarshalerText struct {
+ T bool
+}
+
+// needed for re-marshaling tests
+func (u *unmarshalerText) MarshalText() ([]byte, error) {
+ return []byte(""), nil
+}
+
+func (u *unmarshalerText) UnmarshalText(b []byte) error {
+ *u = unmarshalerText{true} // All we need to see that UnmarshalText is called.
+ return nil
+}
+
+var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil)
+
+type ustructText struct {
+ M unmarshalerText
+}
+
+var (
+	um0, um1 unmarshaler // targets of unmarshaling
+ ump = &um1
+ umtrue = unmarshaler{true}
+ umslice = []unmarshaler{{true}}
+ umslicep = new([]unmarshaler)
+ umstruct = ustruct{unmarshaler{true}}
+
+	um0T, um1T unmarshalerText // targets of unmarshaling
+ umpT = &um1T
+ umtrueT = unmarshalerText{true}
+ umsliceT = []unmarshalerText{{true}}
+ umslicepT = new([]unmarshalerText)
+ umstructT = ustructText{unmarshalerText{true}}
+)
+
+// Test data structures for anonymous fields.
+
+type Point struct {
+ Z int
+}
+
+type Top struct {
+ Level0 int
+ Embed0
+ *Embed0a
+ *Embed0b `json:"e,omitempty"` // treated as named
+ Embed0c `json:"-"` // ignored
+ Loop
+ Embed0p // has Point with X, Y, used
+ Embed0q // has Point with Z, used
+ embed // contains exported field
+}
+
+type Embed0 struct {
+ Level1a int // overridden by Embed0a's Level1a with json tag
+ Level1b int // used because Embed0a's Level1b is renamed
+ Level1c int // used because Embed0a's Level1c is ignored
+ Level1d int // annihilated by Embed0a's Level1d
+ Level1e int `json:"x"` // annihilated by Embed0a.Level1e
+}
+
+type Embed0a struct {
+ Level1a int `json:"Level1a,omitempty"`
+ Level1b int `json:"LEVEL1B,omitempty"`
+ Level1c int `json:"-"`
+ Level1d int // annihilated by Embed0's Level1d
+ Level1f int `json:"x"` // annihilated by Embed0's Level1e
+}
+
+type Embed0b Embed0
+
+type Embed0c Embed0
+
+type Embed0p struct {
+ image.Point
+}
+
+type Embed0q struct {
+ Point
+}
+
+type embed struct {
+ Q int
+}
+
+type Loop struct {
+ Loop1 int `json:",omitempty"`
+ Loop2 int `json:",omitempty"`
+ *Loop
+}
+
+// From reflect test:
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// From reflect test:
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+type unmarshalTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+ useNumber bool
+}
+
+type XYZ struct {
+ X interface{}
+ Y interface{}
+ Z interface{}
+}
+
+func sliceAddr(x []int) *[]int { return &x }
+func mapAddr(x map[string]int) *map[string]int { return &x }
+
+var unmarshalTests = []unmarshalTest{
+ // basic types
+ {in: `true`, ptr: new(bool), out: true},
+ {in: `1`, ptr: new(int), out: 1},
+ {in: `1.2`, ptr: new(float64), out: 1.2},
+ {in: `-5`, ptr: new(int16), out: int16(-5)},
+ {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
+ {in: `2`, ptr: new(Number), out: Number("2")},
+ {in: `2`, ptr: new(interface{}), out: float64(2.0)},
+ {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true},
+ {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
+ {in: `"http:\/\/"`, ptr: new(string), out: "http://"},
+ {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
+ {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
+ {in: "null", ptr: new(interface{}), out: nil},
+ {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}},
+ {in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true},
+
+ // raw values with whitespace
+ {in: "\n true ", ptr: new(bool), out: true},
+ {in: "\t 1 ", ptr: new(int), out: 1},
+ {in: "\r 1.2 ", ptr: new(float64), out: 1.2},
+ {in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
+ {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
+
+ // Z has a "-" tag.
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
+
+ {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
+
+ // syntax errors
+ {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
+ {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
+ {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
+
+ // raw value errors
+ {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
+ {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
+ {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
+ {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
+
+ // array tests
+ {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
+ {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
+ {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
+
+ // empty array to interface test
+ {in: `[]`, ptr: new([]interface{}), out: []interface{}{}},
+ {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)},
+ {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}},
+ {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}},
+
+ // composite tests
+ {in: allValueIndent, ptr: new(All), out: allValue},
+ {in: allValueCompact, ptr: new(All), out: allValue},
+ {in: allValueIndent, ptr: new(*All), out: &allValue},
+ {in: allValueCompact, ptr: new(*All), out: &allValue},
+ {in: pallValueIndent, ptr: new(All), out: pallValue},
+ {in: pallValueCompact, ptr: new(All), out: pallValue},
+ {in: pallValueIndent, ptr: new(*All), out: &pallValue},
+ {in: pallValueCompact, ptr: new(*All), out: &pallValue},
+
+ // unmarshal interface test
+ {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `{"T":false}`, ptr: &ump, out: &umtrue},
+ {in: `[{"T":false}]`, ptr: &umslice, out: umslice},
+ {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice},
+ {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct},
+
+ // UnmarshalText interface test
+ {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `"X"`, ptr: &umpT, out: &umtrueT},
+ {in: `["X"]`, ptr: &umsliceT, out: umsliceT},
+ {in: `["X"]`, ptr: &umslicepT, out: &umsliceT},
+ {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT},
+
+ // Overwriting of data.
+ // This is different from package xml, but it's what we've always done.
+ // Now documented and tested.
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}},
+ {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}},
+
+ {
+ in: `{
+ "Level0": 1,
+ "Level1b": 2,
+ "Level1c": 3,
+ "x": 4,
+ "Level1a": 5,
+ "LEVEL1B": 6,
+ "e": {
+ "Level1a": 8,
+ "Level1b": 9,
+ "Level1c": 10,
+ "Level1d": 11,
+ "x": 12
+ },
+ "Loop1": 13,
+ "Loop2": 14,
+ "X": 15,
+ "Y": 16,
+ "Z": 17,
+ "Q": 18
+ }`,
+ ptr: new(Top),
+ out: Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ embed: embed{
+ Q: 18,
+ },
+ },
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S5),
+ out: S5{S8: S8{S9: S9{Y: 2}}},
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S10),
+ out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
+ },
+
+ // invalid UTF-8 is coerced to valid UTF-8.
+ {
+ in: "\"hello\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xc2world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
+ },
+
+ // issue 8305
+ {
+ in: `{"2009-11-10T23:00:00Z": "hello world"}`,
+ ptr: &map[time.Time]string{},
+ err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1},
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ b, err := Marshal(allValue)
+ if err != nil {
+ t.Fatalf("Marshal allValue: %v", err)
+ }
+ if string(b) != allValueCompact {
+ t.Errorf("Marshal allValueCompact")
+ diff(t, b, []byte(allValueCompact))
+ return
+ }
+
+ b, err = Marshal(pallValue)
+ if err != nil {
+ t.Fatalf("Marshal pallValue: %v", err)
+ }
+ if string(b) != pallValueCompact {
+ t.Errorf("Marshal pallValueCompact")
+ diff(t, b, []byte(pallValueCompact))
+ return
+ }
+}
+
+var badUTF8 = []struct {
+ in, out string
+}{
+ {"hello\xffworld", `"hello\ufffdworld"`},
+ {"", `""`},
+ {"\xff", `"\ufffd"`},
+ {"\xff\xff", `"\ufffd\ufffd"`},
+ {"a\xffb", `"a\ufffdb"`},
+ {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+}
+
+func TestMarshalBadUTF8(t *testing.T) {
+ for _, tt := range badUTF8 {
+ b, err := Marshal(tt.in)
+ if string(b) != tt.out || err != nil {
+ t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out)
+ }
+ }
+}
+
+func TestMarshalNumberZeroVal(t *testing.T) {
+ var n Number
+ out, err := Marshal(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+ outStr := string(out)
+ if outStr != "0" {
+ t.Fatalf("Invalid zero val for Number: %q", outStr)
+ }
+}
+
+func TestMarshalEmbeds(t *testing.T) {
+ top := &Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ embed: embed{
+ Q: 18,
+ },
+ }
+ b, err := Marshal(top)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}"
+ if string(b) != want {
+ t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want)
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, tt := range unmarshalTests {
+ var scan scanner
+ in := []byte(tt.in)
+ if err := checkValid(in, &scan); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: checkValid: %#v", i, err)
+ continue
+ }
+ }
+ if tt.ptr == nil {
+ continue
+ }
+
+ // v = new(right-type)
+ v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec := NewDecoder(bytes.NewReader(in))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v, want %v", i, err, tt.err)
+ continue
+ } else if err != nil {
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+ data, _ := Marshal(v.Elem().Interface())
+ println(string(data))
+ data, _ = Marshal(tt.out)
+ println(string(data))
+ continue
+ }
+
+ // Check round trip.
+ if tt.err == nil {
+ enc, err := Marshal(v.Interface())
+ if err != nil {
+ t.Errorf("#%d: error re-marshaling: %v", i, err)
+ continue
+ }
+ vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec = NewDecoder(bytes.NewReader(enc))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(vv.Interface()); err != nil {
+ t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
+ t.Errorf(" In: %q", strings.Map(noSpace, string(in)))
+ t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
+ continue
+ }
+ }
+ }
+}
+
+func TestUnmarshalMarshal(t *testing.T) {
+ initBig()
+ var v interface{}
+ if err := Unmarshal(jsonBig, &v); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(jsonBig, b) {
+ t.Errorf("Marshal jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+var numberTests = []struct {
+ in string
+ i int64
+ intErr string
+ f float64
+ floatErr string
+}{
+ {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
+ {in: "-12", i: -12, f: -12.0},
+ {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
+}
+
+// Independent of Decode, basic coverage of the accessors in Number
+func TestNumberAccessors(t *testing.T) {
+ for _, tt := range numberTests {
+ n := Number(tt.in)
+ if s := n.String(); s != tt.in {
+ t.Errorf("Number(%q).String() is %q", tt.in, s)
+ }
+ if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
+ t.Errorf("Number(%q).Int64() is %d", tt.in, i)
+ } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
+ t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err)
+ }
+ if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
+ t.Errorf("Number(%q).Float64() is %g", tt.in, f)
+ } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
+ t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err)
+ }
+ }
+}
+
+func TestLargeByteSlice(t *testing.T) {
+ s0 := make([]byte, 2000)
+ for i := range s0 {
+ s0[i] = byte(i)
+ }
+ b, err := Marshal(s0)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ var s1 []byte
+ if err := Unmarshal(b, &s1); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !bytes.Equal(s0, s1) {
+ t.Errorf("Marshal large byte slice")
+ diff(t, s0, s1)
+ }
+}
+
+type Xint struct {
+ X int
+}
+
+func TestUnmarshalInterface(t *testing.T) {
+ var xint Xint
+ var i interface{} = &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestUnmarshalPtrPtr(t *testing.T) {
+ var xint Xint
+ pxint := &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestEscape(t *testing.T) {
+ const input = `"foobar"<html>` + " [\u2028 \u2029]"
+ const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
+ b, err := Marshal(input)
+ if err != nil {
+ t.Fatalf("Marshal error: %v", err)
+ }
+ if s := string(b); s != expected {
+ t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected)
+ }
+}
+
+// WrongString is a struct that's misusing the ,string modifier.
+type WrongString struct {
+ Message string `json:"result,string"`
+}
+
+type wrongStringTest struct {
+ in, err string
+}
+
+var wrongStringTests = []wrongStringTest{
+ {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
+ {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
+ {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
+ {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
+}
+
+// If people misuse the ,string modifier, the error message should be
+// helpful, telling the user that they're doing it wrong.
+func TestErrorMessageFromMisusedString(t *testing.T) {
+ for n, tt := range wrongStringTests {
+ r := strings.NewReader(tt.in)
+ var s WrongString
+ err := NewDecoder(r).Decode(&s)
+ got := fmt.Sprintf("%v", err)
+ if got != tt.err {
+ t.Errorf("%d. got err = %q, want %q", n, got, tt.err)
+ }
+ }
+}
+
+func noSpace(c rune) rune {
+	if isSpace(byte(c)) { // only used for ASCII
+ return -1
+ }
+ return c
+}
+
+type All struct {
+ Bool bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint uint
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+
+ Foo string `json:"bar"`
+ Foo2 string `json:"bar2,dummyopt"`
+
+ IntStr int64 `json:",string"`
+
+ PBool *bool
+ PInt *int
+ PInt8 *int8
+ PInt16 *int16
+ PInt32 *int32
+ PInt64 *int64
+ PUint *uint
+ PUint8 *uint8
+ PUint16 *uint16
+ PUint32 *uint32
+ PUint64 *uint64
+ PUintptr *uintptr
+ PFloat32 *float32
+ PFloat64 *float64
+
+ String string
+ PString *string
+
+ Map map[string]Small
+ MapP map[string]*Small
+ PMap *map[string]Small
+ PMapP *map[string]*Small
+
+ EmptyMap map[string]Small
+ NilMap map[string]Small
+
+ Slice []Small
+ SliceP []*Small
+ PSlice *[]Small
+ PSliceP *[]*Small
+
+ EmptySlice []Small
+ NilSlice []Small
+
+ StringSlice []string
+ ByteSlice []byte
+
+ Small Small
+ PSmall *Small
+ PPSmall **Small
+
+ Interface interface{}
+ PInterface *interface{}
+
+ unexported int
+}
+
+type Small struct {
+ Tag string
+}
+
+var allValue = All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Uintptr: 12,
+ Float32: 14.1,
+ Float64: 15.1,
+ Foo: "foo",
+ Foo2: "foo2",
+ IntStr: 42,
+ String: "16",
+ Map: map[string]Small{
+ "17": {Tag: "tag17"},
+ "18": {Tag: "tag18"},
+ },
+ MapP: map[string]*Small{
+ "19": {Tag: "tag19"},
+ "20": nil,
+ },
+ EmptyMap: map[string]Small{},
+ Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}},
+ SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}},
+ EmptySlice: []Small{},
+ StringSlice: []string{"str24", "str25", "str26"},
+ ByteSlice: []byte{27, 28, 29},
+ Small: Small{Tag: "tag30"},
+ PSmall: &Small{Tag: "tag31"},
+ Interface: 5.2,
+}
+
+var pallValue = All{
+ PBool: &allValue.Bool,
+ PInt: &allValue.Int,
+ PInt8: &allValue.Int8,
+ PInt16: &allValue.Int16,
+ PInt32: &allValue.Int32,
+ PInt64: &allValue.Int64,
+ PUint: &allValue.Uint,
+ PUint8: &allValue.Uint8,
+ PUint16: &allValue.Uint16,
+ PUint32: &allValue.Uint32,
+ PUint64: &allValue.Uint64,
+ PUintptr: &allValue.Uintptr,
+ PFloat32: &allValue.Float32,
+ PFloat64: &allValue.Float64,
+ PString: &allValue.String,
+ PMap: &allValue.Map,
+ PMapP: &allValue.MapP,
+ PSlice: &allValue.Slice,
+ PSliceP: &allValue.SliceP,
+ PPSmall: &allValue.PSmall,
+ PInterface: &allValue.Interface,
+}
+
+var allValueIndent = `{
+ "Bool": true,
+ "Int": 2,
+ "Int8": 3,
+ "Int16": 4,
+ "Int32": 5,
+ "Int64": 6,
+ "Uint": 7,
+ "Uint8": 8,
+ "Uint16": 9,
+ "Uint32": 10,
+ "Uint64": 11,
+ "Uintptr": 12,
+ "Float32": 14.1,
+ "Float64": 15.1,
+ "bar": "foo",
+ "bar2": "foo2",
+ "IntStr": "42",
+ "PBool": null,
+ "PInt": null,
+ "PInt8": null,
+ "PInt16": null,
+ "PInt32": null,
+ "PInt64": null,
+ "PUint": null,
+ "PUint8": null,
+ "PUint16": null,
+ "PUint32": null,
+ "PUint64": null,
+ "PUintptr": null,
+ "PFloat32": null,
+ "PFloat64": null,
+ "String": "16",
+ "PString": null,
+ "Map": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "MapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "PMap": null,
+ "PMapP": null,
+ "EmptyMap": {},
+ "NilMap": null,
+ "Slice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "SliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "PSlice": null,
+ "PSliceP": null,
+ "EmptySlice": [],
+ "NilSlice": null,
+ "StringSlice": [
+ "str24",
+ "str25",
+ "str26"
+ ],
+ "ByteSlice": "Gxwd",
+ "Small": {
+ "Tag": "tag30"
+ },
+ "PSmall": {
+ "Tag": "tag31"
+ },
+ "PPSmall": null,
+ "Interface": 5.2,
+ "PInterface": null
+}`
+
+var allValueCompact = strings.Map(noSpace, allValueIndent)
+
+var pallValueIndent = `{
+ "Bool": false,
+ "Int": 0,
+ "Int8": 0,
+ "Int16": 0,
+ "Int32": 0,
+ "Int64": 0,
+ "Uint": 0,
+ "Uint8": 0,
+ "Uint16": 0,
+ "Uint32": 0,
+ "Uint64": 0,
+ "Uintptr": 0,
+ "Float32": 0,
+ "Float64": 0,
+ "bar": "",
+ "bar2": "",
+ "IntStr": "0",
+ "PBool": true,
+ "PInt": 2,
+ "PInt8": 3,
+ "PInt16": 4,
+ "PInt32": 5,
+ "PInt64": 6,
+ "PUint": 7,
+ "PUint8": 8,
+ "PUint16": 9,
+ "PUint32": 10,
+ "PUint64": 11,
+ "PUintptr": 12,
+ "PFloat32": 14.1,
+ "PFloat64": 15.1,
+ "String": "",
+ "PString": "16",
+ "Map": null,
+ "MapP": null,
+ "PMap": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "PMapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "EmptyMap": null,
+ "NilMap": null,
+ "Slice": null,
+ "SliceP": null,
+ "PSlice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "PSliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "EmptySlice": null,
+ "NilSlice": null,
+ "StringSlice": null,
+ "ByteSlice": null,
+ "Small": {
+ "Tag": ""
+ },
+ "PSmall": null,
+ "PPSmall": {
+ "Tag": "tag31"
+ },
+ "Interface": null,
+ "PInterface": 5.2
+}`
+
+var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+func TestRefUnmarshal(t *testing.T) {
+ type S struct {
+ // Ref is defined in encode_test.go.
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ }
+ want := S{
+ R0: 12,
+ R1: new(Ref),
+ R2: 13,
+ R3: new(RefText),
+ }
+ *want.R1 = 12
+ *want.R3 = 13
+
+ var got S
+ if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
+}
+
+// Test that the empty string doesn't panic decoding when ,string is specified
+// Issue 3450
+func TestEmptyString(t *testing.T) {
+ type T2 struct {
+ Number1 int `json:",string"`
+ Number2 int `json:",string"`
+ }
+ data := `{"Number1":"1", "Number2":""}`
+ dec := NewDecoder(strings.NewReader(data))
+ var t2 T2
+ err := dec.Decode(&t2)
+ if err == nil {
+ t.Fatal("Decode: did not return error")
+ }
+ if t2.Number1 != 1 {
+ t.Fatal("Decode: did not set Number1")
+ }
+}
+
+// Test that a null for ,string is not replaced with the previous quoted string (issue 7046).
+// It should also not be an error (issue 2540, issue 8587).
+func TestNullString(t *testing.T) {
+ type T struct {
+ A int `json:",string"`
+ B int `json:",string"`
+ C *int `json:",string"`
+ }
+ data := []byte(`{"A": "1", "B": null, "C": null}`)
+ var s T
+ s.B = 1
+ s.C = new(int)
+ *s.C = 2
+ err := Unmarshal(data, &s)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if s.B != 1 || s.C != nil {
+ t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C)
+ }
+}
+
+func intp(x int) *int {
+ p := new(int)
+ *p = x
+ return p
+}
+
+func intpp(x *int) **int {
+ pp := new(*int)
+ *pp = x
+ return pp
+}
+
+var interfaceSetTests = []struct {
+ pre interface{}
+ json string
+ post interface{}
+}{
+ {"foo", `"bar"`, "bar"},
+ {"foo", `2`, 2.0},
+ {"foo", `true`, true},
+ {"foo", `null`, nil},
+
+ {nil, `null`, nil},
+ {new(int), `null`, nil},
+ {(*int)(nil), `null`, nil},
+ {new(*int), `null`, new(*int)},
+ {(**int)(nil), `null`, nil},
+ {intp(1), `null`, nil},
+ {intpp(nil), `null`, intpp(nil)},
+ {intpp(intp(1)), `null`, intpp(nil)},
+}
+
+func TestInterfaceSet(t *testing.T) {
+ for _, tt := range interfaceSetTests {
+ b := struct{ X interface{} }{tt.pre}
+ blob := `{"X":` + tt.json + `}`
+ if err := Unmarshal([]byte(blob), &b); err != nil {
+ t.Errorf("Unmarshal %#q: %v", blob, err)
+ continue
+ }
+ if !reflect.DeepEqual(b.X, tt.post) {
+ t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post)
+ }
+ }
+}
+
+// JSON null values should be ignored for primitives and string values instead of resulting in an error.
+// Issue 2540
+func TestUnmarshalNulls(t *testing.T) {
+ jsonData := []byte(`{
+ "Bool" : null,
+ "Int" : null,
+ "Int8" : null,
+ "Int16" : null,
+ "Int32" : null,
+ "Int64" : null,
+ "Uint" : null,
+ "Uint8" : null,
+ "Uint16" : null,
+ "Uint32" : null,
+ "Uint64" : null,
+ "Float32" : null,
+ "Float64" : null,
+ "String" : null}`)
+
+ nulls := All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Float32: 12.1,
+ Float64: 13.1,
+ String: "14"}
+
+ err := Unmarshal(jsonData, &nulls)
+ if err != nil {
+ t.Errorf("Unmarshal of null values failed: %v", err)
+ }
+ if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 ||
+ nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 ||
+ nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" {
+
+ t.Errorf("Unmarshal of null values affected primitives")
+ }
+}
+
+func TestStringKind(t *testing.T) {
+ type stringKind string
+
+ var m1, m2 map[stringKind]int
+ m1 = map[stringKind]int{
+ "foo": 42,
+ }
+
+ data, err := Marshal(m1)
+ if err != nil {
+ t.Errorf("Unexpected error marshaling: %v", err)
+ }
+
+ err = Unmarshal(data, &m2)
+ if err != nil {
+ t.Errorf("Unexpected error unmarshaling: %v", err)
+ }
+
+ if !reflect.DeepEqual(m1, m2) {
+ t.Error("Items should be equal after encoding and then decoding")
+ }
+}
+
+// Custom types with []byte as the underlying type could not be marshaled
+// and then unmarshaled.
+// Issue 8962.
+func TestByteKind(t *testing.T) {
+ type byteKind []byte
+
+ a := byteKind("hello")
+
+ data, err := Marshal(a)
+ if err != nil {
+ t.Error(err)
+ }
+ var b byteKind
+ err = Unmarshal(data, &b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.Errorf("expected %v == %v", a, b)
+ }
+}
+
+// The fix for issue 8962 introduced a regression.
+// Issue 12921.
+func TestSliceOfCustomByte(t *testing.T) {
+ type Uint8 uint8
+
+ a := []Uint8("hello")
+
+ data, err := Marshal(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b []Uint8
+ err = Unmarshal(data, &b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.Fatalf("expected %v == %v", a, b)
+ }
+}
+
+var decodeTypeErrorTests = []struct {
+ dest interface{}
+ src string
+}{
+ {new(string), `{"user": "name"}`}, // issue 4628.
+ {new(error), `{}`}, // issue 4222
+ {new(error), `[]`},
+ {new(error), `""`},
+ {new(error), `123`},
+ {new(error), `true`},
+}
+
+func TestUnmarshalTypeError(t *testing.T) {
+ for _, item := range decodeTypeErrorTests {
+ err := Unmarshal([]byte(item.src), item.dest)
+ if _, ok := err.(*UnmarshalTypeError); !ok {
+ t.Errorf("expected type error for Unmarshal(%q, type %T): got %T",
+ item.src, item.dest, err)
+ }
+ }
+}
+
+var unmarshalSyntaxTests = []string{
+ "tru",
+ "fals",
+ "nul",
+ "123e",
+ `"hello`,
+ `[1,2,3`,
+ `{"key":1`,
+ `{"key":1,`,
+}
+
+func TestUnmarshalSyntax(t *testing.T) {
+ var x interface{}
+ for _, src := range unmarshalSyntaxTests {
+ err := Unmarshal([]byte(src), &x)
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err)
+ }
+ }
+}
+
+// Test handling of unexported fields that should be ignored.
+// Issue 4660
+type unexportedFields struct {
+ Name string
+ m map[string]interface{} `json:"-"`
+ m2 map[string]interface{} `json:"abcd"`
+}
+
+func TestUnmarshalUnexported(t *testing.T) {
+ input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}`
+ want := &unexportedFields{Name: "Bob"}
+
+ out := &unexportedFields{}
+ err := Unmarshal([]byte(input), out)
+ if err != nil {
+ t.Errorf("got error %v, expected nil", err)
+ }
+ if !reflect.DeepEqual(out, want) {
+ t.Errorf("got %q, want %q", out, want)
+ }
+}
+
+// Time3339 is a time.Time which encodes to and from JSON
+// as an RFC 3339 time in UTC.
+type Time3339 time.Time
+
+func (t *Time3339) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b)
+ }
+ tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1]))
+ if err != nil {
+ return err
+ }
+ *t = Time3339(tm)
+ return nil
+}
+
+func TestUnmarshalJSONLiteralError(t *testing.T) {
+ var t3 Time3339
+ err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3)
+ if err == nil {
+ t.Fatalf("expected error; got time %v", time.Time(t3))
+ }
+ if !strings.Contains(err.Error(), "range") {
+ t.Errorf("got err = %v; want out of range error", err)
+ }
+}
+
+// Test that extra object elements in an array do not result in a
+// "data changing underfoot" error.
+// Issue 3717
+func TestSkipArrayObjects(t *testing.T) {
+ json := `[{}]`
+ var dest [0]interface{}
+
+ err := Unmarshal([]byte(json), &dest)
+ if err != nil {
+ t.Errorf("got error %q, want nil", err)
+ }
+}
+
+// Test semantics of pre-filled struct fields and pre-filled map fields.
+// Issue 4900.
+func TestPrefilled(t *testing.T) {
+ ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m }
+
+	// Values here change during Unmarshal, so the table cannot be reused across runs.
+ var prefillTests = []struct {
+ in string
+ ptr interface{}
+ out interface{}
+ }{
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
+ out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
+ },
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}),
+ out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}),
+ },
+ }
+
+ for _, tt := range prefillTests {
+ ptrstr := fmt.Sprintf("%v", tt.ptr)
+ err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
+ if err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(tt.ptr, tt.out) {
+ t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
+ }
+ }
+}
+
+var invalidUnmarshalTests = []struct {
+ v interface{}
+ want string
+}{
+ {nil, "json: Unmarshal(nil)"},
+ {struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+ {(*int)(nil), "json: Unmarshal(nil *int)"},
+}
+
+func TestInvalidUnmarshal(t *testing.T) {
+ buf := []byte(`{"a":"1"}`)
+ for _, tt := range invalidUnmarshalTests {
+ err := Unmarshal(buf, tt.v)
+ if err == nil {
+ t.Errorf("Unmarshal expecting error, got nil")
+ continue
+ }
+ if got := err.Error(); got != tt.want {
+ t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+ }
+ }
+}
+
+var invalidUnmarshalTextTests = []struct {
+ v interface{}
+ want string
+}{
+ {nil, "json: Unmarshal(nil)"},
+ {struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+ {(*int)(nil), "json: Unmarshal(nil *int)"},
+ {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"},
+}
+
+func TestInvalidUnmarshalText(t *testing.T) {
+ buf := []byte(`123`)
+ for _, tt := range invalidUnmarshalTextTests {
+ err := Unmarshal(buf, tt.v)
+ if err == nil {
+ t.Errorf("Unmarshal expecting error, got nil")
+ continue
+ }
+ if got := err.Error(); got != tt.want {
+ t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+ }
+ }
+}
+
+// Test that string option is ignored for invalid types.
+// Issue 9812.
+func TestInvalidStringOption(t *testing.T) {
+ num := 0
+ item := struct {
+ T time.Time `json:",string"`
+ M map[string]string `json:",string"`
+ S []string `json:",string"`
+ A [1]string `json:",string"`
+ I interface{} `json:",string"`
+ P *int `json:",string"`
+ }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num}
+
+ data, err := Marshal(item)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ err = Unmarshal(data, &item)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/gopkg.in/square/go-jose.v1/json/encode.go
new file mode 100644
index 000000000..1dae8bb7c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
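+
+// The tag rules documented above are easiest to see on a small struct. This
+// sketch is an illustration added alongside the vendored file, not part of
+// the upstream package; the exampleTagUsage name and the local record type
+// are assumptions made only for this example.
+func exampleTagUsage() ([]byte, error) {
+	type record struct {
+		Name  string `json:"name"`           // renamed key
+		Note  string `json:"note,omitempty"` // dropped when empty
+		Count int    `json:",string"`        // default key, encoded as a quoted number
+		Debug string `json:"-"`              // always ignored
+	}
+	// With Note and Debug left empty, this marshals as {"name":"a","Count":"1"}.
+	return Marshal(record{Name: "a", Count: 1})
+}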
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
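+
+// A minimal sketch of the escaping described above; it is an illustration
+// added alongside the vendored file, not part of the upstream package, and
+// the exampleHTMLEscape name is an assumption made only for this example.
+func exampleHTMLEscape() string {
+	var b bytes.Buffer
+	// "<", ">", and "&" inside the string literal become \u003c, \u003e, and \u0026.
+	HTMLEscape(&b, []byte(`{"msg":"a<b & c>d"}`))
+	return b.String() // {"msg":"a\u003cb \u0026 c\u003ed"}
+}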
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ valueEncoder(v)(e, v, false)
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build the real one. This indirect
+	// func waits for the real func (f) to be ready and then
+	// calls it; it is only used for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
+ wg.Wait()
+ f(e, v, quoted)
+ }
+ encoderCache.Unlock()
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Type() == numberType {
+ numStr := v.String()
+		// In Go 1.5 the empty string encodes to "0"; while this is not a valid
+		// number literal, we keep compatibility, so check validity after this.
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ if !isValidNumber(numStr) {
+ e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name)
+ e.WriteByte(':')
+ se.fieldEncs[i](e, fv, f.quoted)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+ var sv stringValues = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(k), false)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), false)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), quoted)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, quoted)
+ } else {
+ ce.elseEnc(e, v, quoted)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding string values.
+// It implements sort.Interface to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+				// This encodes bytes < 0x20 except for \t, \n and \r,
+				// as well as <, >, and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+				// This encodes bytes < 0x20 except for \t, \n and \r,
+ // as well as <, >, and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ return f
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Only strings, floats, integers, and booleans can be quoted.
+ quoted := false
+ if opts.Contains("string") {
+ switch ft.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ quoted = true
+ }
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: quoted,
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 and 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go b/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go
new file mode 100644
index 000000000..c00491e00
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go
@@ -0,0 +1,538 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+ "testing"
+ "unicode"
+)
+
+type Optionals struct {
+ Sr string `json:"sr"`
+ So string `json:"so,omitempty"`
+ Sw string `json:"-"`
+
+ Ir int `json:"omitempty"` // actually named omitempty, not an option
+ Io int `json:"io,omitempty"`
+
+ Slr []string `json:"slr,random"`
+ Slo []string `json:"slo,omitempty"`
+
+ Mr map[string]interface{} `json:"mr"`
+ Mo map[string]interface{} `json:",omitempty"`
+
+ Fr float64 `json:"fr"`
+ Fo float64 `json:"fo,omitempty"`
+
+ Br bool `json:"br"`
+ Bo bool `json:"bo,omitempty"`
+
+ Ur uint `json:"ur"`
+ Uo uint `json:"uo,omitempty"`
+
+ Str struct{} `json:"str"`
+ Sto struct{} `json:"sto,omitempty"`
+}
+
+var optionalsExpected = `{
+ "sr": "",
+ "omitempty": 0,
+ "slr": null,
+ "mr": {},
+ "fr": 0,
+ "br": false,
+ "ur": 0,
+ "str": {},
+ "sto": {}
+}`
+
+func TestOmitEmpty(t *testing.T) {
+ var o Optionals
+ o.Sw = "something"
+ o.Mr = map[string]interface{}{}
+ o.Mo = map[string]interface{}{}
+
+ got, err := MarshalIndent(&o, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != optionalsExpected {
+ t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
+ }
+}
+
+type StringTag struct {
+ BoolStr bool `json:",string"`
+ IntStr int64 `json:",string"`
+ StrStr string `json:",string"`
+}
+
+var stringTagExpected = `{
+ "BoolStr": "true",
+ "IntStr": "42",
+ "StrStr": "\"xzbit\""
+}`
+
+func TestStringTag(t *testing.T) {
+ var s StringTag
+ s.BoolStr = true
+ s.IntStr = 42
+ s.StrStr = "xzbit"
+ got, err := MarshalIndent(&s, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != stringTagExpected {
+ t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
+ }
+
+ // Verify that it round-trips.
+ var s2 StringTag
+ err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if !reflect.DeepEqual(s, s2) {
+ t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
+ }
+}
+
+// byte slices are special even if they're renamed types.
+type renamedByte byte
+type renamedByteSlice []byte
+type renamedRenamedByteSlice []renamedByte
+
+func TestEncodeRenamedByteSlice(t *testing.T) {
+ s := renamedByteSlice("abc")
+ result, err := Marshal(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := `"YWJj"`
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+ r := renamedRenamedByteSlice("abc")
+ result, err = Marshal(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+}
+
+var unsupportedValues = []interface{}{
+ math.NaN(),
+ math.Inf(-1),
+ math.Inf(1),
+}
+
+func TestUnsupportedValues(t *testing.T) {
+ for _, v := range unsupportedValues {
+ if _, err := Marshal(v); err != nil {
+ if _, ok := err.(*UnsupportedValueError); !ok {
+ t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
+ }
+ } else {
+ t.Errorf("for %v, expected error", v)
+ }
+ }
+}
+
+// Ref has Marshaler and Unmarshaler methods with pointer receiver.
+type Ref int
+
+func (*Ref) MarshalJSON() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *Ref) UnmarshalJSON([]byte) error {
+ *r = 12
+ return nil
+}
+
+// Val has Marshaler methods with value receiver.
+type Val int
+
+func (Val) MarshalJSON() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+// RefText has Marshaler and Unmarshaler methods with pointer receiver.
+type RefText int
+
+func (*RefText) MarshalText() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *RefText) UnmarshalText([]byte) error {
+ *r = 13
+ return nil
+}
+
+// ValText has Marshaler methods with value receiver.
+type ValText int
+
+func (ValText) MarshalText() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+func TestRefValMarshal(t *testing.T) {
+ var s = struct {
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ V0 Val
+ V1 *Val
+ V2 ValText
+ V3 *ValText
+ }{
+ R0: 12,
+ R1: new(Ref),
+ R2: 14,
+ R3: new(RefText),
+ V0: 13,
+ V1: new(Val),
+ V2: 15,
+ V3: new(ValText),
+ }
+ const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
+ b, err := Marshal(&s)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// C implements Marshaler and returns unescaped JSON.
+type C int
+
+func (C) MarshalJSON() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+// CText implements Marshaler and returns unescaped text.
+type CText int
+
+func (CText) MarshalText() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+func TestMarshalerEscaping(t *testing.T) {
+ var c C
+ want := `"\u003c\u0026\u003e"`
+ b, err := Marshal(c)
+ if err != nil {
+ t.Fatalf("Marshal(c): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(c) = %#q, want %#q", got, want)
+ }
+
+ var ct CText
+ want = `"\"\u003c\u0026\u003e\""`
+ b, err = Marshal(ct)
+ if err != nil {
+ t.Fatalf("Marshal(ct): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
+ }
+}
+
+type IntType int
+
+type MyStruct struct {
+ IntType
+}
+
+func TestAnonymousNonstruct(t *testing.T) {
+ var i IntType = 11
+ a := MyStruct{i}
+ const want = `{"IntType":11}`
+
+ b, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+type BugA struct {
+ S string
+}
+
+type BugB struct {
+ BugA
+ S string
+}
+
+type BugC struct {
+ S string
+}
+
+// Legal Go: We never use the repeated embedded field (S).
+type BugX struct {
+ A int
+ BugA
+ BugB
+}
+
+// Issue 5245.
+func TestEmbeddedBug(t *testing.T) {
+ v := BugB{
+ BugA{"A"},
+ "B",
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"B"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+ // Now check that the duplicate field, S, does not appear.
+ x := BugX{
+ A: 23,
+ }
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want = `{"A":23}`
+ got = string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+type BugD struct { // Same as BugA after tagging.
+ XXX string `json:"S"`
+}
+
+// BugD's tagged S field should dominate BugA's.
+type BugY struct {
+ BugA
+ BugD
+}
+
+// Test that a field with a tag dominates untagged fields.
+func TestTaggedFieldDominates(t *testing.T) {
+ v := BugY{
+ BugA{"BugA"},
+ BugD{"BugD"},
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"BugD"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+// There are no tags here, so S should not appear.
+type BugZ struct {
+ BugA
+ BugC
+ BugY // Contains a tagged S field through BugD; should not dominate.
+}
+
+func TestDuplicatedFieldDisappears(t *testing.T) {
+ v := BugZ{
+ BugA{"BugA"},
+ BugC{"BugC"},
+ BugY{
+ BugA{"nested BugA"},
+ BugD{"nested BugD"},
+ },
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+func TestStringBytes(t *testing.T) {
+ // Test that encodeState.stringBytes and encodeState.string use the same encoding.
+ es := &encodeState{}
+ var r []rune
+ for i := '\u0000'; i <= unicode.MaxRune; i++ {
+ r = append(r, i)
+ }
+ s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too
+ es.string(s)
+
+ esBytes := &encodeState{}
+ esBytes.stringBytes([]byte(s))
+
+ enc := es.Buffer.String()
+ encBytes := esBytes.Buffer.String()
+ if enc != encBytes {
+ i := 0
+ for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
+ i++
+ }
+ enc = enc[i:]
+ encBytes = encBytes[i:]
+ i = 0
+ for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
+ i++
+ }
+ enc = enc[:len(enc)-i]
+ encBytes = encBytes[:len(encBytes)-i]
+
+ if len(enc) > 20 {
+ enc = enc[:20] + "..."
+ }
+ if len(encBytes) > 20 {
+ encBytes = encBytes[:20] + "..."
+ }
+
+ t.Errorf("encodings differ at %#q vs %#q", enc, encBytes)
+ }
+}
+
+func TestIssue6458(t *testing.T) {
+ type Foo struct {
+ M RawMessage
+ }
+ x := Foo{RawMessage(`"foo"`)}
+
+ b, err := Marshal(&x)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := `{"M":"foo"}`; string(b) != want {
+ t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
+ }
+
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want := `{"M":"ImZvbyI="}`; string(b) != want {
+ t.Errorf("Marshal(x) = %#q; want %#q", b, want)
+ }
+}
+
+func TestIssue10281(t *testing.T) {
+ type Foo struct {
+ N Number
+ }
+ x := Foo{Number(`invalid`)}
+
+ b, err := Marshal(&x)
+ if err == nil {
+ t.Errorf("Marshal(&x) = %#q; want error", b)
+ }
+}
+
+func TestHTMLEscape(t *testing.T) {
+ var b, want bytes.Buffer
+ m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
+ want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
+ HTMLEscape(&b, []byte(m))
+ if !bytes.Equal(b.Bytes(), want.Bytes()) {
+ t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
+ }
+}
+
+// golang.org/issue/8582
+func TestEncodePointerString(t *testing.T) {
+ type stringPointer struct {
+ N *int64 `json:"n,string"`
+ }
+ var n int64 = 42
+ b, err := Marshal(stringPointer{N: &n})
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got, want := string(b), `{"n":"42"}`; got != want {
+ t.Errorf("Marshal = %s, want %s", got, want)
+ }
+ var back stringPointer
+ err = Unmarshal(b, &back)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if back.N == nil {
+ t.Fatalf("Unmarshalled nil N field")
+ }
+ if *back.N != 42 {
+ t.Fatalf("*N = %d; want 42", *back.N)
+ }
+}
+
+var encodeStringTests = []struct {
+ in string
+ out string
+}{
+ {"\x00", `"\u0000"`},
+ {"\x01", `"\u0001"`},
+ {"\x02", `"\u0002"`},
+ {"\x03", `"\u0003"`},
+ {"\x04", `"\u0004"`},
+ {"\x05", `"\u0005"`},
+ {"\x06", `"\u0006"`},
+ {"\x07", `"\u0007"`},
+ {"\x08", `"\u0008"`},
+ {"\x09", `"\t"`},
+ {"\x0a", `"\n"`},
+ {"\x0b", `"\u000b"`},
+ {"\x0c", `"\u000c"`},
+ {"\x0d", `"\r"`},
+ {"\x0e", `"\u000e"`},
+ {"\x0f", `"\u000f"`},
+ {"\x10", `"\u0010"`},
+ {"\x11", `"\u0011"`},
+ {"\x12", `"\u0012"`},
+ {"\x13", `"\u0013"`},
+ {"\x14", `"\u0014"`},
+ {"\x15", `"\u0015"`},
+ {"\x16", `"\u0016"`},
+ {"\x17", `"\u0017"`},
+ {"\x18", `"\u0018"`},
+ {"\x19", `"\u0019"`},
+ {"\x1a", `"\u001a"`},
+ {"\x1b", `"\u001b"`},
+ {"\x1c", `"\u001c"`},
+ {"\x1d", `"\u001d"`},
+ {"\x1e", `"\u001e"`},
+ {"\x1f", `"\u001f"`},
+}
+
+func TestEncodeString(t *testing.T) {
+ for _, tt := range encodeStringTests {
+ b, err := Marshal(tt.in)
+ if err != nil {
+ t.Errorf("Marshal(%q): %v", tt.in, err)
+ continue
+ }
+ out := string(b)
+ if out != tt.out {
+ t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/indent.go b/vendor/gopkg.in/square/go-jose.v1/json/indent.go
new file mode 100644
index 000000000..7cd9f4db1
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(&scan, c)
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, c)
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
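+
+// A minimal sketch of the indentation behavior described above; it is an
+// illustration added alongside the vendored file, not part of the upstream
+// package, and the exampleIndent name is an assumption made only for this
+// example.
+func exampleIndent() (string, error) {
+	var out bytes.Buffer
+	// Each object member and array element starts on its own line, indented
+	// by two spaces per nesting level; the empty prefix keeps lines flush left.
+	if err := Indent(&out, []byte(`{"a":[1,2]}`), "", "  "); err != nil {
+		return "", err
+	}
+	return out.String(), nil
+}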
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/number_test.go b/vendor/gopkg.in/square/go-jose.v1/json/number_test.go
new file mode 100644
index 000000000..4e63cf9c7
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/number_test.go
@@ -0,0 +1,133 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "regexp"
+ "testing"
+)
+
+func TestNumberIsValid(t *testing.T) {
+ // From: http://stackoverflow.com/a/13340826
+ var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
+
+ validTests := []string{
+ "0",
+ "-0",
+ "1",
+ "-1",
+ "0.1",
+ "-0.1",
+ "1234",
+ "-1234",
+ "12.34",
+ "-12.34",
+ "12E0",
+ "12E1",
+ "12e34",
+ "12E-0",
+ "12e+1",
+ "12e-34",
+ "-12E0",
+ "-12E1",
+ "-12e34",
+ "-12E-0",
+ "-12e+1",
+ "-12e-34",
+ "1.2E0",
+ "1.2E1",
+ "1.2e34",
+ "1.2E-0",
+ "1.2e+1",
+ "1.2e-34",
+ "-1.2E0",
+ "-1.2E1",
+ "-1.2e34",
+ "-1.2E-0",
+ "-1.2e+1",
+ "-1.2e-34",
+ "0E0",
+ "0E1",
+ "0e34",
+ "0E-0",
+ "0e+1",
+ "0e-34",
+ "-0E0",
+ "-0E1",
+ "-0e34",
+ "-0E-0",
+ "-0e+1",
+ "-0e-34",
+ }
+
+ for _, test := range validTests {
+ if !isValidNumber(test) {
+ t.Errorf("%s should be valid", test)
+ }
+
+ var f float64
+ if err := Unmarshal([]byte(test), &f); err != nil {
+ t.Errorf("%s should be valid but Unmarshal failed: %v", test, err)
+ }
+
+ if !jsonNumberRegexp.MatchString(test) {
+ t.Errorf("%s should be valid but regexp does not match", test)
+ }
+ }
+
+ invalidTests := []string{
+ "",
+ "invalid",
+ "1.0.1",
+ "1..1",
+ "-1-2",
+ "012a42",
+ "01.2",
+ "012",
+ "12E12.12",
+ "1e2e3",
+ "1e+-2",
+ "1e--23",
+ "1e",
+ "e1",
+ "1e+",
+ "1ea",
+ "1a",
+ "1.a",
+ "1.",
+ "01",
+ "1.e1",
+ }
+
+ for _, test := range invalidTests {
+ if isValidNumber(test) {
+ t.Errorf("%s should be invalid", test)
+ }
+
+ var f float64
+ if err := Unmarshal([]byte(test), &f); err == nil {
+ t.Errorf("%s should be invalid but unmarshal wrote %v", test, f)
+ }
+
+ if jsonNumberRegexp.MatchString(test) {
+ t.Errorf("%s should be invalid but matches regexp", test)
+ }
+ }
+}
+
+func BenchmarkNumberIsValid(b *testing.B) {
+ s := "-61657.61667E+61673"
+ for i := 0; i < b.N; i++ {
+ isValidNumber(s)
+ }
+}
+
+func BenchmarkNumberIsValidRegexp(b *testing.B) {
+ var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
+ s := "-61657.61667E+61673"
+ for i := 0; i < b.N; i++ {
+ jsonNumberRegexp.MatchString(s)
+ }
+}
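
The table above pins down the RFC 7159 number grammar that isValidNumber implements. A hypothetical in-package sketch (isValidNumber is unexported, so this only compiles inside the json package):

package json

import "fmt"

// exampleIsValidNumber is illustrative only and not part of the vendored file.
func exampleIsValidNumber() {
	fmt.Println(isValidNumber("-12e+1")) // true: optional sign, integer part, exponent
	fmt.Println(isValidNumber("012"))    // false: leading zeros are rejected
	fmt.Println(isValidNumber("1."))     // false: a fraction needs at least one digit
}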
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/scanner.go b/vendor/gopkg.in/square/go-jose.v1/json/scanner.go
new file mode 100644
index 000000000..ee6622e8c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/scanner.go
@@ -0,0 +1,623 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, c)
+ if v >= scanEndObject {
+ switch v {
+ // probe the scanner with a space to determine whether we will
+ // get scanEnd on the next character. Otherwise, if the next character
+ // is not a space, stateEndTop allocates a needless error.
+ case scanEndObject, scanEndArray:
+ if scan.step(scan, ' ') == scanEnd {
+ return data[:i+1], data[i+1:], nil
+ }
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, byte) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*scanner, byte) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.step and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.step: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+ s.redo = false
+ s.step = s.redoState
+ return s.redoCode
+}
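
The scanner is driven one byte at a time and reports opcodes such as scanBeginObject and scanEndArray, as the type comment above describes. A hypothetical in-package helper (not part of the vendored file) showing how a caller can react to those opcodes, here to measure nesting depth:

package json

// maxDepth reports the maximum nesting depth of a JSON document by feeding
// the scanner byte by byte and counting begin/end opcodes.
func maxDepth(data []byte) (int, error) {
	var scan scanner
	scan.reset()
	depth, deepest := 0, 0
	for _, c := range data {
		scan.bytes++
		switch scan.step(&scan, c) {
		case scanBeginObject, scanBeginArray:
			depth++
			if depth > deepest {
				deepest = depth
			}
		case scanEndObject, scanEndArray:
			depth--
		case scanError:
			return 0, scan.err
		}
	}
	if scan.eof() == scanError {
		return 0, scan.err
	}
	return deepest, nil
}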
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go b/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go
new file mode 100644
index 000000000..66383ef0e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go
@@ -0,0 +1,316 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "math/rand"
+ "reflect"
+ "testing"
+)
+
+// Tests of simple examples.
+
+type example struct {
+ compact string
+ indent string
+}
+
+var examples = []example{
+ {`1`, `1`},
+ {`{}`, `{}`},
+ {`[]`, `[]`},
+ {`{"":2}`, "{\n\t\"\": 2\n}"},
+ {`[3]`, "[\n\t3\n]"},
+ {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
+ {`{"x":1}`, "{\n\t\"x\": 1\n}"},
+ {ex1, ex1i},
+}
+
+var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
+
+var ex1i = `[
+ true,
+ false,
+ null,
+ "x",
+ 1,
+ 1.5,
+ 0,
+ -5e+2
+]`
+
+func TestCompact(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.compact)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.compact, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
+ }
+
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.indent)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.indent, err)
+ continue
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
+ }
+ }
+}
+
+func TestCompactSeparators(t *testing.T) {
+ // U+2028 and U+2029 should be escaped inside strings.
+ // They should not appear outside strings.
+ tests := []struct {
+ in, compact string
+ }{
+ {"{\"\u2028\": 1}", `{"\u2028":1}`},
+ {"{\"\u2029\" :2}", `{"\u2029":2}`},
+ }
+ for _, tt := range tests {
+ var buf bytes.Buffer
+ if err := Compact(&buf, []byte(tt.in)); err != nil {
+ t.Errorf("Compact(%q): %v", tt.in, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
+ }
+ }
+}
+
+func TestIndent(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.indent, err)
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
+ }
+
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.compact, err)
+ continue
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
+ }
+ }
+}
+
+// Tests of a large random structure.
+
+func TestCompactBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Compact(&buf, jsonBig); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b := buf.Bytes()
+ if !bytes.Equal(b, jsonBig) {
+ t.Error("Compact(jsonBig) != jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+func TestIndentBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
+ t.Fatalf("Indent1: %v", err)
+ }
+ b := buf.Bytes()
+ if len(b) == len(jsonBig) {
+ // jsonBig is compact (no unnecessary spaces);
+ // indenting should make it bigger
+ t.Fatalf("Indent(jsonBig) did not get bigger")
+ }
+
+ // should be idempotent
+ var buf1 bytes.Buffer
+ if err := Indent(&buf1, b, "", "\t"); err != nil {
+ t.Fatalf("Indent2: %v", err)
+ }
+ b1 := buf1.Bytes()
+ if !bytes.Equal(b1, b) {
+ t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
+ diff(t, b1, b)
+ return
+ }
+
+ // should get back to original
+ buf1.Reset()
+ if err := Compact(&buf1, b); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b1 = buf1.Bytes()
+ if !bytes.Equal(b1, jsonBig) {
+ t.Error("Compact(Indent(jsonBig)) != jsonBig")
+ diff(t, b1, jsonBig)
+ return
+ }
+}
+
+type indentErrorTest struct {
+ in string
+ err error
+}
+
+var indentErrorTests = []indentErrorTest{
+ {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
+ {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
+}
+
+func TestIndentErrors(t *testing.T) {
+ for i, tt := range indentErrorTests {
+ slice := make([]uint8, 0)
+ buf := bytes.NewBuffer(slice)
+ if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: Indent: %#v", i, err)
+ continue
+ }
+ }
+ }
+}
+
+func TestNextValueBig(t *testing.T) {
+ initBig()
+ var scan scanner
+ item, rest, err := nextValue(jsonBig, &scan)
+ if err != nil {
+ t.Fatalf("nextValue: %s", err)
+ }
+ if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if len(rest) != 0 {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+
+ item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
+ if err != nil {
+ t.Fatalf("nextValue extra: %s", err)
+ }
+ if len(item) != len(jsonBig) {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if string(rest) != "HELLO WORLD" {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+}
+
+var benchScan scanner
+
+func BenchmarkSkipValue(b *testing.B) {
+ initBig()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ nextValue(jsonBig, &benchScan)
+ }
+ b.SetBytes(int64(len(jsonBig)))
+}
+
+func diff(t *testing.T, a, b []byte) {
+ for i := 0; ; i++ {
+ if i >= len(a) || i >= len(b) || a[i] != b[i] {
+ j := i - 10
+ if j < 0 {
+ j = 0
+ }
+ t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
+ return
+ }
+ }
+}
+
+func trim(b []byte) []byte {
+ if len(b) > 20 {
+ return b[0:20]
+ }
+ return b
+}
+
+// Generate a random JSON object.
+
+var jsonBig []byte
+
+func initBig() {
+ n := 10000
+ if testing.Short() {
+ n = 100
+ }
+ b, err := Marshal(genValue(n))
+ if err != nil {
+ panic(err)
+ }
+ jsonBig = b
+}
+
+func genValue(n int) interface{} {
+ if n > 1 {
+ switch rand.Intn(2) {
+ case 0:
+ return genArray(n)
+ case 1:
+ return genMap(n)
+ }
+ }
+ switch rand.Intn(3) {
+ case 0:
+ return rand.Intn(2) == 0
+ case 1:
+ return rand.NormFloat64()
+ case 2:
+ return genString(30)
+ }
+ panic("unreachable")
+}
+
+func genString(stddev float64) string {
+ n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
+ c := make([]rune, n)
+ for i := range c {
+ f := math.Abs(rand.NormFloat64()*64 + 32)
+ if f > 0x10ffff {
+ f = 0x10ffff
+ }
+ c[i] = rune(f)
+ }
+ return string(c)
+}
+
+func genArray(n int) []interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if f < 1 {
+ f = 1
+ }
+ x := make([]interface{}, f)
+ for i := range x {
+ x[i] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
+
+func genMap(n int) map[string]interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if n > 0 && f == 0 {
+ f = 1
+ }
+ x := make(map[string]interface{})
+ for i := 0; i < f; i++ {
+ x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/stream.go b/vendor/gopkg.in/square/go-jose.v1/json/stream.go
new file mode 100644
index 000000000..8ddcf4d27
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/stream.go
@@ -0,0 +1,480 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scanp int // start of unread data in buf
+ scan scanner
+ err error
+
+ tokenState int
+ tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ if err := dec.tokenPrepareForDecode(); err != nil {
+ return err
+ }
+
+ if !dec.tokenValueAllowed() {
+ return &SyntaxError{msg: "not at beginning of value"}
+ }
+
+ // Read whole value into buffer.
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+ dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+ dec.scanp += n
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ err = dec.d.unmarshal(v)
+
+ // fixup token streaming state
+ dec.tokenValueEnd()
+
+ return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := dec.scanp
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, c)
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ n := scanp - dec.scanp
+ err = dec.refill()
+ scanp = dec.scanp + n
+ }
+ return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if dec.scanp > 0 {
+ n := copy(dec.buf, dec.buf[dec.scanp:])
+ dec.buf = dec.buf[:n]
+ dec.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+ return err
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ if _, err = enc.w.Write(e.Bytes()); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
+
+// A Token holds a value of one of these types:
+//
+// Delim, for the four JSON delimiters [ ] { }
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+//
+type Token interface{}
+
+const (
+ tokenTopValue = iota
+ tokenArrayStart
+ tokenArrayValue
+ tokenArrayComma
+ tokenObjectStart
+ tokenObjectKey
+ tokenObjectColon
+ tokenObjectValue
+ tokenObjectComma
+)
+
+// advance tokenState from a separator state to a value state
+func (dec *Decoder) tokenPrepareForDecode() error {
+ // Note: Not calling peek before switch, to avoid
+ // putting peek into the standard Decode path.
+ // peek is only called when using the Token API.
+ switch dec.tokenState {
+ case tokenArrayComma:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ',' {
+ return &SyntaxError{"expected comma after array element", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ case tokenObjectColon:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ':' {
+ return &SyntaxError{"expected colon after object key", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ }
+ return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+ switch dec.tokenState {
+ case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ return true
+ }
+ return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+ switch dec.tokenState {
+ case tokenArrayStart, tokenArrayValue:
+ dec.tokenState = tokenArrayComma
+ case tokenObjectValue:
+ dec.tokenState = tokenObjectComma
+ }
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+ return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+ for {
+ c, err := dec.peek()
+ if err != nil {
+ return nil, err
+ }
+ switch c {
+ case '[':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenArrayStart
+ return Delim('['), nil
+
+ case ']':
+ if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim(']'), nil
+
+ case '{':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenObjectStart
+ return Delim('{'), nil
+
+ case '}':
+ if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim('}'), nil
+
+ case ':':
+ if dec.tokenState != tokenObjectColon {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ continue
+
+ case ',':
+ if dec.tokenState == tokenArrayComma {
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ continue
+ }
+ if dec.tokenState == tokenObjectComma {
+ dec.scanp++
+ dec.tokenState = tokenObjectKey
+ continue
+ }
+ return dec.tokenError(c)
+
+ case '"':
+ if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+ var x string
+ old := dec.tokenState
+ dec.tokenState = tokenTopValue
+ err := dec.Decode(&x)
+ dec.tokenState = old
+ if err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ dec.tokenState = tokenObjectColon
+ return x, nil
+ }
+ fallthrough
+
+ default:
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ return x, nil
+ }
+ }
+}
+
+func clearOffset(err error) {
+ if s, ok := err.(*SyntaxError); ok {
+ s.Offset = 0
+ }
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+ var context string
+ switch dec.tokenState {
+ case tokenTopValue:
+ context = " looking for beginning of value"
+ case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ context = " looking for beginning of value"
+ case tokenArrayComma:
+ context = " after array element"
+ case tokenObjectKey:
+ context = " looking for beginning of object key string"
+ case tokenObjectColon:
+ context = " after object key"
+ case tokenObjectComma:
+ context = " after object key:value pair"
+ }
+ return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+ c, err := dec.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := dec.scanp; i < len(dec.buf); i++ {
+ c := dec.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ dec.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ return 0, err
+ }
+ err = dec.refill()
+ }
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error {
+ ...
+}
+
+*/
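
Decoder.Token walks the same scanner incrementally and hands back delimiters and basic values one at a time. A minimal sketch of streaming a small object through it (the import path is an assumption based on the fork tests later in this change):

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/square/go-jose.v1/json"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"kid":"1","use":"sig"}`))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break // end of input
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T: %v\n", tok, tok) // Delim, string, bool, float64 or nil
	}
}

Decode can be interleaved with Token at value positions, which is what TestDecodeInStream later in this diff exercises.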
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go b/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go
new file mode 100644
index 000000000..eccf365b2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go
@@ -0,0 +1,354 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+// Test values for the stream test.
+// One of each JSON kind.
+var streamTest = []interface{}{
+ 0.1,
+ "hello",
+ nil,
+ true,
+ false,
+ []interface{}{"a", "b", "c"},
+ map[string]interface{}{"K": "Kelvin", "ß": "long s"},
+ 3.14, // another value to make sure something can follow map
+}
+
+var streamEncoded = `0.1
+"hello"
+null
+true
+false
+["a","b","c"]
+{"ß":"long s","K":"Kelvin"}
+3.14
+`
+
+func TestEncoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ for j, v := range streamTest[0:i] {
+ if err := enc.Encode(v); err != nil {
+ t.Fatalf("encode #%d: %v", j, err)
+ }
+ }
+ if have, want := buf.String(), nlines(streamEncoded, i); have != want {
+ t.Errorf("encoding %d items: mismatch", i)
+ diff(t, []byte(have), []byte(want))
+ break
+ }
+ }
+}
+
+func TestDecoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ // Use stream without newlines as input,
+ // just to stress the decoder even more.
+ // Our test input does not include back-to-back numbers.
+ // Otherwise stripping the newlines would
+ // merge two adjacent JSON values.
+ var buf bytes.Buffer
+ for _, c := range nlines(streamEncoded, i) {
+ if c != '\n' {
+ buf.WriteRune(c)
+ }
+ }
+ out := make([]interface{}, i)
+ dec := NewDecoder(&buf)
+ for j := range out {
+ if err := dec.Decode(&out[j]); err != nil {
+ t.Fatalf("decode #%d/%d: %v", j, i, err)
+ }
+ }
+ if !reflect.DeepEqual(out, streamTest[0:i]) {
+ t.Errorf("decoding %d items: mismatch", i)
+ for j := range out {
+ if !reflect.DeepEqual(out[j], streamTest[j]) {
+ t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
+ }
+ }
+ break
+ }
+ }
+}
+
+func TestDecoderBuffered(t *testing.T) {
+ r := strings.NewReader(`{"Name": "Gopher"} extra `)
+ var m struct {
+ Name string
+ }
+ d := NewDecoder(r)
+ err := d.Decode(&m)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Name != "Gopher" {
+ t.Errorf("Name = %q; want Gopher", m.Name)
+ }
+ rest, err := ioutil.ReadAll(d.Buffered())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, w := string(rest), " extra "; g != w {
+ t.Errorf("Remaining = %q; want %q", g, w)
+ }
+}
+
+func nlines(s string, n int) string {
+ if n <= 0 {
+ return ""
+ }
+ for i, c := range s {
+ if c == '\n' {
+ if n--; n == 0 {
+ return s[0 : i+1]
+ }
+ }
+ }
+ return s
+}
+
+func TestRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ const raw = `["\u0056",null]`
+ const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if string([]byte(*data.Id)) != raw {
+ t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+func TestNullRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ data.Id = new(RawMessage)
+ const msg = `{"X":0.1,"Id":null,"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if data.Id != nil {
+ t.Fatalf("Raw mismatch: have non-nil, want nil")
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+var blockingTests = []string{
+ `{"x": 1}`,
+ `[1, 2, 3]`,
+}
+
+func TestBlocking(t *testing.T) {
+ for _, enc := range blockingTests {
+ r, w := net.Pipe()
+ go w.Write([]byte(enc))
+ var val interface{}
+
+ // If Decode reads beyond what w.Write writes above,
+ // it will block, and the test will deadlock.
+ if err := NewDecoder(r).Decode(&val); err != nil {
+ t.Errorf("decoding %s: %v", enc, err)
+ }
+ r.Close()
+ w.Close()
+ }
+}
+
+func BenchmarkEncoderEncode(b *testing.B) {
+ b.ReportAllocs()
+ type T struct {
+ X, Y string
+ }
+ v := &T{"foo", "bar"}
+ for i := 0; i < b.N; i++ {
+ if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+type tokenStreamCase struct {
+ json string
+ expTokens []interface{}
+}
+
+type decodeThis struct {
+ v interface{}
+}
+
+var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
+ // streaming token cases
+ {json: `10`, expTokens: []interface{}{float64(10)}},
+ {json: ` [10] `, expTokens: []interface{}{
+ Delim('['), float64(10), Delim(']')}},
+ {json: ` [false,10,"b"] `, expTokens: []interface{}{
+ Delim('['), false, float64(10), "b", Delim(']')}},
+ {json: `{ "a": 1 }`, expTokens: []interface{}{
+ Delim('{'), "a", float64(1), Delim('}')}},
+ {json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
+ Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
+ {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ Delim('{'), "a", float64(1), Delim('}'),
+ Delim('{'), "a", float64(2), Delim('}'),
+ Delim(']')}},
+ {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
+ Delim('}')}},
+ {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('['),
+ Delim('{'), "a", float64(1), Delim('}'),
+ Delim(']'), Delim('}')}},
+
+ // streaming tokens with intermittent Decode()
+ {json: `{ "a": 1 }`, expTokens: []interface{}{
+ Delim('{'), "a",
+ decodeThis{float64(1)},
+ Delim('}')}},
+ {json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim(']')}},
+ {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ decodeThis{map[string]interface{}{"a": float64(2)}},
+ Delim(']')}},
+ {json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim(']'), Delim('}')}},
+
+ {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
+ Delim('{'), "obj",
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim('}')}},
+ {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
+ Delim('{'), "obj",
+ decodeThis{[]interface{}{
+ map[string]interface{}{"a": float64(1)},
+ }},
+ Delim('}')}},
+ {json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ decodeThis{&SyntaxError{"expected comma after array element", 0}},
+ }},
+ {json: `{ "a" 1 }`, expTokens: []interface{}{
+ Delim('{'), "a",
+ decodeThis{&SyntaxError{"expected colon after object key", 0}},
+ }},
+}
+
+func TestDecodeInStream(t *testing.T) {
+
+ for ci, tcase := range tokenStreamCases {
+
+ dec := NewDecoder(strings.NewReader(tcase.json))
+ for i, etk := range tcase.expTokens {
+
+ var tk interface{}
+ var err error
+
+ if dt, ok := etk.(decodeThis); ok {
+ etk = dt.v
+ err = dec.Decode(&tk)
+ } else {
+ tk, err = dec.Token()
+ }
+ if experr, ok := etk.(error); ok {
+ if err == nil || err.Error() != experr.Error() {
+ t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
+ }
+ break
+ } else if err == io.EOF {
+ t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
+ break
+ } else if err != nil {
+ t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
+ break
+ }
+ if !reflect.DeepEqual(tk, etk) {
+ t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
+ break
+ }
+ }
+ }
+
+}
+
+// Test from golang.org/issue/11893
+func TestHTTPDecoding(t *testing.T) {
+ const raw = `{ "foo": "bar" }`
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(raw))
+ }))
+ defer ts.Close()
+ res, err := http.Get(ts.URL)
+ if err != nil {
+ log.Fatalf("GET failed: %v", err)
+ }
+ defer res.Body.Close()
+
+ foo := struct {
+ Foo string `json:"foo"`
+ }{}
+
+ d := NewDecoder(res.Body)
+ err = d.Decode(&foo)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if foo.Foo != "bar" {
+ t.Errorf("decoded %q; want \"bar\"", foo.Foo)
+ }
+
+ // make sure we get the EOF the second time
+ err = d.Decode(&foo)
+ if err != io.EOF {
+ t.Errorf("err = %v; want io.EOF", err)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go b/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go
new file mode 100644
index 000000000..85bb4ba83
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go
@@ -0,0 +1,115 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+type basicLatin2xTag struct {
+ V string `json:"$%-/"`
+}
+
+type basicLatin3xTag struct {
+ V string `json:"0123456789"`
+}
+
+type basicLatin4xTag struct {
+ V string `json:"ABCDEFGHIJKLMO"`
+}
+
+type basicLatin5xTag struct {
+ V string `json:"PQRSTUVWXYZ_"`
+}
+
+type basicLatin6xTag struct {
+ V string `json:"abcdefghijklmno"`
+}
+
+type basicLatin7xTag struct {
+ V string `json:"pqrstuvwxyz"`
+}
+
+type miscPlaneTag struct {
+ V string `json:"色は匂へど"`
+}
+
+type percentSlashTag struct {
+ V string `json:"text/html%"` // https://golang.org/issue/2718
+}
+
+type punctuationTag struct {
+ V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546
+}
+
+type emptyTag struct {
+ W string
+}
+
+type misnamedTag struct {
+ X string `jsom:"Misnamed"`
+}
+
+type badFormatTag struct {
+ Y string `:"BadFormat"`
+}
+
+type badCodeTag struct {
+ Z string `json:" !\"#&'()*+,."`
+}
+
+type spaceTag struct {
+ Q string `json:"With space"`
+}
+
+type unicodeTag struct {
+ W string `json:"Ελλάδα"`
+}
+
+var structTagObjectKeyTests = []struct {
+ raw interface{}
+ value string
+ key string
+}{
+ {basicLatin2xTag{"2x"}, "2x", "$%-/"},
+ {basicLatin3xTag{"3x"}, "3x", "0123456789"},
+ {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
+ {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
+ {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
+ {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
+ {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
+ {emptyTag{"Pour Moi"}, "Pour Moi", "W"},
+ {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
+ {badFormatTag{"Orfevre"}, "Orfevre", "Y"},
+ {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+ {percentSlashTag{"brut"}, "brut", "text/html%"},
+ {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"},
+ {spaceTag{"Perreddu"}, "Perreddu", "With space"},
+ {unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
+}
+
+func TestStructTagObjectKey(t *testing.T) {
+ for _, tt := range structTagObjectKeyTests {
+ b, err := Marshal(tt.raw)
+ if err != nil {
+ t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
+ }
+ var f interface{}
+ err = Unmarshal(b, &f)
+ if err != nil {
+ t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
+ }
+ for i, v := range f.(map[string]interface{}) {
+ switch i {
+ case tt.key:
+ if s, ok := v.(string); !ok || s != tt.value {
+ t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
+ }
+ default:
+ t.Fatalf("Unexpected key: %#q, from %#q", i, b)
+ }
+ }
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/tags.go b/vendor/gopkg.in/square/go-jose.v1/json/tags.go
new file mode 100644
index 000000000..c38fd5102
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the particular optionName flag. optionName must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
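
parseTag splits a struct tag into its name and trailing options, and tagOptions.Contains matches whole comma-separated entries rather than substrings. A hypothetical in-package sketch (both identifiers are unexported):

package json

import "fmt"

// exampleParseTag is illustrative only and not part of the vendored file.
func exampleParseTag() {
	name, opts := parseTag("expires,omitempty,string")
	fmt.Println(name)                       // "expires"
	fmt.Println(opts.Contains("omitempty")) // true
	fmt.Println(opts.Contains("omit"))      // false: whole entries only, no prefix matching
}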
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go b/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go
new file mode 100644
index 000000000..91fb18831
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go
@@ -0,0 +1,28 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+func TestTagParsing(t *testing.T) {
+ name, opts := parseTag("field,foobar,foo")
+ if name != "field" {
+ t.Fatalf("name = %q, want field", name)
+ }
+ for _, tt := range []struct {
+ opt string
+ want bool
+ }{
+ {"foobar", true},
+ {"foo", true},
+ {"bar", false},
+ } {
+ if opts.Contains(tt.opt) != tt.want {
+ t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz b/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz
new file mode 100644
index 000000000..0e2895b53
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz
Binary files differ
diff --git a/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go b/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go
new file mode 100644
index 000000000..686df51b2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go
@@ -0,0 +1,116 @@
+// +build !std_json
+
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "testing"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+type CaseSensitive struct {
+ A int `json:"Test"`
+ B int `json:"test"`
+ C int `json:"TEST"`
+}
+
+type UnicodeTest struct {
+ Sig string `json:"sig"`
+}
+
+func TestUnicodeComparison(t *testing.T) {
+ // Some tests from RFC 7515, Section 10.13
+ raw := []byte(`{"\u0073ig":"foo"}`)
+ var ut UnicodeTest
+ err := json.Unmarshal(raw, &ut)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if ut.Sig != "foo" {
+ t.Error("strings 'sig' and '\\u0073ig' should be equal")
+ }
+
+ raw = []byte(`{"si\u0047":"bar"}`)
+ var ut2 UnicodeTest
+ err = json.Unmarshal(raw, &ut2)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if ut2.Sig != "" {
+ t.Error("strings 'sig' and 'si\\u0047' should not be equal")
+ }
+}
+
+func TestCaseSensitiveJSON(t *testing.T) {
+ raw := []byte(`{"test":42}`)
+ var cs CaseSensitive
+ err := json.Unmarshal(raw, &cs)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if cs.A != 0 || cs.B != 42 || cs.C != 0 {
+ t.Errorf("parsing JSON should be case-sensitive (got %v)", cs)
+ }
+}
+
+func TestErrorOnTrailingCharacters(t *testing.T) {
+ raw := []byte(`{"test":42}asdf`)
+ var m map[string]interface{}
+ err := json.Unmarshal(raw, &m)
+ if err == nil {
+ t.Error("json.Unmarshal should fail if string has trailing chars")
+ }
+}
+
+func TestRejectDuplicateKeysObject(t *testing.T) {
+ raw := []byte(`{"test":42,"test":43}`)
+ var cs CaseSensitive
+ err := json.Unmarshal(raw, &cs)
+ if err == nil {
+ t.Error("should reject JSON with duplicate keys, but didn't")
+ }
+}
+
+func TestRejectDuplicateKeysInterface(t *testing.T) {
+ raw := []byte(`{"test":42,"test":43}`)
+ var m interface{}
+ err := json.Unmarshal(raw, &m)
+ if err == nil {
+ t.Error("should reject JSON with duplicate keys, but didn't")
+ }
+}
+
+func TestParseCaseSensitiveJWE(t *testing.T) {
+ invalidJWE := `{"protected":"eyJlbmMiOiJYWVoiLCJBTEciOiJYWVoifQo","encrypted_key":"QUJD","iv":"QUJD","ciphertext":"QUJD","tag":"QUJD"}`
+ _, err := ParseEncrypted(invalidJWE)
+ if err == nil {
+ t.Error("Able to parse message with case-invalid headers", invalidJWE)
+ }
+}
+
+func TestParseCaseSensitiveJWS(t *testing.T) {
+ invalidJWS := `{"PAYLOAD":"CUJD","signatures":[{"protected":"e30","signature":"CUJD"}]}`
+ _, err := ParseSigned(invalidJWS)
+ if err == nil {
+ t.Error("Able to parse message with case-invalid headers", invalidJWS)
+ }
+}
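
These tests document what the fork changes relative to encoding/json: member names are compared case-sensitively and duplicate object keys are rejected, with the std_json build tag switching back to the standard library. A minimal sketch of the duplicate-key behaviour from a consuming program:

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v1/json"
)

func main() {
	var v interface{}
	err := json.Unmarshal([]byte(`{"kid":"1","kid":"2"}`), &v)
	fmt.Println(err != nil) // true: the fork rejects duplicate object keys
}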
diff --git a/vendor/gopkg.in/square/go-jose.v1/jwe.go b/vendor/gopkg.in/square/go-jose.v1/jwe.go
new file mode 100644
index 000000000..7eb8956d2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jwe.go
@@ -0,0 +1,280 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+// rawJsonWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
+type rawJsonWebEncryption struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Unprotected *rawHeader `json:"unprotected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Recipients []rawRecipientInfo `json:"recipients,omitempty"`
+ Aad *byteBuffer `json:"aad,omitempty"`
+ EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
+ Iv *byteBuffer `json:"iv,omitempty"`
+ Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
+ Tag *byteBuffer `json:"tag,omitempty"`
+}
+
+// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
+type rawRecipientInfo struct {
+ Header *rawHeader `json:"header,omitempty"`
+ EncryptedKey string `json:"encrypted_key,omitempty"`
+}
+
+// JsonWebEncryption represents an encrypted JWE object after parsing.
+type JsonWebEncryption struct {
+ Header JoseHeader
+ protected, unprotected *rawHeader
+ recipients []recipientInfo
+ aad, iv, ciphertext, tag []byte
+ original *rawJsonWebEncryption
+}
+
+// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
+type recipientInfo struct {
+ header *rawHeader
+ encryptedKey []byte
+}
+
+// GetAuthData retrieves the (optional) authenticated data attached to the object.
+func (obj JsonWebEncryption) GetAuthData() []byte {
+ if obj.aad != nil {
+ out := make([]byte, len(obj.aad))
+ copy(out, obj.aad)
+ return out
+ }
+
+ return nil
+}
+
+// Get the merged header values
+func (obj JsonWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
+ out := rawHeader{}
+ out.merge(obj.protected)
+ out.merge(obj.unprotected)
+
+ if recipient != nil {
+ out.merge(recipient.header)
+ }
+
+ return out
+}
+
+// computeAuthData computes the additional authenticated data for a JWE object: the base64url-encoded protected header, optionally followed by "." and the base64url-encoded AAD.
+func (obj JsonWebEncryption) computeAuthData() []byte {
+ var protected string
+
+ if obj.original != nil {
+ protected = obj.original.Protected.base64()
+ } else {
+ protected = base64URLEncode(mustSerializeJSON(obj.protected))
+ }
+
+ output := []byte(protected)
+ if obj.aad != nil {
+ output = append(output, '.')
+ output = append(output, []byte(base64URLEncode(obj.aad))...)
+ }
+
+ return output
+}
+
+// ParseEncrypted parses an encrypted message in compact or full serialization format.
+func ParseEncrypted(input string) (*JsonWebEncryption, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseEncryptedFull(input)
+ }
+
+ return parseEncryptedCompact(input)
+}
+
+// parseEncryptedFull parses a message in the full JSON serialization format.
+func parseEncryptedFull(input string) (*JsonWebEncryption, error) {
+ var parsed rawJsonWebEncryption
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWE object from the raw JSON.
+func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) {
+ obj := &JsonWebEncryption{
+ original: parsed,
+ unprotected: parsed.Unprotected,
+ }
+
+ // Check that there is not a nonce in the unprotected headers
+ if (parsed.Unprotected != nil && parsed.Unprotected.Nonce != "") ||
+ (parsed.Header != nil && parsed.Header.Nonce != "") {
+ return nil, ErrUnprotectedNonce
+ }
+
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
+ }
+ }
+
+ // Note: this must be called _after_ we parse the protected header,
+ // otherwise fields from the protected header will not get picked up.
+ obj.Header = obj.mergedHeaders(nil).sanitized()
+
+ if len(parsed.Recipients) == 0 {
+ obj.recipients = []recipientInfo{
+ recipientInfo{
+ header: parsed.Header,
+ encryptedKey: parsed.EncryptedKey.bytes(),
+ },
+ }
+ } else {
+ obj.recipients = make([]recipientInfo, len(parsed.Recipients))
+ for r := range parsed.Recipients {
+ encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.Nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ obj.recipients[r].header = parsed.Recipients[r].Header
+ obj.recipients[r].encryptedKey = encryptedKey
+ }
+ }
+
+ for _, recipient := range obj.recipients {
+ headers := obj.mergedHeaders(&recipient)
+ if headers.Alg == "" || headers.Enc == "" {
+ return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
+ }
+ }
+
+ obj.iv = parsed.Iv.bytes()
+ obj.ciphertext = parsed.Ciphertext.bytes()
+ obj.tag = parsed.Tag.bytes()
+ obj.aad = parsed.Aad.bytes()
+
+ return obj, nil
+}
+
+// parseEncryptedCompact parses a message in compact format.
+func parseEncryptedCompact(input string) (*JsonWebEncryption, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 5 {
+ return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
+ }
+
+ rawProtected, err := base64URLDecode(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ encryptedKey, err := base64URLDecode(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ iv, err := base64URLDecode(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext, err := base64URLDecode(parts[3])
+ if err != nil {
+ return nil, err
+ }
+
+ tag, err := base64URLDecode(parts[4])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJsonWebEncryption{
+ Protected: newBuffer(rawProtected),
+ EncryptedKey: newBuffer(encryptedKey),
+ Iv: newBuffer(iv),
+ Ciphertext: newBuffer(ciphertext),
+ Tag: newBuffer(tag),
+ }
+
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JsonWebEncryption) CompactSerialize() (string, error) {
+ if len(obj.recipients) != 1 || obj.unprotected != nil ||
+ obj.protected == nil || obj.recipients[0].header != nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s.%s.%s",
+ base64URLEncode(serializedProtected),
+ base64URLEncode(obj.recipients[0].encryptedKey),
+ base64URLEncode(obj.iv),
+ base64URLEncode(obj.ciphertext),
+ base64URLEncode(obj.tag)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JsonWebEncryption) FullSerialize() string {
+ raw := rawJsonWebEncryption{
+ Unprotected: obj.unprotected,
+ Iv: newBuffer(obj.iv),
+ Ciphertext: newBuffer(obj.ciphertext),
+ EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
+ Tag: newBuffer(obj.tag),
+ Aad: newBuffer(obj.aad),
+ Recipients: []rawRecipientInfo{},
+ }
+
+ if len(obj.recipients) > 1 {
+ for _, recipient := range obj.recipients {
+ info := rawRecipientInfo{
+ Header: recipient.header,
+ EncryptedKey: base64URLEncode(recipient.encryptedKey),
+ }
+ raw.Recipients = append(raw.Recipients, info)
+ }
+ } else {
+ // Use flattened serialization
+ raw.Header = obj.recipients[0].header
+ raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
+ }
+
+ if obj.protected != nil {
+ raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
+ }
+
+ return string(mustSerializeJSON(raw))
+}
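
For orientation, a minimal usage sketch of the entry points defined above (ParseEncrypted, Decrypt, CompactSerialize, FullSerialize). The package alias, helper name, and error handling are illustrative assumptions, not part of the vendored file:

package example

import (
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

// decryptAndReserialize shows the typical parse -> decrypt -> serialize flow.
func decryptAndReserialize(serialized string, decryptionKey interface{}) error {
    // ParseEncrypted accepts both the compact (five dot-separated parts) and
    // the full/flattened JSON serializations.
    obj, err := jose.ParseEncrypted(serialized)
    if err != nil {
        return err
    }

    // Decrypt with the recipient's key, e.g. an *rsa.PrivateKey or []byte.
    plaintext, err := obj.Decrypt(decryptionKey)
    if err != nil {
        return err
    }
    fmt.Printf("plaintext: %s\n", plaintext)

    // Compact serialization only works for a single recipient with no
    // unprotected or per-recipient headers; FullSerialize always succeeds.
    if compact, err := obj.CompactSerialize(); err == nil {
        fmt.Println("compact:", compact)
    }
    fmt.Println("full:", obj.FullSerialize())
    return nil
}
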
diff --git a/vendor/gopkg.in/square/go-jose.v1/jwe_test.go b/vendor/gopkg.in/square/go-jose.v1/jwe_test.go
new file mode 100644
index 000000000..ab03fd000
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jwe_test.go
@@ -0,0 +1,537 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "math/big"
+ "testing"
+)
+
+func TestCompactParseJWE(t *testing.T) {
+ // Should parse
+ msg := "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.dGVzdA"
+ _, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Error("Unable to parse valid message:", err)
+ }
+
+ // Messages that should fail to parse
+ failures := []string{
+ // Too many parts
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.dGVzdA.dGVzdA",
+ // Not enough parts
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA",
+ // Invalid encrypted key
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.//////.dGVzdA.dGVzdA.dGVzdA",
+ // Invalid IV
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.//////.dGVzdA.dGVzdA",
+ // Invalid ciphertext
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.//////.dGVzdA",
+ // Invalid tag
+ "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.//////",
+ // Invalid header
+ "W10.dGVzdA.dGVzdA.dGVzdA.dGVzdA",
+ // Invalid header
+ "######.dGVzdA.dGVzdA.dGVzdA.dGVzdA",
+ // Missing alg/enc params
+ "e30.dGVzdA.dGVzdA.dGVzdA.dGVzdA",
+ }
+
+ for _, msg := range failures {
+ _, err = ParseEncrypted(msg)
+ if err == nil {
+ t.Error("Able to parse invalid message", msg)
+ }
+ }
+}
+
+func TestFullParseJWE(t *testing.T) {
+ // Messages that should succeed to parse
+ successes := []string{
+ // Flattened serialization, single recipient
+ "{\"protected\":\"eyJhbGciOiJYWVoiLCJlbmMiOiJYWVoifQo\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ // Unflattened serialization, single recipient
+ "{\"protected\":\"\",\"unprotected\":{\"enc\":\"XYZ\"},\"recipients\":[{\"header\":{\"alg\":\"XYZ\"},\"encrypted_key\":\"QUJD\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ }
+
+ for i := range successes {
+ _, err := ParseEncrypted(successes[i])
+ if err != nil {
+ t.Error("Unable to parse valid message", err, successes[i])
+ }
+ }
+
+ // Messages that should fail to parse
+ failures := []string{
+ // Empty
+ "{}",
+ // Invalid JSON
+ "{XX",
+ // Invalid protected header
+ "{\"protected\":\"###\"}",
+ // Invalid protected header
+ "{\"protected\":\"e1gK\"}",
+ // Invalid encrypted key
+ "{\"protected\":\"e30\",\"encrypted_key\":\"###\"}",
+ // Invalid IV
+ "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"###\"}",
+ // Invalid ciphertext
+ "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"###\"}",
+ // Invalid tag
+ "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"###\"}",
+ // Invalid AAD
+ "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\",\"aad\":\"###\"}",
+ // Missing alg/enc headers
+ "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ // Missing enc header
+ "{\"protected\":\"eyJhbGciOiJYWVoifQ\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ // Missing alg header
+ "{\"protected\":\"eyJlbmMiOiJYWVoifQ\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ // Unflattened serialization, single recipient, invalid encrypted_key
+ "{\"protected\":\"\",\"recipients\":[{\"header\":{\"alg\":\"XYZ\", \"enc\":\"XYZ\"},\"encrypted_key\":\"###\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ // Unflattened serialization, single recipient, missing alg
+ "{\"protected\":\"eyJhbGciOiJYWVoifQ\",\"recipients\":[{\"encrypted_key\":\"QUJD\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}",
+ }
+
+ for i := range failures {
+ _, err := ParseEncrypted(failures[i])
+ if err == nil {
+ t.Error("Able to parse invalid message", err, failures[i])
+ }
+ }
+}
+
+func TestMissingInvalidHeaders(t *testing.T) {
+ obj := &JsonWebEncryption{
+ protected: &rawHeader{Enc: A128GCM},
+ unprotected: &rawHeader{},
+ recipients: []recipientInfo{
+ recipientInfo{},
+ },
+ }
+
+ _, err := obj.Decrypt(nil)
+ if err != ErrUnsupportedKeyType {
+ t.Error("should detect invalid key")
+ }
+
+ obj.unprotected.Crit = []string{"1", "2"}
+
+ _, err = obj.Decrypt(nil)
+ if err == nil {
+ t.Error("should reject message with crit header")
+ }
+
+ obj.unprotected.Crit = nil
+ obj.protected = &rawHeader{Alg: string(RSA1_5)}
+
+ _, err = obj.Decrypt(rsaTestKey)
+ if err == nil || err == ErrCryptoFailure {
+ t.Error("should detect missing enc header")
+ }
+}
+
+func TestRejectUnprotectedJWENonce(t *testing.T) {
+ // No need to test compact, since that's always protected
+
+ // Flattened JSON
+ input := `{
+ "header": {
+ "alg": "XYZ", "enc": "XYZ",
+ "nonce": "should-cause-an-error"
+ },
+ "encrypted_key": "does-not-matter",
+ "aad": "does-not-matter",
+ "iv": "does-not-matter",
+ "ciphertext": "does-not-matter",
+ "tag": "does-not-matter"
+ }`
+ _, err := ParseEncrypted(input)
+ if err == nil {
+ t.Error("JWE with an unprotected nonce parsed as valid.")
+ } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" {
+ t.Errorf("Improper error for unprotected nonce: %v", err)
+ }
+
+ input = `{
+ "unprotected": {
+ "alg": "XYZ", "enc": "XYZ",
+ "nonce": "should-cause-an-error"
+ },
+ "encrypted_key": "does-not-matter",
+ "aad": "does-not-matter",
+ "iv": "does-not-matter",
+ "ciphertext": "does-not-matter",
+ "tag": "does-not-matter"
+ }`
+ _, err = ParseEncrypted(input)
+ if err == nil {
+ t.Error("JWE with an unprotected nonce parsed as valid.")
+ } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" {
+ t.Errorf("Improper error for unprotected nonce: %v", err)
+ }
+
+ // Full JSON
+ input = `{
+ "header": { "alg": "XYZ", "enc": "XYZ" },
+ "aad": "does-not-matter",
+ "iv": "does-not-matter",
+ "ciphertext": "does-not-matter",
+ "tag": "does-not-matter",
+ "recipients": [{
+ "header": { "nonce": "should-cause-an-error" },
+ "encrypted_key": "does-not-matter"
+ }]
+ }`
+ _, err = ParseEncrypted(input)
+ if err == nil {
+ t.Error("JWE with an unprotected nonce parsed as valid.")
+ } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" {
+ t.Errorf("Improper error for unprotected nonce: %v", err)
+ }
+}
+
+func TestCompactSerialize(t *testing.T) {
+ // Compact serialization must fail if we have unprotected headers
+ obj := &JsonWebEncryption{
+ unprotected: &rawHeader{Alg: "XYZ"},
+ }
+
+ _, err := obj.CompactSerialize()
+ if err == nil {
+ t.Error("expected compact serialization of an object with unprotected headers to fail")
+ }
+}
+
+func TestVectorsJWE(t *testing.T) {
+ plaintext := []byte("The true sign of intelligence is not knowledge but imagination.")
+
+ publicKey := &rsa.PublicKey{
+ N: fromBase64Int(`
+ oahUIoWw0K0usKNuOR6H4wkf4oBUXHTxRvgb48E-BVvxkeDNjbC4he8rUW
+ cJoZmds2h7M70imEVhRU5djINXtqllXI4DFqcI1DgjT9LewND8MW2Krf3S
+ psk_ZkoFnilakGygTwpZ3uesH-PFABNIUYpOiN15dsQRkgr0vEhxN92i2a
+ sbOenSZeyaxziK72UwxrrKoExv6kc5twXTq4h-QChLOln0_mtUZwfsRaMS
+ tPs6mS6XrgxnxbWhojf663tuEQueGC-FCMfra36C9knDFGzKsNa7LZK2dj
+ YgyD3JR_MB_4NUJW_TqOQtwHYbxevoJArm-L5StowjzGy-_bq6Gw`),
+ E: 65537,
+ }
+
+ expectedCompact := stripWhitespace(`
+ eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.ROQCfge4JPm_
+ yACxv1C1NSXmwNbL6kvmCuyxBRGpW57DvlwByjyjsb6g8m7wtLMqKEyhFCn
+ tV7sjippEePIlKln6BvVnz5ZLXHNYQgmubuNq8MC0KTwcaGJ_C0z_T8j4PZ
+ a1nfpbhSe-ePYaALrf_nIsSRKu7cWsrwOSlaRPecRnYeDd_ytAxEQWYEKFi
+ Pszc70fP9geZOB_09y9jq0vaOF0jGmpIAmgk71lCcUpSdrhNokTKo5y8MH8
+ 3NcbIvmuZ51cjXQj1f0_AwM9RW3oCh2Hu0z0C5l4BujZVsDuGgMsGZsjUhS
+ RZsAQSXHCAmlJ2NlnN60U7y4SPJhKv5tKYw.48V1_ALb6US04U3b.5eym8T
+ W_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiS
+ diwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ`)
+
+ expectedFull := stripWhitespace(`
+ { "protected":"eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ",
+ "encrypted_key":
+ "ROQCfge4JPm_yACxv1C1NSXmwNbL6kvmCuyxBRGpW57DvlwByjyjsb
+ 6g8m7wtLMqKEyhFCntV7sjippEePIlKln6BvVnz5ZLXHNYQgmubuNq
+ 8MC0KTwcaGJ_C0z_T8j4PZa1nfpbhSe-ePYaALrf_nIsSRKu7cWsrw
+ OSlaRPecRnYeDd_ytAxEQWYEKFiPszc70fP9geZOB_09y9jq0vaOF0
+ jGmpIAmgk71lCcUpSdrhNokTKo5y8MH83NcbIvmuZ51cjXQj1f0_Aw
+ M9RW3oCh2Hu0z0C5l4BujZVsDuGgMsGZsjUhSRZsAQSXHCAmlJ2Nln
+ N60U7y4SPJhKv5tKYw",
+ "iv": "48V1_ALb6US04U3b",
+ "ciphertext":
+ "5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFS
+ hS8iB7j6jiSdiwkIr3ajwQzaBtQD_A",
+ "tag":"XFBoMYUZodetZdvTiFvSkQ" }`)
+
+ // Mock random reader
+ randReader = bytes.NewReader([]byte{
+ // Encryption key
+ 177, 161, 244, 128, 84, 143, 225, 115, 63, 180, 3, 255, 107, 154,
+ 212, 246, 138, 7, 110, 91, 112, 46, 34, 105, 47, 130, 203, 46, 122,
+ 234, 64, 252,
+ // Randomness for RSA-OAEP
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Initialization vector
+ 227, 197, 117, 252, 2, 219, 233, 68, 180, 225, 77, 219})
+ defer resetRandReader()
+
+ // Encrypt with a dummy key
+ encrypter, err := NewEncrypter(RSA_OAEP, A256GCM, publicKey)
+ if err != nil {
+ panic(err)
+ }
+
+ object, err := encrypter.Encrypt(plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ serialized, err := object.CompactSerialize()
+ if serialized != expectedCompact {
+ t.Error("Compact serialization is not what we expected", serialized, expectedCompact)
+ }
+
+ serialized = object.FullSerialize()
+ if serialized != expectedFull {
+ t.Error("Full serialization is not what we expected")
+ }
+}
+
+func TestVectorsJWECorrupt(t *testing.T) {
+ priv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: fromHexInt(`
+ a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8
+ ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c
+ bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd
+ bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93
+ ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`),
+ E: 65537,
+ },
+ D: fromHexInt(`
+ 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195
+ 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d
+ 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6
+ 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb
+ 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`),
+ Primes: []*big.Int{
+ fromHexInt(`
+ d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262
+ 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c
+ 2f26a471dcad212eac7ca39d`),
+ fromHexInt(`
+ cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3
+ 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af
+ 72bfe9a030e860b0288b5d77`),
+ },
+ }
+
+ corruptCiphertext := stripWhitespace(`
+ eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.NFl09dehy
+ IR2Oh5iSsvEa82Ps7DLjRHeo0RnuTuSR45OsaIP6U8yu7vLlWaZKSZMy
+ B2qRBSujf-5XIRoNhtyIyjk81eJRXGa_Bxaor1XBCMyyhGchW2H2P71f
+ PhDO6ufSC7kV4bNqgHR-4ziS7KXwzN83_5kogXqxUpymUoJDNc.tk-GT
+ W_VVhiTIKFF.D_BE6ImZUl9F.52a-zFnRb3YQwIC7UrhVyQ`)
+
+ corruptAuthtag := stripWhitespace(`
+ eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.NFl09dehy
+ IR2Oh5iSsvEa82Ps7DLjRHeo0RnuTuSR45OsaIP6U8yu7vLlWaZKSZMy
+ B2qRBSujf-5XIRoNhtyIyjk81eJRXGa_Bxaor1XBCMyyhGchW2H2P71f
+ PhDO6ufSC7kV4bNqgHR-4ziS7KNwzN83_5kogXqxUpymUoJDNc.tk-GT
+ W_VVhiTIKFF.D_BE6ImZUl9F.52a-zFnRb3YQwiC7UrhVyQ`)
+
+ msg, _ := ParseEncrypted(corruptCiphertext)
+ _, err := msg.Decrypt(priv)
+ if err != ErrCryptoFailure {
+ t.Error("should detect corrupt ciphertext")
+ }
+
+ msg, _ = ParseEncrypted(corruptAuthtag)
+ _, err = msg.Decrypt(priv)
+ if err != ErrCryptoFailure {
+ t.Error("should detect corrupt auth tag")
+ }
+}
+
+// Test vectors generated with nimbus-jose-jwt
+func TestSampleNimbusJWEMessagesRSA(t *testing.T) {
+ rsaPrivateKey, err := LoadPrivateKey(fromBase64Bytes(`
+ MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCNRCEmf5PlbXKuT4uwnb
+ wGKvFrtpi+bDYxOZxxqxdVkZM/bYATAnD1fg9pNvLMKeF+MWJ9kPIMmDgOh9RdnRdLvQGb
+ BzhLmxwhhcua2QYiHEZizXmiaXvNP12bzEBhebdX7ObW8izMVW0p0lqHPNzkK3K75B0Sxo
+ FMVKkZ7KtBHgepBT5yPhPPcNe5lXQeTne5bo3I60DRcN9jTBgMJOXdq0I9o4y6ZmoXdNTm
+ 0EyLzn9/EYiHqBxtKFh791EHR7wYgyi/t+nOKr4sO74NbEByP0mHDil+mPvZSzFW4l7fPx
+ OclRZvpRIKIub2TroZA9s2WsshGf79eqqXYbBB9NNRAgMBAAECggEAIExbZ/nzTplfhwsY
+ 3SCzRJW87OuqsJ79JPQPGM4NX7sQ94eJqM7+FKLl0yCFErjgnYGdCyiArvB+oJPdsimgke
+ h83X0hGeg03lVA3/6OsG3WifCAxulnLN44AM8KST8S9D9t5+cm5vEBLHazzAfWWTS13s+g
+ 9hH8rf8NSqgZ36EutjKlvLdHx1mWcKX7SREFVHT8FWPAbdhTLEHUjoWHrfSektnczaSHnt
+ q8fFJy6Ld13QkF1ZJRUhtA24XrD+qLTc+M36IuedjeZaLHFB+KyhYR3YvXEtrbCug7dCRd
+ uG6uTlDCSaSy7xHeTPolWtWo9F202jal54otxiAJFGUHgQKBgQDRAT0s6YQZUfwE0wluXV
+ k0JdhDdCo8sC1aMmKlRKWUkBAqrDl7BI3MF56VOr4ybr90buuscshFf9TtrtBOjHSGcfDI
+ tSKfhhkW5ewQKB0YqyHzoD6UKT0/XAshFY3esc3uCxuJ/6vOiXV0og9o7eFvr51O0TfDFh
+ mcTvW4wirKlQKBgQCtB7UAu8I9Nn8czkd6oXLDRyTWYviuiqFmxR+PM9klgZtsumkeSxO1
+ lkfFoj9+G8nFaqYEBA9sPeNtJVTSROCvj/iQtoqpV2NiI/wWeVszpBwsswx2mlks4LJa8a
+ Yz9xrsfNoroKYVppefc/MCoSx4M+99RSm3FSpLGZQHAUGyzQKBgQDMQmq4JuuMF1y2lk0E
+ SESyuz21BqV0tDVOjilsHT+5hmXWXoS6nkO6L2czrrpM7YE82F6JJZBmo7zEIXHBInGLJ3
+ XLoYLZ5qNEhqYDUEDHaBCBWZ1vDTKnZlwWFEuXVavNNZvPbUhKTHq25t8qjDki/r09Vykp
+ BsM2yNBKpbBOVQKBgCJyUVd3CaFUExQyAMrqD0XPCQdhJq7gzGcAQVsp8EXmOoH3zmuIeM
+ ECzQEMXuWFNLMHm0tbX5Kl83vMHcnKioyI9ewhWxOBYTitf0ceG8j5F97SOl32NmCXzwoJ
+ 55Oa0xJXfLuIvOe8hZzp4WwZmBfKBxiCR166aPQQgIawelrVAoGAEJsHomfCI4epxH4oMw
+ qYJMCGy95zloB+2+c86BZCOJAGwnfzbtc2eutWZw61/9sSO8sQCfzA8oX+5HwAgnFVzwW4
+ lNMZohppYcpwN9EyjkPaCXuALC7p5rF2o63wY7JLvnjS2aYZliknh2yW6X6fSB0PK0Cpvd
+ lAIyRw6Kud0zI=`))
+ if err != nil {
+ panic(err)
+ }
+
+ rsaSampleMessages := []string{
+ "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBMV81In0.EW0KOhHeoAxTBnLjYhh2T6HjwI-srNs6RpcSdZvE-GJ5iww3EYWBCmeGGj1UVz6OcBfwW3wllZ6GPOHU-hxVQH5KYpVOjkmrFIYU6-8BHhxBP_PjSJEBCZzjOgsCm9Th4-zmlO7UWTdK_UtwE7nk4X-kkmEy-aZBCShA8nFe2MVvqD5F7nvEWNFBOHh8ae_juo-kvycoIzvxLV9g1B0Zn8K9FAlu8YF1KiL5NFekn76f3jvAwlExuRbFPUx4gJN6CeBDK_D57ABsY2aBVDSiQceuYZxvCIAajqSS6dMT382FNJzAiQhToOpo_1w5FnnBjzJLLEKDk_I-Eo2YCWxxsQ.5mCMuxJqLRuPXGAr.Ghe4INeBhP3MDWGvyNko7qanKdZIzKjfeiU.ja3UlVWJXKNFJ-rZsJWycw",
+ "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBMV81In0.JsJeYoP0St1bRYNUaAmA34DAA27usE7RNuC2grGikBRmh1xrwUOpnEIXXpwr7fjVmNi52zzWkNHC8JkkRTrLcCh2VXvnOnarpH8DCr9qM6440bSrahzbxIvDds8z8q0wT1W4kjVnq1mGwGxg8RQNBWTV6Sp2FLQkZyjzt_aXsgYzr3zEmLZxB-d41lBS81Mguk_hdFJIg_WO4ao54lozvxkCn_uMiIZ8eLb8qHy0h-N21tiHGCaiC2vV8KXomwoqbJ0SXrEH4r9_R2J844H80TBZdbvNBd8whvoQNHvOX659LNs9EQ9xxvHU2kqGZekXBu7sDXXTjctMkMITobGSzw.1v5govaDvanP3LGp.llwYNBDrD7MwVLaFHesljlratfmndWs4XPQ.ZGT1zk9_yIKi2GzW6CuAyA",
+ "eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBMV81In0.fBv3fA3TMS3ML8vlsCuvwdsKvB0ym8R30jJrlOiqkWKk7WVUkjDInFzr1zw3Owla6c5BqOJNoACXt4IWbkLbkoWV3tweXlWwpafuaWPkjLOUH_K31rS2fCX5x-MTj8_hScquVQXpbz3vk2EfulRmGXZc_8JU2NqQCAsYy3a28houqP3rDe5jEAvZS2SOFvJkKW--f5S-z39t1D7fNz1N8Btd9SmXWQzjbul5YNxI9ctqxhJpkKYpxOLlvrzdA6YdJjOlDx3n6S-HnSZGM6kQd_xKtAf8l1EGwhQmhbXhMhjVxMvGwE5BX7PAb8Ccde5bzOCJx-PVbVetuLb169ZYqQ._jiZbOPRR82FEWMZ.88j68LI-K2KT6FMBEdlz6amG5nvaJU8a-90.EnEbUTJsWNqJYKzfO0x4Yw",
+ "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBMV81In0.bN6FN0qmGxhkESiVukrCaDVG3woL0xE-0bHN_Mu0WZXTQWbzzT-7jOvaN1xhGK8nzi8qpCSRgE5onONNB9i8OnJm3MMIxF7bUUEAXO9SUAFn2v--wNc4drPc5OjIu0RiJrDVDkkGjNrBDIuBaEQcke7A0v91PH58dXE7o4TLPzC8UJmRtXWhUSwjXVF3-UmYRMht2rjHJlvRbtm6Tu2LMBIopRL0zj6tlPP4Dm7I7sz9OEB3VahYAhpXnFR7D_f8RjLSXQmBvB1FiI5l_vMz2NFt2hYUmQF3EJMLIEdHvvPp3iHDGiXC1obJrDID_CCf3qs9UY7DMYL622KLvP2NIg.qb72oxECzxd_aNuHVR0aNg.Gwet9Ms8hB8rKEb0h4RGdFNRq97Qs2LQaJM0HWrCqoI.03ljVThOFvgXzMmQJ79VjQ",
+ "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBMV81In0.ZbEOP6rqdiIP4g7Nl1PL5gwhgDwv9RinyiUQxZXPOmD7kwEZrZ093dJnhqI9kEd3QGFlHDpB7HgNz53d27z2zmEj1-27v6miizq6tH4sN2MoeZLwSyk16O1_n3bVdDmROawsTYYFJfHsuLwyVJxPd37duIYnbUCFO9J8lLIv-2VI50KJ1t47YfE4P-Wt9jVzxP2CVUQaJwTlcwfiDLJTagYmfyrDjf525WlQFlgfJGqsJKp8BX9gmKvAo-1iCBAM8VpEjS0u0_hW9VSye36yh8BthVV-VJkhJ-0tMpto3bbBmj7M25Xf4gbTrrVU7Nz6wb18YZuhHZWmj2Y2nHV6Jg.AjnS44blTrIIfFlqVw0_Mg.muCRgaEXNKKpW8rMfW7jf7Zpn3VwSYDz-JTRg16jZxY.qjc9OGlMaaWKDWQSIwVpR4K556Pp6SF9",
+ "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBMV81In0.c7_F1lMlRHQQE3WbKmtHBYTosdZrG9hPfs-F9gNQYet61zKG8NXVkSy0Zf2UFHt0vhcO8hP2qrqOFsy7vmRj20xnGHQ2EE29HH6hwX5bx1Jj3uE5WT9Gvh0OewpvF9VubbwWTIObBpdEG7XdJsMAQlIxtXUmQYAtLTWcy2ZJipyJtVlWQLaPuE8BKfZH-XAsp2CpQNiRPI8Ftza3EAspiyRfVQbjKt7nF8nuZ2sESjt7Y50q4CSiiCuGT28T3diMN0_rWrH-I-xx7OQvJlrQaNGglGtu3jKUcrJDcvxW2e1OxriaTeuQ848ayuRvGUNeSv6WoVYmkiK1x_gNwUAAbw.7XtSqHJA7kjt6JrfxJMwiA.Yvi4qukAbdT-k-Fd2s4G8xzL4VFxaFC0ZIzgFDAI6n0.JSWPJ-HjOE3SK9Lm0yHclmjS7Z1ahtQga9FHGCWVRcc",
+ "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.SYVxJbCnJ_tcR13LJpaqHQj-nGNkMxre4A1FmnUdxnvzeJwuvyrLiUdRsZR1IkP4fqLtDON2mumx39QeJQf0WIObPBYlIxycRLkwxDHRVlyTmPvdZHAxN26jPrk09wa5SgK1UF1W1VSQIPm-Tek8jNAmarF1Yxzxl-t54wZFlQiHP4TuaczugO5f-J4nlWenfla2mU1snDgdUMlEZGOAQ_gTEtwSgd1MqXmK_7LZBkoDqqoCujMZhziafJPXPDaUUqBLW3hHkkDA7GpVec3XcTtNUWQJqOpMyQhqo1KQMc8jg3fuirILp-hjvvNVtBnCRBvbrKUCPzu2_yH3HM_agA.2VsdijtonAxShNIW.QzzB3P9CxYP3foNKN0Ma1Z9tMwijAlkWo08.ZdQkIPDY_M-hxqi5fD4NGw",
+ "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.Z2oTJXXib1u-S38Vn3DRKE3JnhnwgUa92UhsefzY2Wpdn0dmxMfYt9iRoJGFfSAcA97MOfjyvXVRCKWXGrG5AZCMAXEqU8SNQwKPRjlcqojcVzQyMucXI0ikLC4mUgeRlfKTwsBicq6JZZylzRoLGGSNJQbni3_BLsf7H3Qor0BYg0FPCLG9Z2OVvrFzvjTLmZtV6gFlVrMHBxJub_aUet9gAkxiu1Wx_Kx46TlLX2tkumXIpTGlzX6pef6jLeZ5EIg_K-Uz4tkWgWQIEkLD7qmTyk5pAGmzukHa_08jIh5-U-Sd8XGZdx4J1pVPJ5CPg0qDJGZ_cfgkgpWbP_wB6A.4qgKfokK1EwYxz20._Md82bv_KH2Vru0Ue2Eb6oAqHP2xBBP5jF8.WFRojvQpD5VmZlOr_dN0rQ",
+ "eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.JzCUgJcBJmBgByp4PBAABUfhezPvndxBIVzaoZ96DAS0HPni0OjMbsOGsz6JwNsiTr1gSn_S6R1WpZM8GJc9R2z0EKKVP67TR62ZSG0MEWyLpHmG_4ug0fAp1HWWMa9bT4ApSaOLgwlpVAb_-BPZZgIu6c8cREuMon6UBHDqW1euTBbzk8zix3-FTZ6p5b_3soDL1wXfRiRBEsxxUGMnpryx1OFb8Od0JdyGF0GgfLt6OoaujDJpo-XtLRawu1Xlg6GqRs0NQwSHZ5jXgQ6-zgCufXonAmYTiIyBXY2no9XmECTexjwrS_05nA7H-UyIZEBOCp3Yhz2zxrt5j_0pvQ.SJR-ghhaUKP4zXtZ.muiuzLfZA0y0BDNsroGTw2r2-l73SLf9lK8.XFMH1oHr1G6ByP3dWSUUPA",
+ "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBLU9BRVAifQ.U946MVfIm4Dpk_86HrnIA-QXyiUu0LZ67PL93CMLmEtJemMNDqmRd9fXyenCIhAC7jPIV1aaqW7gS194xyrrnUpBoJBdbegiPqOfquy493Iq_GQ8OXnFxFibPNQ6rU0l8BwIfh28ei_VIF2jqN6bhxFURCVW7fG6n6zkCCuEyc7IcxWafSHjH2FNttREuVj-jS-4LYDZsFzSKbpqoYF6mHt8H3btNEZDTSmy_6v0fV1foNtUKNfWopCp-iE4hNh4EzJfDuU8eXLhDb03aoOockrUiUCh-E0tQx9su4rOv-mDEOHHAQK7swm5etxoa7__9PC3Hg97_p4GM9gC9ykNgw.pnXwvoSPi0kMQP54of-HGg.RPJt1CMWs1nyotx1fOIfZ8760mYQ69HlyDp3XmdVsZ8.Yxw2iPVWaBROFE_FGbvodA",
+ "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBLU9BRVAifQ.eKEOIJUJpXmO_ghH_nGCJmoEspqKyiy3D5l0P8lKutlo8AuYHPQlgOsaFYnDkypyUVWd9zi-JaQuCeo7dzoBiS1L71nAZo-SUoN0anQBkVuyuRjr-deJMhPPfq1H86tTk-4rKzPr1Ivd2RGXMtWsrUpNGk81r1v8DdMntLE7UxZQqT34ONuZg1IXnD_U6di7k07unI29zuU1ySeUr6w1YPw5aUDErMlpZcEJWrgOEYWaS2nuC8sWGlPGYEjqkACMFGn-y40UoS_JatNZO6gHK3SKZnXD7vN5NAaMo_mFNbh50e1t_zO8DaUdLtXPOBLcx_ULoteNd9H8HyDGWqwAPw.0xmtzJfeVMoIT1Cp68QrXA.841l1aA4c3uvSYfw6l180gn5JZQjL53WQ5fr8ejtvoI.lojzeWql_3gDq-AoaIbl_aGQRH_54w_f",
+ "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBLU9BRVAifQ.D0QkvIXR1TL7dIHWuPNMybmmD8UPyQd1bRKjRDNbA2HmKGpamCtcJmpNB_EetNFe-LDmhe44BYI_XN2wIBbYURKgDK_WG9BH0LQw_nCVqQ-sKqjtj3yQeytXhLHYTDmiF0TO-uW-RFR7GbPAdARBfuf4zj82r_wDD9sD5WSCGx89iPfozDOYQ_OLwdL2WD99VvDyfwS3ZhxA-9IMSYv5pwqPkxj4C0JdjCqrN0YNrZn_1ORgjtsVmcWXsmusObTozUGA7n5GeVepfZdU1vrMulAwdRYqOYtlqKaOpFowe9xFN3ncBG7wb4f9pmzbS_Dgt-1_Ii_4SEB9GQ4NiuBZ0w.N4AZeCxMGUv52A0UVJsaZw.5eHOGbZdtahnp3l_PDY-YojYib4ft4SRmdsQ2kggrTs.WsmGH8ZDv4ctBFs7qsQvw2obe4dVToRcAQaZ3PYL34E",
+ "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.fDTxO_ZzZ3Jdrdw-bxvg7u-xWB2q1tp3kI5zH6JfhLUm4h6rt9qDA_wZlRym8-GzEtkUjkTtQGs6HgQx_qlyy8ylCakY5GHsNhCG4m0UNhRiNfcasAs03JSXfON9-tfTJimWD9n4k5OHHhvcrsCW1G3jYeLsK9WHCGRIhNz5ULbo8HBrCTbmZ6bOEQ9mqhdssLpdV24HDpebotf3bgPJqoaTfWU6Uy7tLmPiNuuNRLQ-iTpLyNMTVvGqqZhpcV3lAEN5l77QabI5xLJYucvYjrXQhAEZ7YXO8oRYhGkdG2XXIRcwr87rBeRH-47HAyhZgF_PBPBhhrJNS9UNMqdfBw.FvU4_s7Md6vxnXWd.fw29Q4_gHt4f026DPPV-CNebQ8plJ6IVLX8._apBZrw7WsT8HOmxgCrTwA",
+ "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.bYuorK-rHMbO4c2CRWtvyOEaM1EN-o-wLRZ0wFWRX9mCXQ-iTNarZn7ksYM1XnGmZ4u3CSowX1Hpca9Rg72_VJCmKapqCT7r3YfasN4_oeLwuSKI_gT-uVOznod97tn3Gf_EDv0y1V4H0k9BEIFGbajAcG1znTD_ODY3j2KZJxisfrsBoslc6N-HI0kKZMC2hSGuHOcOf8HN1sTE-BLqZCtoj-zxQECJK8Wh14Ih4jzzdmmiu_qmSR780K6su-4PRt3j8uY7oCiLBfwpCsCmhJgp8rKd91zoedZmamfvX38mJIfE52j4fG6HmIYw9Ov814fk9OffV6tzixjcg54Q2g.yeVJz4aSh2s-GUr9.TBzzWP5llEiDdugpP2SmPf2U4MEGG9EoPWk.g25UoWpsBaOd45J__FX7mA",
+ "eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.h9tFtmh762JuffBxlSQbJujCyI4Zs9yc3IOb1yR8g65W4ZHosIvzVGHWbShj4EY9MNrz-RbKtHfqQGGzDeo3Xb4-HcQ2ZDHyWoUg7VfA8JafJ5zIKL1npz8eUExOVMLsAaRfHg8qNfczodg3egoSmX5Q-nrx4DeidDSXYZaZjV0C72stLTPcuQ7XPV7z1tvERAkqpvcsRmJn_PiRNxIbAgoyHMJ4Gijuzt1bWZwezlxYmw0TEuwCTVC2fl9NJTZyxOntS1Lcm-WQGlPkVYeVgYTOQXLlp7tF9t-aAvYpth2oWGT6Y-hbPrjx_19WaKD0XyWCR46V32DlXEVDP3Xl2A.NUgfnzQyEaJjzt9r.k2To43B2YVWMeR-w3n4Pr2b5wYq2o87giHk.X8_QYCg0IGnn1pJqe8p_KA",
+ "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.EDq6cNP6Yp1sds5HZ4CkXYp7bs9plIYVZScKvuyxUy0H1VyBC_YWg0HvndPNb-vwh1LA6KMxRazlOwJ9iPR9YzHnYmGgPM3Je_ZzBfiPlRfq6hQBpGnNaypBI1XZ2tyFBhulsVLqyJe2SmM2Ud00kasOdMYgcN8FNFzq7IOE7E0FUQkIwLdUL1nrzepiYDp-5bGkxWRcL02cYfdqdm00G4m0GkUxAmdxa3oPNxZlt2NeBI_UVWQSgJE-DJVJQkDcyA0id27TV2RCDnmujYauNT_wYlyb0bFDx3pYzzNXfAXd4wHZxt75QaLZ5APJ0EVfiXJ0qki6kT-GRVmOimUbQA.vTULZL7LvS0WD8kR8ZUtLg.mb2f0StEmmkuuvsyz8UplMvF58FtZzlu8eEwzvPUvN0.hbhveEN40V-pgG2hSVgyKg",
+ "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.DuYk92p7u-YIN-JKn-XThmlVcnhU9x5TieQ2uhsLQVNlo0iWC9JJPP6bT6aI6u_1BIS3yE8_tSGGL7eM-zyEk6LuTqSWFRaZcZC06d0MnS9eYZcw1T2D17fL-ki-NtCaTahJD7jE2s0HevRVW49YtL-_V8whnO_EyVjvXIAQlPYqhH_o-0Nzcpng9ggdAnuF2rY1_6iRPYFJ3BLQvG1oWhyJ9s6SBttlOa0i6mmFCVLHx6sRpdGAB3lbCL3wfmHq4tpIv77gfoYUNP0SNff-zNmBXF_wp3dCntLZFTjbfMpGyHlruF_uoaLqwdjYpUGNUFVUoeSiMnSbMKm9NxiDgQ.6Mdgcqz7bMU1UeoAwFC8pg.W36QWOlBaJezakUX5FMZzbAgeAu_R14AYKZCQmuhguw.5OeyIJ03olxmJft8uBmjuOFQPWNZMYLI",
+ "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.ECulJArWFsPL2FlpCN0W8E7IseSjJg1cZqE3wz5jk9gvwgNForAUEv5KYZqhNI-p5IxkGV0f8K6Y2X8pWzbLwiPIjZe8_dVqHYJoINxqCSgWLBhz0V36qL9Nc_xARTBk4-ZteIu75NoXVeos9gNvFnkOCj4tm-jGo8z8EFO9XfODgjhiR4xv8VqUtvrkjo9GQConaga5zpV-J4JQlXbdqbDjnuwacnJAxYpFyuemqcgqsl6BnFX3tovGkmSUPqcvF1A6tiHqr-TEmcgVqo5C3xswknRBKTQRM00iAmJ92WlVdkoOCx6E6O7cVHFawZ14BLzWzm66Crb4tv0ucYvk_Q.mxolwUaoj5S5kHCfph0w8g.nFpgYdnYg3blHCCEi2XXQGkkKQBXs2OkZaH11m3PRvk.k8BAVT4EcyrUFVIKr-KOSPbF89xyL0Vri2rFTu2iIWM",
+ }
+
+ for _, msg := range rsaSampleMessages {
+ obj, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ plaintext, err := obj.Decrypt(rsaPrivateKey)
+ if err != nil {
+ t.Error("unable to decrypt message", msg, err)
+ continue
+ }
+ if string(plaintext) != "Lorem ipsum dolor sit amet" {
+ t.Error("plaintext is not what we expected for msg", msg)
+ }
+ }
+}
+
+// Test vectors generated with nimbus-jose-jwt
+func TestSampleNimbusJWEMessagesAESKW(t *testing.T) {
+ aesTestKeys := [][]byte{
+ fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D"),
+ fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D95EC9CDC2D82233C"),
+ fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D95EC9CDC2D82233C333C35BA29044E90"),
+ }
+
+ aesSampleMessages := [][]string{
+ []string{
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoib2ZMd2Q5NGloVWFRckJ0T1pQUDdjUSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiV2Z3TnN5cjEwWUFjY2p2diJ9.9x3RxdqIS6P9xjh93Eu1bQ.6fs3_fSGt2jull_5.YDlzr6sWACkFg_GU5MEc-ZEWxNLwI_JMKe_jFA.f-pq-V7rlSSg_q2e1gDygw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoic2RneXB1ckFjTEFzTmZJU0lkZUNpUSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoieVFMR0dCdDJFZ0c1THdyViJ9.arslKo4aKlh6f4s0z1_-U-8JbmhAoZHN.Xw2Q-GX98YXwuc4i.halTEWMWAYZbv-qOD52G6bte4x6sxlh1_VpGEA.Z1spn016v58cW6Q2o0Qxag",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoicTNzejF5VUlhbVBDYXJfZ05kSVJqQSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiM0ZRM0FsLWJWdWhmcEIyQyJ9.dhVipWbzIdsINttuZM4hnjpHvwEHf0VsVrOp4GAg01g.dk7dUyt1Qj13Pipw.5Tt70ONATF0BZAS8dBkYmCV7AQUrfb8qmKNLmw.A6ton9MQjZg0b3C0QcW-hg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiUHNpTGphZnJZNE16UlRmNlBPLTZfdyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiSUFPbnd2ODR5YXFEaUxtbSJ9.swf92_LyCvjsvkynHTuMNXRl_MX2keU-fMDWIMezHG4.LOp9SVIXzs4yTnOtMyXZYQ.HUlXrzqJ1qXYl3vUA-ydezCg77WvJNtKdmZ3FPABoZw.8UYl1LOofQLAxHHvWqoTbg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiWGRndHQ5dUVEMVlVeU1rVHl6M3lqZyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiWF90V2RhSmh6X3J1SHJvQSJ9.JQ3dS1JSgzIFi5M9ig63FoFU1nHBTmPwXY_ovNE2m1JOSUvHtalmihIuraPDloCf.e920JVryUIWt7zJJQM-www.8DUrl4LmsxIEhRr9RLTHG9tBTOcwXqEbQHAJd_qMHzE.wHinoqGUhL4O7lx125kponpwNtlp8VGJ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoicGgyaTdoY0FWNlh3ZkQta1RHYlVXdyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiaG41Smk4Wm1rUmRrSUxWVSJ9._bQlJXl22dhsBgYPhkxUyinBNi871teGWbviOueWj2PqG9OPxIc9SDS8a27YLSVDMircd5Q1Df28--vcXIABQA.DssmhrAg6w_f2VDaPpxTbQ.OGclEmqrxwvZqAfn7EgXlIfXgr0wiGvEbZz3zADnqJs.YZeP0uKVEiDl8VyC-s20YN-RbdyGNsbdtoGDP3eMof8",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTEyOEtXIn0.TEMcXEoY8WyqGjYs5GZgS-M_Niwu6wDY.i-26KtTt51Td6Iwd.wvhkagvPsLj3QxhPBbfH_th8OqxisUtme2UadQ.vlfvBPv3bw2Zk2H60JVNLQ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTEyOEtXIn0.gPaR6mgQ9TUx05V6DRfgTQeZxl0ZSzBa5uQd-qw6yLs.MojplOD77FkMooS-.2yuD7dKR_C3sFbhgwiBccKKOF8DrSvNiwX7wPQ.qDKUbSvMnJv0qifjpWC14g",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTEyOEtXIn0.Fg-dgSkUW1KEaL5YDPoWHNL8fpX1WxWVLA9OOWsjIFhQVDKyUZI7BQ.mjRBpyJTZf7H-quf.YlNHezMadtaSKp23G-ozmYhHOeHwuJnvWGTtGg.YagnR7awBItUlMDo4uklvg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTEyOEtXIn0.x1vYzUE-E2XBWva9OPuwtqfQaf9rlJCIBAyAe6N2q2kWfJrkxGxFsQ.gAwe78dyODFaoP2IOityAA.Yh5YfovkWxGBNAs1sVhvXow_2izHHsBiYEc9JYD6kVg.mio1p3ncp2wLEaEaRa7P0w",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTEyOEtXIn0.szGrdnmF7D5put2aRBvSSFfp0vRgkRGYaafijJIqAF6PWd1IxsysZRV8aQkQOW1cB6d0fXsTfYM.Ru25LVOOk4xhaK-cIZ0ThA.pF9Ok5zot7elVqXFW5YYHV8MuF9gVGzpQnG1XDs_g_w.-7la0uwcNPpteev185pMHZjbVDXlrec8",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTEyOEtXIn0.cz-hRv0xR5CnOcnoRWNK8Q9poyVYzRCVTjfmEXQN6xPOZUkJ3zKNqb8Pir_FS0o2TVvxmIbuxeISeATTR2Ttx_YGCNgMkc93.SF5rEQT94lZR-UORcMKqGw.xphygoU7zE0ZggOczXCi_ytt-Evln8CL-7WLDlWcUHg.5h99r8xCCwP2PgDbZqzCJ13oFfB2vZWetD5qZjmmVho",
+ },
+ []string{
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoiVWR5WUVKdEJ5ZTA5dzdjclY0cXI1QSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiZlBBV0QwUmdSbHlFdktQcCJ9.P1uTfTuH-imL-NJJMpuTRA.22yqZ1NIfx3KNPgc.hORWZaTSgni1FS-JT90vJly-cU37qTn-tWSqTg.gMN0ufXF92rSXupTtBNkhA",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoiOU9qX3B2LTJSNW5lZl9YbWVkUWltUSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiY3BybGEwYUYzREVQNmFJTSJ9.6NVpAm_APiC7km2v-oNR8g23K9U_kf1-.jIg-p8tNwSvwxch0.1i-GPaxS4qR6Gy4tzeVtSdRFRSKQSMpmn-VhzA.qhFWPqtA6vVPl7OM3DThsA",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoiOVc3THg3MVhGQVJCb3NaLVZ5dXc4ZyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiZ1N4ZE5heFdBSVBRR0tHYiJ9.3YjPz6dVQwAtCekvtXiHZrooOUlmCsMSvyfwmGwdrOA.hA_C0IDJmGaRzsB0.W4l7OPqpFxiVOZTGfAlRktquyRTo4cEOk9KurQ.l4bGxOkO_ql_jlPo3Oz3TQ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiOHJYbWl2WXFWZjNfbHhhd2NUbHJoUSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiVXBWeXprVTNKcjEwYXRqYyJ9.8qft-Q_xqUbo5j_aVrVNHchooeLttR4Kb6j01O8k98M.hXO-5IKBYCL9UdwBFVm0tg.EBM4lCZX_K6tfqYmfoDxVPHcf6cT--AegXTTjfSqsIw.Of8xUvEQSh3xgFT3uENnAg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiVnItSnVaX0tqV2hSWWMzdzFwZ3cwdyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiRGg2R3dISVBVS3ljZGNZeCJ9.YSEDjCnGWr_n9H94AvLoRnwm6bdU9w6-Q67k-QQRVcKRd6673pgH9zEF9A9Dt6o1.gcmVN4kxqBuMq6c7GrK3UQ.vWzJb0He6OY1lhYYjYS7CLh55REAAq1O7yNN-ND4R5Q.OD0B6nwyFaDr_92ysDOtlVnJaeoIqhGw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoieEtad1BGYURpQ3NqUnBqZUprZHhmZyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoieTVHRFdteXdkb2R1SDJlYyJ9.AW0gbhWqlptOQ1y9aoNVwrTIIkBfrp33C2OWJsbrDRk6lhxg_IgFhMDTE37moReySGUtttC4CXQD_7etHmd3Hw.OvKXK-aRKlXHOpJQ9ZY_YQ.Ngv7WarDDvR2uBj_DavPAR3DYuIaygvSSdcHrc8-ZqM.MJ6ElitzFCKf_0h5fIJw8uOLC6ps7dKZPozF8juQmUY",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTE5MktXIn0.8qu63pppcSvp1vv37WrZ44qcCTg7dQMA.cDp-f8dJTrDEpZW4.H6OBJYs4UvFR_IZHLYQZxB6u9a0wOdAif2LNfQ.1dB-id0UIwRSlmwHx5BJCg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTE5MktXIn0._FdoKQvC8qUs7K0upriEihUwztK8gOwonXpOxdIwrfs.UO38ok8gDdpLVa1T.x1GvHdVCy4fxoQRg-OQK4Ez3jDOvu9gllLPeEA.3dLeZGIprh_nHizOTVi1xw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTE5MktXIn0.uzCJskgSIK6VkjJIu-dQi18biqaY0INc_A1Ehx0oESafgtR99_n4IA.W2eKK8Y14WwTowI_.J2cJC7R6Bz6maR0s1UBMPyRi5BebNUAmof4pvw.-7w6htAlc4iUsOJ6I04rFg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTE5MktXIn0.gImQeQETp_6dfJypFDPLlv7c5pCzuq86U16gzrLiCXth6X9XfxJpvQ.YlC4MxjtLWrsyEvlFhvsqw.Vlpvmg9F3gkz4e1xG01Yl2RXx-jG99rF5UvCxOBXSLc.RZUrU_FoR5bG3M-j3GY0Dw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTE5MktXIn0.T2EfQ6Tu2wJyRMgZzfvBYmQNCCfdMudMrg86ibEMVAOUKJPtR3WMPEb_Syy9p2VjrLKRlv7nebo.GPc8VbarPPRtzIRATB8NsA.ugPCqLvVLwh55bWlwjsFkmWzJ31z5z-wuih2oJqmG_U.m7FY3EjvV6mKosEYJ5cY7ezFoVQoJS8X",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTE5MktXIn0.OgLMhZ-2ZhslQyHfzOfyC-qmT6bNg9AdpP59B4jtyxWkQu3eW475WCdiAjojjeyBtVRGQ5vOomwaOIFejY_IekzH6I_taii3.U9x44MF6Wyz5TIwIzwhoxQ.vK7yvSF2beKdNxNY_7n4XdF7JluCGZoxdFJyTJVkSmI.bXRlI8KL-g7gpprQxGmXjVYjYghhWJq7mlCfWI8q2uA",
+ },
+ []string{
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoiR3BjX3pfbjduZjJVZlEtWGdsaTBaQSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiUk40eUdhOVlvYlFhUmZ1TCJ9.Q4ukD6_hZpmASAVcqWJ9Wg.Zfhny_1WNdlp4fH-.3sekDCjkExQCcv28ZW4yrcFnz0vma3vgoenSXA.g8_Ird2Y0itTCDP61du-Yg",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoiWC05UkNVWVh4U3NRelcwelVJS01VUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiY3JNMnJfa3RrdWpyQ1h5OSJ9.c0q2jCxxV4y1h9u_Xvn7FqUDnbkmNEG4.S_noOTZKuUo9z1l6.ez0RdA25vXMUGH96iXmj3DEVox0J7TasJMnzgg.RbuSPTte_NzTtEEokbc5Ig",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoiWmwyaDFpUW11QWZWd2lJeVp5RHloZyIsImFsZyI6IkEyNTZHQ01LVyIsIml2Ijoib19xZmljb0N0NzNzRWo1QyJ9.NpJxRJ0aqcpekD6HU2u9e6_pL_11JXjWvjfeQnAKkZU.4c5qBcBBrMWi27Lf.NKwNIb4b6cRDJ1TwMKsPrjs7ADn6aNoBdQClVw.yNWmSSRBqQfIQObzj8zDqw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiMXdwVEI3LWhjdzZUVXhCbVh2UzdhUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiOUdIVnZJaDZ0a09vX2pHUSJ9.MFgIhp9mzlq9hoPqqKVKHJ3HL79EBYtV4iNhD63yqiU.UzW5iq8ou21VpZYJgKEN8A.1gOEzA4uAPvHP76GMfs9uLloAV10mKaxiZVAeL7iQA0.i1X_2i0bCAz-soXF9bI_zw",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiNThocUtsSk15Y1BFUEFRUlNfSzlNUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiUDh3aTBWMTluVnZqNXpkOSJ9.FXidOWHNFJODO74Thq3J2cC-Z2B8UZkn7SikeosU0bUK6Jx_lzzmUZ-Lafadpdpj.iLfcDbpuBKFiSfiBzUQc7Q.VZK-aD7BFspqfvbwa0wE2wwWxdomzk2IKMetFe8bI44.7wC6rJRGa4x48xbYMd6NH9VzK8uNn4Cb",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoicGcwOEpUcXdzMXdEaXBaRUlpVExoQSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiSlpodk9CdU1RUDFFZTZTNSJ9.wqVgTPm6TcYCTkpbwmn9sW4mgJROH2A3dIdSXo5oKIQUIVbQsmy7KXH8UYO2RS9slMGtb869C8o0My67GKg9dQ.ogrRiLlqjB1S5j-7a05OwA.2Y_LyqhU4S_RXMsB74bxcBacd23J2Sp5Lblw-sOkaUY.XGMiYoU-f3GaEzSvG41vpJP2DMGbeDFoWmkUGLUjc4M",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTI1NktXIn0.QiIZm9NYfahqYFIbiaoUhCCHjotHMkup.EsU0XLn4FjzzCILn.WuCoQkm9vzo95E7hxBtfYpt-Mooc_vmSTyzj6Q.NbeeYVy6gQPlmhoWDrZwaQ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTI1NktXIn0.1ol3j_Lt0Os3UMe2Gypj0o8b77k0FSmqD7kNRNoMa9U.vZ2HMTgN2dgUd42h.JvNcy8-c8sYzOC089VtFSg2BOQx3YF8CqSTuJw.t03LRioWWKN3d7SjinU6SQ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTI1NktXIn0.gbkk03l1gyrE9qGEMVtORiyyUqKsgzbqjLd8lw0RQ07WWn--TV4BgA.J8ThH4ac2UhSsMIP.g-W1piEGrdi3tNwQDJXpYm3fQjTf82mtVCrCOg.-vY05P4kiB9FgF2vwrSeXQ",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTI1NktXIn0.k86pQs7gmQIzuIWRFwesF32XY2xi1WbYxi7XUf_CYlOlehwGCTINHg.3NcC9VzfQgsECISKf4xy-g.v2amdo-rgeGsg-II_tvPukX9D-KAP27xxf2uQJ277Ws.E4LIE3fte3glAnPpnd8D9Q",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTI1NktXIn0.b8iN0Am3fCUvj7sBd7Z0lpfzBjh1MOgojV7J5rDfrcTU3b35RGYgEV1RdcrtUTBgUwITDjmU7jM.wsSDBFghDga_ERv36I2AOg.6uJsucCb2YReFOJGBdo4zidTIKLUmZBIXfm_M0AJpKk.YwdAfXI3HHcw2wLSnfCRtw4huZQtSKhz",
+ "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTI1NktXIn0.akY9pHCbkHPh5VpXIrX0At41XnJIKBR9iMMkf301vKeJNAZYJTxWzeJhFd-DhQ47tMctc3YYkwZkQ5I_9fGYb_f0oBcw4esh.JNwuuHud78h6S99NO1oBQQ.0RwckPYATBgvw67upkAQ1AezETHc-gh3rryz19i5ryc.3XClRTScgzfMgLCHxHHoRF8mm9VVGXv_Ahtx65PskKQ",
+ },
+ }
+
+ for i, msgs := range aesSampleMessages {
+ for _, msg := range msgs {
+ obj, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ plaintext, err := obj.Decrypt(aesTestKeys[i])
+ if err != nil {
+ t.Error("unable to decrypt message", msg, err)
+ continue
+ }
+ if string(plaintext) != "Lorem ipsum dolor sit amet" {
+ t.Error("plaintext is not what we expected for msg", msg)
+ }
+ }
+ }
+}
+
+// Test vectors generated with jose4j
+func TestSampleJose4jJWEMessagesECDH(t *testing.T) {
+ ecTestKey := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ"),
+ Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck"),
+ },
+ D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw"),
+ }
+
+ ecSampleMessages := []string{
+ "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTEyOENCQy1IUzI1NiIsImVwayI6eyJrdHkiOiJFQyIsIngiOiJTQzAtRnJHUkVvVkpKSmg1TGhORmZqZnFXMC1XSUFyd3RZMzJzQmFQVVh3IiwieSI6ImFQMWlPRENveU9laTVyS1l2VENMNlRMZFN5UEdUN0djMnFsRnBwNXdiWFEiLCJjcnYiOiJQLTI1NiJ9fQ..3mifklTnTTGuA_etSUBBCw.dj8KFM8OlrQ3rT35nHcHZ7A5p84VB2OZb054ghSjS-M.KOIgnJjz87LGqMtikXGxXw",
+ "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTE5MkNCQy1IUzM4NCIsImVwayI6eyJrdHkiOiJFQyIsIngiOiJUaHRGc0lRZ1E5MkZOYWFMbUFDQURLbE93dmNGVlRORHc4ampfWlJidUxjIiwieSI6IjJmRDZ3UXc3YmpYTm1nVThXMGpFbnl5ZUZkX3Y4ZmpDa3l1R29vTFhGM0EiLCJjcnYiOiJQLTI1NiJ9fQ..90zFayMkKc-fQC_19f6P3A.P1Y_7lMnfkUQOXW_en31lKZ3zAn1nEYn6fXLjmyVPrQ.hrgwy1cePVfhMWT0h-crKTXldglHZ-4g",
+ "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTI1NkNCQy1IUzUxMiIsImVwayI6eyJrdHkiOiJFQyIsIngiOiI5R1Z6c3VKNWgySl96UURVUFR3WU5zUkFzVzZfY2RzN0pELVQ2RDREQ1ZVIiwieSI6InFZVGl1dVU4aTB1WFpoaS14VGlRNlZJQm5vanFoWENPVnpmWm1pR2lRTEUiLCJjcnYiOiJQLTI1NiJ9fQ..v2reRlDkIsw3eWEsTCc1NA.0qakrFdbhtBCTSl7EREf9sxgHBP9I-Xw29OTJYnrqP8.54ozViEBYYmRkcKp7d2Ztt4hzjQ9Vb5zCeijN_RQrcI",
+ "eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiOElUemg3VVFaaUthTWtfME9qX1hFaHZENXpUWjE2Ti13WVdjeTJYUC1tdyIsInkiOiJPNUJiVEk0bUFpU005ZmpCejBRU3pXaU5vbnl3cWlQLUN0RGgwdnNGYXNRIiwiY3J2IjoiUC0yNTYifX0.D3DP3wqPvJv4TYYfhnfrOG6nsM-MMH_CqGfnOGjgdXHNF7xRwEJBOA.WL9Kz3gNYA7S5Rs5mKcXmA.EmQkXhO_nFqAwxJWaM0DH4s3pmCscZovB8YWJ3Ru4N8.Bf88uzwfxiyTjpejU5B0Ng",
+ "eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiMjlJMk4zRkF0UlBlNGhzYjRLWlhTbmVyV0wyTVhtSUN1LXJJaXhNSHpJQSIsInkiOiJvMjY1bzFReEdmbDhzMHQ0U1JROS00RGNpc3otbXh4NlJ6WVF4SktyeWpJIiwiY3J2IjoiUC0yNTYifX0.DRmsmXz6fCnLc_njDIKdpM7Oc4jTqd_yd9J94TOUksAstEUkAl9Ie3Wg-Ji_LzbdX2xRLXIimcw.FwJOHPQhnqKJCfxt1_qRnQ.ssx3q1ZYILsMTln5q-K8HVn93BVPI5ViusstKMxZzRs.zzcfzWNYSdNDdQ4CiHfymj0bePaAbVaT",
+ "eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiRUp6bTViQnRzVXJNYTl2Y1Q2d1hZRXI3ZjNMcjB0N1V4SDZuZzdGcFF0VSIsInkiOiJRYTNDSDllVTFXYjItdFdVSDN3Sk9fTDVMZXRsRUlMQWNkNE9XR2tFd0hZIiwiY3J2IjoiUC0yNTYifX0.5WxwluZpVWAOJdVrsnDIlEc4_wfRE1gXOaQyx_rKkElNz157Ykf-JsAD7aEvXfx--NKF4js5zYyjeCtxWBhRWPOoNNZJlqV_.Iuo82-qsP2S1SgQQklAnrw.H4wB6XoLKOKWCu6Y3LPAEuHkvyvr-xAh4IBm53uRF8g._fOLKq0bqDZ8KNjni_MJ4olHNaYz376dV9eNmp9O9PU",
+ "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiZktNSG5sRkoxajBTSnJ3WGtVWlpaX3BtWHdUQlJtcHhlaTkxdUpaczUycyIsInkiOiJLRkxKaXhEUTJQcjEybWp1aFdYb3pna2U1V3lhWnhmTWlxZkJ0OEJpbkRvIiwiY3J2IjoiUC0yNTYifX0.2LSD2Mw4tyYJyfsmpVmzBtJRd12jMEYGdlhFbaXIbKi5A33CGNQ1tg.s40aAjmZOvK8Us86FCBdHg.jpYSMAKp___oMCoWM495mTfbi_YC80ObeoCmGE3H_gs.A6V-jJJRY1yz24CaXGUbzg",
+ "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiSDRxcFUzeWtuRktWRnV4SmxLa3NZSE5ieHF3aXM0WWtCVVFHVE1Td05JQSIsInkiOiJHb0lpRUZaUGRRSHJCbVR4ZTA3akJoZmxrdWNqUjVoX1QwNWVXc3Zib0prIiwiY3J2IjoiUC0yNTYifX0.KTrwwV2uzD--gf3PGG-kjEAGgi7u0eMqZPZfa4kpyFGm3x8t2m1NHdz3t9rfiqjuaqsxPKhF4gs.cu16fEOzYaSxhHu_Ht9w4g.BRJdxVBI9spVtY5KQ6gTR4CNcKvmLUMKZap0AO-RF2I.DZyUaa2p6YCIaYtjWOjC9GN_VIYgySlZ",
+ "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoieDBYSGRkSGM2Q0ktSnlfbUVMOEZZRExhWnV0UkVFczR4c3BMQmcwZk1jbyIsInkiOiJEa0xzOUJGTlBkTTVTNkpLYVJ3cnV1TWMwcUFzWW9yNW9fZWp6NXBNVXFrIiwiY3J2IjoiUC0yNTYifX0.mfCxJ7JYIqTMqcAh5Vp2USF0eF7OhOeluqda7YagOUJNwxA9wC9o23DSoLUylfrZUfanZrJJJcG69awlv-LY7anOLHlp3Ht5.ec48A_JWb4qa_PVHWZaTfQ.kDAjIDb3LzJpfxNh-DiAmAuaKMYaOGSTb0rkiJLuVeY.oxGCpPlii4pr89XMk4b9s084LucTqPGU6TLbOW2MZoc",
+ "eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiQXB5TnlqU2d0bmRUcFg0eENYenNDRnZva1l3X18weXg2dGRUYzdPUUhIMCIsInkiOiJYUHdHMDVDaW1vOGlhWmxZbDNsMEp3ZllhY1FZWHFuM2RRZEJUWFpldDZBIiwiY3J2IjoiUC0yNTYifX0.yTA2PwK9IPqkaGPenZ9R-gOn9m9rvcSEfuX_Nm8AkuwHIYLzzYeAEA.ZW1F1iyHYKfo-YoanNaIVg.PouKQD94DlPA5lbpfGJXY-EJhidC7l4vSayVN2vVzvA.MexquqtGaXKUvX7WBmD4bA",
+ "eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiaDRWeGNzNVUzWk1fTlp4WmJxQ3hMTVB5UmEtR2ktSVNZa0xDTzE1RHJkZyIsInkiOiJFeVotS3dWNVE5OXlnWk5zU0lpSldpR3hqbXNLUk1WVE5sTTNSd1VYTFRvIiwiY3J2IjoiUC0yNTYifX0.wo56VISyL1QAbi2HLuVut5NGF2FvxKt7B8zHzJ3FpmavPozfbVZV08-GSYQ6jLQWJ4xsO80I4Kg.3_9Bo5ozvD96WHGhqp_tfQ.48UkJ6jk6WK70QItb2QZr0edKH7O-aMuVahTEeqyfW4.ulMlY2tbC341ct20YSmNdtc84FRz1I4g",
+ "eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiN0xZRzZZWTJkel9ZaGNvNnRCcG1IX0tPREQ2X2hwX05tajdEc1c2RXgxcyIsInkiOiI5Y2lPeDcwUkdGT0tpVnBRX0NHQXB5NVlyeThDazBmUkpwNHVrQ2tjNmQ0IiwiY3J2IjoiUC0yNTYifX0.bWwW3J80k46HG1fQAZxUroko2OO8OKkeRavr_o3AnhJDMvp78OR229x-fZUaBm4uWv27_Yjm0X9T2H2lhlIli2Rl9v1PNC77.1NmsJBDGI1fDjRzyc4mtyA.9KfCFynQj7LmJq08qxAG4c-6ZPz1Lh3h3nUbgVwB0TI.cqech0d8XHzWfkWqgKZq1SlAfmO0PUwOsNVkuByVGWk",
+ }
+
+ for _, msg := range ecSampleMessages {
+ obj, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ plaintext, err := obj.Decrypt(ecTestKey)
+ if err != nil {
+ t.Error("unable to decrypt message", msg, err)
+ continue
+ }
+ if string(plaintext) != "Lorem ipsum dolor sit amet." {
+ t.Error("plaintext is not what we expected for msg", msg)
+ }
+ }
+}
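
The fixed vectors above only exercise parsing and decryption; below is a hedged round-trip sketch of the encrypt/decrypt flow those tests rely on, using NewEncrypter and Encrypt from this package. The generated key and the chosen algorithms are illustrative:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    // Throwaway RSA key pair for the example.
    priv, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }

    // RSA-OAEP key wrapping with AES-256-GCM content encryption, matching
    // the algorithms that appear in the vectors above.
    encrypter, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A256GCM, &priv.PublicKey)
    if err != nil {
        panic(err)
    }

    obj, err := encrypter.Encrypt([]byte("Lorem ipsum dolor sit amet"))
    if err != nil {
        panic(err)
    }
    msg, err := obj.CompactSerialize()
    if err != nil {
        panic(err)
    }

    // Round trip: parse the serialized message and decrypt it again.
    parsed, err := jose.ParseEncrypted(msg)
    if err != nil {
        panic(err)
    }
    plaintext, err := parsed.Decrypt(priv)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(plaintext))
}
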
diff --git a/vendor/gopkg.in/square/go-jose.v1/jwk.go b/vendor/gopkg.in/square/go-jose.v1/jwk.go
new file mode 100644
index 000000000..505dd700e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jwk.go
@@ -0,0 +1,457 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+// rawJsonWebKey represents a public or private key in JWK format, used for parsing/serializing.
+type rawJsonWebKey struct {
+ Use string `json:"use,omitempty"`
+ Kty string `json:"kty,omitempty"`
+ Kid string `json:"kid,omitempty"`
+ Crv string `json:"crv,omitempty"`
+ Alg string `json:"alg,omitempty"`
+ K *byteBuffer `json:"k,omitempty"`
+ X *byteBuffer `json:"x,omitempty"`
+ Y *byteBuffer `json:"y,omitempty"`
+ N *byteBuffer `json:"n,omitempty"`
+ E *byteBuffer `json:"e,omitempty"`
+ // -- Following fields are only used for private keys --
+ // RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
+ // completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
+ // we have a private key whereas D == nil means we have only a public key.
+ D *byteBuffer `json:"d,omitempty"`
+ P *byteBuffer `json:"p,omitempty"`
+ Q *byteBuffer `json:"q,omitempty"`
+ Dp *byteBuffer `json:"dp,omitempty"`
+ Dq *byteBuffer `json:"dq,omitempty"`
+ Qi *byteBuffer `json:"qi,omitempty"`
+ // Certificates
+ X5c []string `json:"x5c,omitempty"`
+}
+
+// JsonWebKey represents a public or private key in JWK format.
+type JsonWebKey struct {
+ Key interface{}
+ Certificates []*x509.Certificate
+ KeyID string
+ Algorithm string
+ Use string
+}
+
+// MarshalJSON serializes the given key to its JSON representation.
+func (k JsonWebKey) MarshalJSON() ([]byte, error) {
+ var raw *rawJsonWebKey
+ var err error
+
+ switch key := k.Key.(type) {
+ case *ecdsa.PublicKey:
+ raw, err = fromEcPublicKey(key)
+ case *rsa.PublicKey:
+ raw = fromRsaPublicKey(key)
+ case *ecdsa.PrivateKey:
+ raw, err = fromEcPrivateKey(key)
+ case *rsa.PrivateKey:
+ raw, err = fromRsaPrivateKey(key)
+ case []byte:
+ raw, err = fromSymmetricKey(key)
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ raw.Kid = k.KeyID
+ raw.Alg = k.Algorithm
+ raw.Use = k.Use
+
+ for _, cert := range k.Certificates {
+ raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
+ }
+
+ return json.Marshal(raw)
+}
+
+// UnmarshalJSON reads a key from its JSON representation.
+func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) {
+ var raw rawJsonWebKey
+ err = json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ var key interface{}
+ switch raw.Kty {
+ case "EC":
+ if raw.D != nil {
+ key, err = raw.ecPrivateKey()
+ } else {
+ key, err = raw.ecPublicKey()
+ }
+ case "RSA":
+ if raw.D != nil {
+ key, err = raw.rsaPrivateKey()
+ } else {
+ key, err = raw.rsaPublicKey()
+ }
+ case "oct":
+ key, err = raw.symmetricKey()
+ default:
+ err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
+ }
+
+ if err == nil {
+ *k = JsonWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
+ }
+
+ k.Certificates = make([]*x509.Certificate, len(raw.X5c))
+ for i, cert := range raw.X5c {
+ raw, err := base64.StdEncoding.DecodeString(cert)
+ if err != nil {
+ return err
+ }
+ k.Certificates[i], err = x509.ParseCertificate(raw)
+ if err != nil {
+ return err
+ }
+ }
+
+ return
+}
+
+// JsonWebKeySet represents a JWK Set object.
+type JsonWebKeySet struct {
+ Keys []JsonWebKey `json:"keys"`
+}
+
+// Key is a convenience method that returns the keys matching the given key ID.
+// The specification states that a JWK Set "SHOULD" use distinct key IDs, but
+// allows for some cases where they are not distinct. Hence the method returns
+// a slice of JsonWebKeys rather than a single key.
+func (s *JsonWebKeySet) Key(kid string) []JsonWebKey {
+ var keys []JsonWebKey
+ for _, key := range s.Keys {
+ if key.KeyID == kid {
+ keys = append(keys, key)
+ }
+ }
+
+ return keys
+}
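
A short lookup sketch for the method above; the JWKS input and the helper name are illustrative assumptions:

package example

import (
    "encoding/json"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

// findKey returns the first key in a serialized JWK Set matching the key ID.
func findKey(jwksJSON []byte, kid string) (*jose.JsonWebKey, error) {
    var set jose.JsonWebKeySet
    if err := json.Unmarshal(jwksJSON, &set); err != nil {
        return nil, err
    }
    // Key returns a slice because key IDs are only "SHOULD"-level unique.
    matches := set.Key(kid)
    if len(matches) == 0 {
        return nil, fmt.Errorf("no key with kid %q", kid)
    }
    return &matches[0], nil
}
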
+
+const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
+const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
+
+func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
+ coordLength := curveSize(curve)
+ crv, err := curveName(curve)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf(ecThumbprintTemplate, crv,
+ newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
+ newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
+}
+
+func rsaThumbprintInput(n *big.Int, e int) (string, error) {
+ return fmt.Sprintf(rsaThumbprintTemplate,
+ newBufferFromInt(uint64(e)).base64(),
+ newBuffer(n.Bytes()).base64()), nil
+}
+
+// Thumbprint computes the JWK Thumbprint of a key using the
+// indicated hash algorithm.
+func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ var input string
+ var err error
+ switch key := k.Key.(type) {
+ case *ecdsa.PublicKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *ecdsa.PrivateKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *rsa.PublicKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ case *rsa.PrivateKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ h := hash.New()
+ h.Write([]byte(input))
+ return h.Sum(nil), nil
+}
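
A hedged sketch of using the method above; SHA-256 and the base64url encoding of the digest are common choices, not requirements of this file:

package example

import (
    "crypto"
    _ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
    "encoding/base64"

    jose "gopkg.in/square/go-jose.v1"
)

// keyThumbprint returns the SHA-256 JWK thumbprint, base64url-encoded.
func keyThumbprint(jwk *jose.JsonWebKey) (string, error) {
    tp, err := jwk.Thumbprint(crypto.SHA256)
    if err != nil {
        return "", err
    }
    return base64.RawURLEncoding.EncodeToString(tp), nil
}
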
+
+// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
+func (k *JsonWebKey) IsPublic() bool {
+ switch k.Key.(type) {
+ case *ecdsa.PublicKey, *rsa.PublicKey:
+ return true
+ default:
+ return false
+ }
+}
+
+// Valid checks that the key contains the expected parameters.
+func (k *JsonWebKey) Valid() bool {
+ if k.Key == nil {
+ return false
+ }
+ switch key := k.Key.(type) {
+ case *ecdsa.PublicKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil {
+ return false
+ }
+ case *ecdsa.PrivateKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
+ return false
+ }
+ case *rsa.PublicKey:
+ if key.N == nil || key.E == 0 {
+ return false
+ }
+ case *rsa.PrivateKey:
+ if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+func (key rawJsonWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
+ if key.N == nil || key.E == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
+ }
+
+ return &rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ }, nil
+}
+
+func fromRsaPublicKey(pub *rsa.PublicKey) *rawJsonWebKey {
+ return &rawJsonWebKey{
+ Kty: "RSA",
+ N: newBuffer(pub.N.Bytes()),
+ E: newBufferFromInt(uint64(pub.E)),
+ }
+}
+
+func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil {
+ return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ }, nil
+}
+
+func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJsonWebKey, error) {
+ if pub == nil || pub.X == nil || pub.Y == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
+ }
+
+ name, err := curveName(pub.Curve)
+ if err != nil {
+ return nil, err
+ }
+
+ size := curveSize(pub.Curve)
+
+ xBytes := pub.X.Bytes()
+ yBytes := pub.Y.Bytes()
+
+ if len(xBytes) > size || len(yBytes) > size {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
+ }
+
+ key := &rawJsonWebKey{
+ Kty: "EC",
+ Crv: name,
+ X: newFixedSizeBuffer(xBytes, size),
+ Y: newFixedSizeBuffer(yBytes, size),
+ }
+
+ return key, nil
+}
+
+func (key rawJsonWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
+ // Collect all missing required fields so the error names every one of
+ // them, not just the first that happens to be nil.
+ var missing []string
+ if key.N == nil {
+ missing = append(missing, "N")
+ }
+ if key.E == nil {
+ missing = append(missing, "E")
+ }
+ if key.D == nil {
+ missing = append(missing, "D")
+ }
+ if key.P == nil {
+ missing = append(missing, "P")
+ }
+ if key.Q == nil {
+ missing = append(missing, "Q")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
+ }
+
+ rv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ },
+ D: key.D.bigInt(),
+ Primes: []*big.Int{
+ key.P.bigInt(),
+ key.Q.bigInt(),
+ },
+ }
+
+ if key.Dp != nil {
+ rv.Precomputed.Dp = key.Dp.bigInt()
+ }
+ if key.Dq != nil {
+ rv.Precomputed.Dq = key.Dq.bigInt()
+ }
+ if key.Qi != nil {
+ rv.Precomputed.Qinv = key.Qi.bigInt()
+ }
+
+ err := rv.Validate()
+ return rv, err
+}
+
+func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJsonWebKey, error) {
+ if len(rsa.Primes) != 2 {
+ return nil, ErrUnsupportedKeyType
+ }
+
+ raw := fromRsaPublicKey(&rsa.PublicKey)
+
+ raw.D = newBuffer(rsa.D.Bytes())
+ raw.P = newBuffer(rsa.Primes[0].Bytes())
+ raw.Q = newBuffer(rsa.Primes[1].Bytes())
+
+ return raw, nil
+}
+
+func (key rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil || key.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ },
+ D: key.D.bigInt(),
+ }, nil
+}
+
+func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJsonWebKey, error) {
+ raw, err := fromEcPublicKey(&ec.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if ec.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key")
+ }
+
+ raw.D = newBuffer(ec.D.Bytes())
+
+ return raw, nil
+}
+
+func fromSymmetricKey(key []byte) (*rawJsonWebKey, error) {
+ return &rawJsonWebKey{
+ Kty: "oct",
+ K: newBuffer(key),
+ }, nil
+}
+
+func (key rawJsonWebKey) symmetricKey() ([]byte, error) {
+ if key.K == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
+ }
+ return key.K.bytes(), nil
+}
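
For orientation, a hedged round-trip sketch of the JWK (de)serialization defined above; the generated key and the key ID/algorithm/use values are illustrative:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "encoding/json"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    // Wrap a freshly generated EC private key in a JsonWebKey.
    ec, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    jwk := jose.JsonWebKey{Key: ec, KeyID: "example-key", Algorithm: "ES256", Use: "sig"}

    // MarshalJSON emits the "crv"/"x"/"y"/"d" fields for an EC private key.
    data, err := json.Marshal(&jwk)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data))

    // UnmarshalJSON decides public vs. private from the presence of "d".
    var parsed jose.JsonWebKey
    if err := json.Unmarshal(data, &parsed); err != nil {
        panic(err)
    }
    fmt.Println("valid:", parsed.Valid(), "public:", parsed.IsPublic())
}
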
diff --git a/vendor/gopkg.in/square/go-jose.v1/jwk_test.go b/vendor/gopkg.in/square/go-jose.v1/jwk_test.go
new file mode 100644
index 000000000..c34f5de56
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jwk_test.go
@@ -0,0 +1,662 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "reflect"
+ "testing"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+// Test chain of two X.509 certificates
+var testCertificates, _ = x509.ParseCertificates(fromBase64Bytes(`
+MIIDfDCCAmSgAwIBAgIJANWAkzF7PA8/MA0GCSqGSIb3DQEBCwUAMFUxCzAJ
+BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4GA1UEChMHY2VydGlnbzEQMA4G
+A1UECxMHZXhhbXBsZTEVMBMGA1UEAxMMZXhhbXBsZS1sZWFmMB4XDTE2MDYx
+MDIyMTQxMVoXDTIzMDQxNTIyMTQxMVowVTELMAkGA1UEBhMCVVMxCzAJBgNV
+BAgTAkNBMRAwDgYDVQQKEwdjZXJ0aWdvMRAwDgYDVQQLEwdleGFtcGxlMRUw
+EwYDVQQDEwxleGFtcGxlLWxlYWYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQC7stSvfQyGuHw3v34fisqIdDXberrFoFk9ht/WdXgYzX2uLNKd
+sR/J5sbWSl8K/5djpzj31eIzqU69w8v7SChM5x9bouDsABHz3kZucx5cSafE
+gJojysBkcrq3VY+aJanzbL+qErYX+lhRpPcZK6JMWIwar8Y3B2la4yWwieec
+w2/WfEVvG0M/DOYKnR8QHFsfl3US1dnBM84czKPyt9r40gDk2XiH/lGts5a9
+4rAGvbr8IMCtq0mA5aH3Fx3mDSi3+4MZwygCAHrF5O5iSV9rEI+m2+7j2S+j
+HDUnvV+nqcpb9m6ENECnYX8FD2KcqlOjTmw8smDy09N2Np6i464lAgMBAAGj
+TzBNMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAsBgNVHREEJTAj
+hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAGM4aa/qrURUweZBIwZYv8O9b2+r4l0HjGAh982/B9sMlM05
+kojyDCUGvj86z18Lm8mKr4/y+i0nJ+vDIksEvfDuzw5ALAXGcBzPJKtICUf7
+LstA/n9NNpshWz0kld9ylnB5mbUzSFDncVyeXkEf5sGQXdIIZT9ChRBoiloS
+aa7dvBVCcsX1LGP2LWqKtD+7nUnw5qCwtyAVT8pthEUxFTpywoiJS5ZdzeEx
+8MNGvUeLFj2kleqPF78EioEQlSOxViCuctEtnQuPcDLHNFr10byTZY9roObi
+qdsJLMVvb2XliJjAqaPa9AkYwGE6xHw2ispwg64Rse0+AtKups19WIUwggNT
+MIICO6ADAgECAgkAqD4tCWKt9/AwDQYJKoZIhvcNAQELBQAwVTELMAkGA1UE
+BhMCVVMxCzAJBgNVBAgTAkNBMRAwDgYDVQQKEwdjZXJ0aWdvMRAwDgYDVQQL
+EwdleGFtcGxlMRUwEwYDVQQDEwxleGFtcGxlLXJvb3QwHhcNMTYwNjEwMjIx
+NDExWhcNMjMwNDE1MjIxNDExWjBVMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
+Q0ExEDAOBgNVBAoTB2NlcnRpZ28xEDAOBgNVBAsTB2V4YW1wbGUxFTATBgNV
+BAMTDGV4YW1wbGUtcm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAMo4ShKI2MxDz/NQVxBbz0tbD5R5NcobA0NKkaPKLyMEpnWVY9ucyauM
+joNn1F568cfOoF0pm3700U8UTPt2MMxEHIi4mFG/OF8UF+Voh1J42Tb42lRo
+W5RRR3ogh4+7QB1G94nxkYddHAJ4QMhUJlLigFg8c6Ff/MxYODy9I7ilLFOM
+Zzsjx8fFpRKRXNQFt471P/V4WTSba7GzdTOJRyTZf/xipF36n8RoEQPvyde8
+pEAsCC4oDOrEiCTdxw8rRJVAU0Wr55XX+qjxyi55C6oykIC/BWR+lUqGd7IL
+Y2Uyt/OVxllt8b+KuVKNCfn4TFlfgizLWkJRs6JV9KuwJ20CAwEAAaMmMCQw
+DgYDVR0PAQH/BAQDAgIEMBIGA1UdEwEB/wQIMAYBAf8CAQAwDQYJKoZIhvcN
+AQELBQADggEBAIsQlTrm9NT6gts0cs4JHp8AutuMrvGyLpIUOlJcEybvgxaz
+LebIMGZek5w3yEJiCyCK9RdNDP3Kdc/+nM6PhvzfPOVo58+0tMCYyEpZVXhD
+zmasNDP4fMbiUpczvx5OwPw/KuhwD+1ITuZUQnQlqXgTYoj9n39+qlgUsHos
+WXHmfzd6Fcz96ADSXg54IL2cEoJ41Q3ewhA7zmWWPLMAl21aex2haiAmzqqN
+xXyfZTnGNnE3lkV1yVguOrqDZyMRdcxDFvxvtmEeMtYV2Mc/zlS9ccrcOkrc
+mZSDxthLu3UMl98NA2NrCGWwzJwpk36vQ0PRSbibsCMarFspP8zbIoU=`))
+
+func TestCurveSize(t *testing.T) {
+ size256 := curveSize(elliptic.P256())
+ size384 := curveSize(elliptic.P384())
+ size521 := curveSize(elliptic.P521())
+	if size256 != 32 {
+		t.Error("P-256 should have 32 bytes")
+	}
+	if size384 != 48 {
+		t.Error("P-384 should have 48 bytes")
+	}
+	if size521 != 66 {
+		t.Error("P-521 should have 66 bytes")
+	}
+}
+
+func TestRoundtripRsaPrivate(t *testing.T) {
+ jwk, err := fromRsaPrivateKey(rsaTestKey)
+ if err != nil {
+ t.Error("problem constructing JWK from rsa key", err)
+ }
+
+ rsa2, err := jwk.rsaPrivateKey()
+ if err != nil {
+		t.Error("problem converting JWK -> RSA private key", err)
+ }
+
+ if rsa2.N.Cmp(rsaTestKey.N) != 0 {
+ t.Error("RSA private N mismatch")
+ }
+ if rsa2.E != rsaTestKey.E {
+ t.Error("RSA private E mismatch")
+ }
+ if rsa2.D.Cmp(rsaTestKey.D) != 0 {
+ t.Error("RSA private D mismatch")
+ }
+ if len(rsa2.Primes) != 2 {
+ t.Error("RSA private roundtrip expected two primes")
+ }
+ if rsa2.Primes[0].Cmp(rsaTestKey.Primes[0]) != 0 {
+ t.Error("RSA private P mismatch")
+ }
+ if rsa2.Primes[1].Cmp(rsaTestKey.Primes[1]) != 0 {
+ t.Error("RSA private Q mismatch")
+ }
+}
+
+func TestRsaPrivateInsufficientPrimes(t *testing.T) {
+ brokenRsaPrivateKey := rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: rsaTestKey.N,
+ E: rsaTestKey.E,
+ },
+ D: rsaTestKey.D,
+ Primes: []*big.Int{rsaTestKey.Primes[0]},
+ }
+
+ _, err := fromRsaPrivateKey(&brokenRsaPrivateKey)
+ if err != ErrUnsupportedKeyType {
+ t.Error("expected unsupported key type error, got", err)
+ }
+}
+
+func TestRsaPrivateExcessPrimes(t *testing.T) {
+ brokenRsaPrivateKey := rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: rsaTestKey.N,
+ E: rsaTestKey.E,
+ },
+ D: rsaTestKey.D,
+ Primes: []*big.Int{
+ rsaTestKey.Primes[0],
+ rsaTestKey.Primes[1],
+ big.NewInt(3),
+ },
+ }
+
+ _, err := fromRsaPrivateKey(&brokenRsaPrivateKey)
+ if err != ErrUnsupportedKeyType {
+ t.Error("expected unsupported key type error, got", err)
+ }
+}
+
+func TestRoundtripEcPublic(t *testing.T) {
+ for i, ecTestKey := range []*ecdsa.PrivateKey{ecTestKey256, ecTestKey384, ecTestKey521} {
+		jwk, err := fromEcPublicKey(&ecTestKey.PublicKey)
+		if err != nil {
+			t.Error("problem constructing JWK from EC public key", i, err)
+		}
+
+		ec2, err := jwk.ecPublicKey()
+		if err != nil {
+			t.Error("problem converting JWK -> ECDSA public key", i, err)
+		}
+
+ if !reflect.DeepEqual(ec2.Curve, ecTestKey.Curve) {
+ t.Error("ECDSA private curve mismatch", i)
+ }
+ if ec2.X.Cmp(ecTestKey.X) != 0 {
+ t.Error("ECDSA X mismatch", i)
+ }
+ if ec2.Y.Cmp(ecTestKey.Y) != 0 {
+ t.Error("ECDSA Y mismatch", i)
+ }
+ }
+}
+
+func TestRoundtripEcPrivate(t *testing.T) {
+ for i, ecTestKey := range []*ecdsa.PrivateKey{ecTestKey256, ecTestKey384, ecTestKey521} {
+		jwk, err := fromEcPrivateKey(ecTestKey)
+		if err != nil {
+			t.Error("problem constructing JWK from EC private key", i, err)
+		}
+
+		ec2, err := jwk.ecPrivateKey()
+		if err != nil {
+			t.Error("problem converting JWK -> ECDSA private key", i, err)
+		}
+
+ if !reflect.DeepEqual(ec2.Curve, ecTestKey.Curve) {
+ t.Error("ECDSA private curve mismatch", i)
+ }
+ if ec2.X.Cmp(ecTestKey.X) != 0 {
+ t.Error("ECDSA X mismatch", i)
+ }
+ if ec2.Y.Cmp(ecTestKey.Y) != 0 {
+ t.Error("ECDSA Y mismatch", i)
+ }
+ if ec2.D.Cmp(ecTestKey.D) != 0 {
+ t.Error("ECDSA D mismatch", i)
+ }
+ }
+}
+
+func TestRoundtripX5C(t *testing.T) {
+ jwk := JsonWebKey{
+ Key: rsaTestKey,
+ KeyID: "bar",
+ Algorithm: "foo",
+ Certificates: testCertificates,
+ }
+
+ jsonbar, err := jwk.MarshalJSON()
+ if err != nil {
+ t.Error("problem marshaling", err)
+ }
+
+ var jwk2 JsonWebKey
+ err = jwk2.UnmarshalJSON(jsonbar)
+ if err != nil {
+ t.Error("problem unmarshalling", err)
+ }
+
+ if !reflect.DeepEqual(testCertificates, jwk2.Certificates) {
+ t.Error("Certificates not equal", jwk.Certificates, jwk2.Certificates)
+ }
+
+ jsonbar2, err := jwk2.MarshalJSON()
+ if err != nil {
+ t.Error("problem marshaling", err)
+ }
+ if !bytes.Equal(jsonbar, jsonbar2) {
+ t.Error("roundtrip should not lose information")
+ }
+}
+
+func TestMarshalUnmarshal(t *testing.T) {
+ kid := "DEADBEEF"
+
+ for i, key := range []interface{}{ecTestKey256, ecTestKey384, ecTestKey521, rsaTestKey} {
+ for _, use := range []string{"", "sig", "enc"} {
+ jwk := JsonWebKey{Key: key, KeyID: kid, Algorithm: "foo"}
+ if use != "" {
+ jwk.Use = use
+ }
+
+ jsonbar, err := jwk.MarshalJSON()
+ if err != nil {
+ t.Error("problem marshaling", i, err)
+ }
+
+ var jwk2 JsonWebKey
+ err = jwk2.UnmarshalJSON(jsonbar)
+ if err != nil {
+ t.Error("problem unmarshalling", i, err)
+ }
+
+ jsonbar2, err := jwk2.MarshalJSON()
+ if err != nil {
+ t.Error("problem marshaling", i, err)
+ }
+
+ if !bytes.Equal(jsonbar, jsonbar2) {
+ t.Error("roundtrip should not lose information", i)
+ }
+ if jwk2.KeyID != kid {
+ t.Error("kid did not roundtrip JSON marshalling", i)
+ }
+
+ if jwk2.Algorithm != "foo" {
+ t.Error("alg did not roundtrip JSON marshalling", i)
+ }
+
+ if jwk2.Use != use {
+ t.Error("use did not roundtrip JSON marshalling", i)
+ }
+ }
+ }
+}
+
+func TestMarshalNonPointer(t *testing.T) {
+ type EmbedsKey struct {
+ Key JsonWebKey
+ }
+
+ keyJson := []byte(`{
+ "e": "AQAB",
+ "kty": "RSA",
+ "n": "vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw"
+ }`)
+ var parsedKey JsonWebKey
+ err := json.Unmarshal(keyJson, &parsedKey)
+ if err != nil {
+ t.Error(fmt.Sprintf("Error unmarshalling key: %v", err))
+ return
+ }
+ ek := EmbedsKey{
+ Key: parsedKey,
+ }
+ out, err := json.Marshal(ek)
+ if err != nil {
+ t.Error(fmt.Sprintf("Error marshalling JSON: %v", err))
+ return
+ }
+ expected := "{\"Key\":{\"kty\":\"RSA\",\"n\":\"vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw\",\"e\":\"AQAB\"}}"
+ if string(out) != expected {
+ t.Error("Failed to marshal embedded non-pointer JWK properly:", string(out))
+ }
+}
+
+func TestMarshalUnmarshalInvalid(t *testing.T) {
+ // Make an invalid curve coordinate by creating a byte array that is one
+ // byte too large, and setting the first byte to 1 (otherwise it's just zero).
+ invalidCoord := make([]byte, curveSize(ecTestKey256.Curve)+1)
+ invalidCoord[0] = 1
+
+ keys := []interface{}{
+ // Empty keys
+ &rsa.PrivateKey{},
+ &ecdsa.PrivateKey{},
+ // Invalid keys
+ &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ // Missing values in pub key
+ Curve: elliptic.P256(),
+ },
+ },
+ &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ // Invalid curve
+ Curve: nil,
+ X: ecTestKey256.X,
+ Y: ecTestKey256.Y,
+ },
+ },
+ &ecdsa.PrivateKey{
+ // Valid pub key, but missing priv key values
+ PublicKey: ecTestKey256.PublicKey,
+ },
+ &ecdsa.PrivateKey{
+ // Invalid pub key, values too large
+ PublicKey: ecdsa.PublicKey{
+ Curve: ecTestKey256.Curve,
+ X: big.NewInt(0).SetBytes(invalidCoord),
+ Y: big.NewInt(0).SetBytes(invalidCoord),
+ },
+ D: ecTestKey256.D,
+ },
+ nil,
+ }
+
+ for i, key := range keys {
+ jwk := JsonWebKey{Key: key}
+ _, err := jwk.MarshalJSON()
+ if err == nil {
+ t.Error("managed to serialize invalid key", i)
+ }
+ }
+}
+
+func TestWebKeyVectorsInvalid(t *testing.T) {
+ keys := []string{
+ // Invalid JSON
+ "{X",
+ // Empty key
+ "{}",
+ // Invalid RSA keys
+ `{"kty":"RSA"}`,
+ `{"kty":"RSA","e":""}`,
+ `{"kty":"RSA","e":"XXXX"}`,
+ `{"kty":"RSA","d":"XXXX"}`,
+ // Invalid EC keys
+ `{"kty":"EC","crv":"ABC"}`,
+ `{"kty":"EC","crv":"P-256"}`,
+ `{"kty":"EC","crv":"P-256","d":"XXX"}`,
+ `{"kty":"EC","crv":"ABC","d":"dGVzdA","x":"dGVzdA"}`,
+ `{"kty":"EC","crv":"P-256","d":"dGVzdA","x":"dGVzdA"}`,
+ }
+
+ for _, key := range keys {
+ var jwk2 JsonWebKey
+ err := jwk2.UnmarshalJSON([]byte(key))
+ if err == nil {
+ t.Error("managed to parse invalid key:", key)
+ }
+ }
+}
+
+// Test vectors from RFC 7520
+var cookbookJWKs = []string{
+ // EC Public
+ stripWhitespace(`{
+ "kty": "EC",
+ "kid": "bilbo.baggins@hobbiton.example",
+ "use": "sig",
+ "crv": "P-521",
+ "x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9
+ A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt",
+ "y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVy
+ SsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1"
+ }`),
+
+ // EC Private
+ stripWhitespace(`{
+ "kty": "EC",
+ "kid": "bilbo.baggins@hobbiton.example",
+ "use": "sig",
+ "crv": "P-521",
+ "x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9
+ A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt",
+ "y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVy
+ SsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1",
+ "d": "AAhRON2r9cqXX1hg-RoI6R1tX5p2rUAYdmpHZoC1XNM56KtscrX6zb
+ KipQrCW9CGZH3T4ubpnoTKLDYJ_fF3_rJt"
+ }`),
+
+ // RSA Public
+ stripWhitespace(`{
+ "kty": "RSA",
+ "kid": "bilbo.baggins@hobbiton.example",
+ "use": "sig",
+ "n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT
+ -O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqV
+ wGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-
+ oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde
+ 3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuC
+ LqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5g
+ HdrNP5zw",
+ "e": "AQAB"
+ }`),
+
+ // RSA Private
+ stripWhitespace(`{"kty":"RSA",
+ "kid":"juliet@capulet.lit",
+ "use":"enc",
+ "n":"t6Q8PWSi1dkJj9hTP8hNYFlvadM7DflW9mWepOJhJ66w7nyoK1gPNqFMSQRy
+ O125Gp-TEkodhWr0iujjHVx7BcV0llS4w5ACGgPrcAd6ZcSR0-Iqom-QFcNP
+ 8Sjg086MwoqQU_LYywlAGZ21WSdS_PERyGFiNnj3QQlO8Yns5jCtLCRwLHL0
+ Pb1fEv45AuRIuUfVcPySBWYnDyGxvjYGDSM-AqWS9zIQ2ZilgT-GqUmipg0X
+ OC0Cc20rgLe2ymLHjpHciCKVAbY5-L32-lSeZO-Os6U15_aXrk9Gw8cPUaX1
+ _I8sLGuSiVdt3C_Fn2PZ3Z8i744FPFGGcG1qs2Wz-Q",
+ "e":"AQAB",
+ "d":"GRtbIQmhOZtyszfgKdg4u_N-R_mZGU_9k7JQ_jn1DnfTuMdSNprTeaSTyWfS
+ NkuaAwnOEbIQVy1IQbWVV25NY3ybc_IhUJtfri7bAXYEReWaCl3hdlPKXy9U
+ vqPYGR0kIXTQRqns-dVJ7jahlI7LyckrpTmrM8dWBo4_PMaenNnPiQgO0xnu
+ ToxutRZJfJvG4Ox4ka3GORQd9CsCZ2vsUDmsXOfUENOyMqADC6p1M3h33tsu
+ rY15k9qMSpG9OX_IJAXmxzAh_tWiZOwk2K4yxH9tS3Lq1yX8C1EWmeRDkK2a
+ hecG85-oLKQt5VEpWHKmjOi_gJSdSgqcN96X52esAQ",
+ "p":"2rnSOV4hKSN8sS4CgcQHFbs08XboFDqKum3sc4h3GRxrTmQdl1ZK9uw-PIHf
+ QP0FkxXVrx-WE-ZEbrqivH_2iCLUS7wAl6XvARt1KkIaUxPPSYB9yk31s0Q8
+ UK96E3_OrADAYtAJs-M3JxCLfNgqh56HDnETTQhH3rCT5T3yJws",
+ "q":"1u_RiFDP7LBYh3N4GXLT9OpSKYP0uQZyiaZwBtOCBNJgQxaj10RWjsZu0c6I
+ edis4S7B_coSKB0Kj9PaPaBzg-IySRvvcQuPamQu66riMhjVtG6TlV8CLCYK
+ rYl52ziqK0E_ym2QnkwsUX7eYTB7LbAHRK9GqocDE5B0f808I4s",
+ "dp":"KkMTWqBUefVwZ2_Dbj1pPQqyHSHjj90L5x_MOzqYAJMcLMZtbUtwKqvVDq3
+ tbEo3ZIcohbDtt6SbfmWzggabpQxNxuBpoOOf_a_HgMXK_lhqigI4y_kqS1w
+ Y52IwjUn5rgRrJ-yYo1h41KR-vz2pYhEAeYrhttWtxVqLCRViD6c",
+ "dq":"AvfS0-gRxvn0bwJoMSnFxYcK1WnuEjQFluMGfwGitQBWtfZ1Er7t1xDkbN9
+ GQTB9yqpDoYaN06H7CFtrkxhJIBQaj6nkF5KKS3TQtQ5qCzkOkmxIe3KRbBy
+ mXxkb5qwUpX5ELD5xFc6FeiafWYY63TmmEAu_lRFCOJ3xDea-ots",
+ "qi":"lSQi-w9CpyUReMErP1RsBLk7wNtOvs5EQpPqmuMvqW57NBUczScEoPwmUqq
+ abu9V0-Py4dQ57_bapoKRu1R90bvuFnU63SHWEFglZQvJDMeAvmj4sm-Fp0o
+ Yu_neotgQ0hzbI5gry7ajdYy9-2lNx_76aBZoOUu9HCJ-UsfSOI8"}`),
+
+ // X.509 Certificate Chain
+ stripWhitespace(`{"kty":"RSA",
+ "use":"sig",
+ "kid":"1b94c",
+ "n":"vrjOfz9Ccdgx5nQudyhdoR17V-IubWMeOZCwX_jj0hgAsz2J_pqYW08
+ PLbK_PdiVGKPrqzmDIsLI7sA25VEnHU1uCLNwBuUiCO11_-7dYbsr4iJmG0Q
+ u2j8DsVyT1azpJC_NG84Ty5KKthuCaPod7iI7w0LK9orSMhBEwwZDCxTWq4a
+ YWAchc8t-emd9qOvWtVMDC2BXksRngh6X5bUYLy6AyHKvj-nUy1wgzjYQDwH
+ MTplCoLtU-o-8SNnZ1tmRoGE9uJkBLdh5gFENabWnU5m1ZqZPdwS-qo-meMv
+ VfJb6jJVWRpl2SUtCnYG2C32qvbWbjZ_jBPD5eunqsIo1vQ",
+ "e":"AQAB",
+ "x5c":
+ ["MIIDQjCCAiqgAwIBAgIGATz/FuLiMA0GCSqGSIb3DQEBBQUAMGIxCzAJB
+ gNVBAYTAlVTMQswCQYDVQQIEwJDTzEPMA0GA1UEBxMGRGVudmVyMRwwGgYD
+ VQQKExNQaW5nIElkZW50aXR5IENvcnAuMRcwFQYDVQQDEw5CcmlhbiBDYW1
+ wYmVsbDAeFw0xMzAyMjEyMzI5MTVaFw0xODA4MTQyMjI5MTVaMGIxCzAJBg
+ NVBAYTAlVTMQswCQYDVQQIEwJDTzEPMA0GA1UEBxMGRGVudmVyMRwwGgYDV
+ QQKExNQaW5nIElkZW50aXR5IENvcnAuMRcwFQYDVQQDEw5CcmlhbiBDYW1w
+ YmVsbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL64zn8/QnH
+ YMeZ0LncoXaEde1fiLm1jHjmQsF/449IYALM9if6amFtPDy2yvz3YlRij66
+ s5gyLCyO7ANuVRJx1NbgizcAblIgjtdf/u3WG7K+IiZhtELto/A7Fck9Ws6
+ SQvzRvOE8uSirYbgmj6He4iO8NCyvaK0jIQRMMGQwsU1quGmFgHIXPLfnpn
+ fajr1rVTAwtgV5LEZ4Iel+W1GC8ugMhyr4/p1MtcIM42EA8BzE6ZQqC7VPq
+ PvEjZ2dbZkaBhPbiZAS3YeYBRDWm1p1OZtWamT3cEvqqPpnjL1XyW+oyVVk
+ aZdklLQp2Btgt9qr21m42f4wTw+Xrp6rCKNb0CAwEAATANBgkqhkiG9w0BA
+ QUFAAOCAQEAh8zGlfSlcI0o3rYDPBB07aXNswb4ECNIKG0CETTUxmXl9KUL
+ +9gGlqCz5iWLOgWsnrcKcY0vXPG9J1r9AqBNTqNgHq2G03X09266X5CpOe1
+ zFo+Owb1zxtp3PehFdfQJ610CDLEaS9V9Rqp17hCyybEpOGVwe8fnk+fbEL
+ 2Bo3UPGrpsHzUoaGpDftmWssZkhpBJKVMJyf/RuP2SmmaIzmnw9JiSlYhzo
+ 4tpzd5rFXhjRbg4zW9C+2qok+2+qDM1iJ684gPHMIY8aLWrdgQTxkumGmTq
+ gawR+N5MDtdPTEQ0XfIBc2cJEUyMTY5MPvACWpkA6SdS4xSvdXK3IVfOWA=="]}`),
+}
+
+// SHA-256 thumbprints of the above keys, hex-encoded
+var cookbookJWKThumbprints = []string{
+ "747ae2dd2003664aeeb21e4753fe7402846170a16bc8df8f23a8cf06d3cbe793",
+ "747ae2dd2003664aeeb21e4753fe7402846170a16bc8df8f23a8cf06d3cbe793",
+ "f63838e96077ad1fc01c3f8405774dedc0641f558ebb4b40dccf5f9b6d66a932",
+ "0fc478f8579325fcee0d4cbc6d9d1ce21730a6e97e435d6008fb379b0ebe47d4",
+ "0ddb05bfedbec2070fa037324ba397396561d3425d6d69245570c261dc49dee3",
+}
+
+func TestWebKeyVectorsValid(t *testing.T) {
+ for _, key := range cookbookJWKs {
+ var jwk2 JsonWebKey
+ err := jwk2.UnmarshalJSON([]byte(key))
+ if err != nil {
+ t.Error("unable to parse valid key:", key, err)
+ }
+ }
+}
+
+func TestThumbprint(t *testing.T) {
+ for i, key := range cookbookJWKs {
+ var jwk2 JsonWebKey
+ err := jwk2.UnmarshalJSON([]byte(key))
+ if err != nil {
+ t.Error("unable to parse valid key:", key, err)
+ }
+
+ tp, err := jwk2.Thumbprint(crypto.SHA256)
+ if err != nil {
+ t.Error("unable to compute thumbprint:", key, err)
+ }
+
+ tpHex := hex.EncodeToString(tp)
+ if cookbookJWKThumbprints[i] != tpHex {
+ t.Error("incorrect thumbprint:", i, cookbookJWKThumbprints[i], tpHex)
+ }
+ }
+}
+
+func TestMarshalUnmarshalJWKSet(t *testing.T) {
+ jwk1 := JsonWebKey{Key: rsaTestKey, KeyID: "ABCDEFG", Algorithm: "foo"}
+ jwk2 := JsonWebKey{Key: rsaTestKey, KeyID: "GFEDCBA", Algorithm: "foo"}
+ var set JsonWebKeySet
+ set.Keys = append(set.Keys, jwk1)
+ set.Keys = append(set.Keys, jwk2)
+
+ jsonbar, err := json.Marshal(&set)
+ if err != nil {
+ t.Error("problem marshalling set", err)
+ }
+ var set2 JsonWebKeySet
+ err = json.Unmarshal(jsonbar, &set2)
+ if err != nil {
+ t.Error("problem unmarshalling set", err)
+ }
+ jsonbar2, err := json.Marshal(&set2)
+ if err != nil {
+ t.Error("problem marshalling set", err)
+ }
+ if !bytes.Equal(jsonbar, jsonbar2) {
+ t.Error("roundtrip should not lose information")
+ }
+}
+
+func TestJWKSetKey(t *testing.T) {
+ jwk1 := JsonWebKey{Key: rsaTestKey, KeyID: "ABCDEFG", Algorithm: "foo"}
+ jwk2 := JsonWebKey{Key: rsaTestKey, KeyID: "GFEDCBA", Algorithm: "foo"}
+ var set JsonWebKeySet
+ set.Keys = append(set.Keys, jwk1)
+ set.Keys = append(set.Keys, jwk2)
+ k := set.Key("ABCDEFG")
+ if len(k) != 1 {
+ t.Errorf("method should return slice with one key not %d", len(k))
+ }
+ if k[0].KeyID != "ABCDEFG" {
+ t.Error("method should return key with ID ABCDEFG")
+ }
+}
+
+func TestJWKSymmetricKey(t *testing.T) {
+ sample1 := `{"kty":"oct","alg":"A128KW","k":"GawgguFyGrWKav7AX4VKUg"}`
+ sample2 := `{"kty":"oct","k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow","kid":"HMAC key used in JWS spec Appendix A.1 example"}`
+
+ var jwk1 JsonWebKey
+ json.Unmarshal([]byte(sample1), &jwk1)
+
+ if jwk1.Algorithm != "A128KW" {
+ t.Errorf("expected Algorithm to be A128KW, but was '%s'", jwk1.Algorithm)
+ }
+ expected1 := fromHexBytes("19ac2082e1721ab58a6afec05f854a52")
+ if !bytes.Equal(jwk1.Key.([]byte), expected1) {
+ t.Errorf("expected Key to be '%s', but was '%s'", hex.EncodeToString(expected1), hex.EncodeToString(jwk1.Key.([]byte)))
+ }
+
+ var jwk2 JsonWebKey
+ json.Unmarshal([]byte(sample2), &jwk2)
+
+ if jwk2.KeyID != "HMAC key used in JWS spec Appendix A.1 example" {
+ t.Errorf("expected KeyID to be 'HMAC key used in JWS spec Appendix A.1 example', but was '%s'", jwk2.KeyID)
+ }
+ expected2 := fromHexBytes(`
+ 0323354b2b0fa5bc837e0665777ba68f5ab328e6f054c928a90f84b2d2502ebf
+ d3fb5a92d20647ef968ab4c377623d223d2e2172052e4f08c0cd9af567d080a3`)
+ if !bytes.Equal(jwk2.Key.([]byte), expected2) {
+ t.Errorf("expected Key to be '%s', but was '%s'", hex.EncodeToString(expected2), hex.EncodeToString(jwk2.Key.([]byte)))
+ }
+}
+
+func TestJWKSymmetricRoundtrip(t *testing.T) {
+ jwk1 := JsonWebKey{Key: []byte{1, 2, 3, 4}}
+ marshaled, err := jwk1.MarshalJSON()
+ if err != nil {
+		t.Errorf("failed to marshal valid JWK object: %v", err)
+ }
+
+ var jwk2 JsonWebKey
+ err = jwk2.UnmarshalJSON(marshaled)
+ if err != nil {
+		t.Errorf("failed to unmarshal valid JWK object: %v", err)
+ }
+
+ if !bytes.Equal(jwk1.Key.([]byte), jwk2.Key.([]byte)) {
+ t.Error("round-trip of symmetric JWK gave different raw keys")
+ }
+}
+
+func TestJWKSymmetricInvalid(t *testing.T) {
+ invalid := JsonWebKey{}
+ _, err := invalid.MarshalJSON()
+ if err == nil {
+		t.Error("expected error when marshaling invalid symmetric JWK object")
+ }
+
+ var jwk JsonWebKey
+ err = jwk.UnmarshalJSON([]byte(`{"kty":"oct"}`))
+ if err == nil {
+		t.Error("expected error when unmarshaling invalid symmetric JWK object")
+ }
+}
+
+func TestJWKValid(t *testing.T) {
+ bigInt := big.NewInt(0)
+ eccPub := ecdsa.PublicKey{elliptic.P256(), bigInt, bigInt}
+ rsaPub := rsa.PublicKey{bigInt, 1}
+ cases := []struct {
+ key interface{}
+ expectedValidity bool
+ }{
+ {nil, false},
+ {&ecdsa.PublicKey{}, false},
+ {&eccPub, true},
+ {&ecdsa.PrivateKey{}, false},
+ {&ecdsa.PrivateKey{eccPub, bigInt}, true},
+ {&rsa.PublicKey{}, false},
+ {&rsaPub, true},
+ {&rsa.PrivateKey{}, false},
+ {&rsa.PrivateKey{rsaPub, bigInt, []*big.Int{bigInt, bigInt}, rsa.PrecomputedValues{}}, true},
+ }
+
+ for _, tc := range cases {
+ k := &JsonWebKey{Key: tc.key}
+ if valid := k.Valid(); valid != tc.expectedValidity {
+ t.Errorf("expected Valid to return %t, got %t", tc.expectedValidity, valid)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/jws.go b/vendor/gopkg.in/square/go-jose.v1/jws.go
new file mode 100644
index 000000000..04a2a1530
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jws.go
@@ -0,0 +1,272 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+// rawJsonWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
+type rawJsonWebSignature struct {
+ Payload *byteBuffer `json:"payload,omitempty"`
+ Signatures []rawSignatureInfo `json:"signatures,omitempty"`
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
+type rawSignatureInfo struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// JsonWebSignature represents a signed JWS object after parsing.
+type JsonWebSignature struct {
+ payload []byte
+ // Signatures attached to this object (may be more than one for multi-sig).
+ // Be careful about accessing these directly, prefer to use Verify() or
+ // VerifyMulti() to ensure that the data you're getting is verified.
+ Signatures []Signature
+}
+
+// Signature represents a single signature over the JWS payload and protected header.
+type Signature struct {
+ // Header fields, such as the signature algorithm
+ Header JoseHeader
+
+ // The actual signature value
+ Signature []byte
+
+ protected *rawHeader
+ header *rawHeader
+ original *rawSignatureInfo
+}
+
+// ParseSigned parses a signed message in compact or full serialization format.
+func ParseSigned(input string) (*JsonWebSignature, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseSignedFull(input)
+ }
+
+ return parseSignedCompact(input)
+}
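+
+// A minimal verification sketch: token is assumed to be a compact-serialized
+// JWS string and rsaPublicKey an *rsa.PublicKey obtained out of band; both
+// names are placeholders, not part of this package.
+//
+//	obj, err := jose.ParseSigned(token)
+//	if err != nil {
+//		return err // not a well-formed JWS
+//	}
+//	payload, err := obj.Verify(rsaPublicKey)
+//	if err != nil {
+//		return err // signature did not validate
+//	}
+//	fmt.Printf("%s\n", payload) // verified payload bytes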
+
+// mergedHeaders returns the signature's protected and unprotected headers merged into one.
+func (sig Signature) mergedHeaders() rawHeader {
+ out := rawHeader{}
+ out.merge(sig.protected)
+ out.merge(sig.header)
+ return out
+}
+
+// Compute data to be signed
+func (obj JsonWebSignature) computeAuthData(signature *Signature) []byte {
+ var serializedProtected string
+
+ if signature.original != nil && signature.original.Protected != nil {
+ serializedProtected = signature.original.Protected.base64()
+ } else if signature.protected != nil {
+ serializedProtected = base64URLEncode(mustSerializeJSON(signature.protected))
+ } else {
+ serializedProtected = ""
+ }
+
+ return []byte(fmt.Sprintf("%s.%s",
+ serializedProtected,
+ base64URLEncode(obj.payload)))
+}
+
+// parseSignedFull parses a message in full format.
+func parseSignedFull(input string) (*JsonWebSignature, error) {
+ var parsed rawJsonWebSignature
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWS object from the raw JSON.
+func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
+ if parsed.Payload == nil {
+ return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
+ }
+
+ obj := &JsonWebSignature{
+ payload: parsed.Payload.bytes(),
+ Signatures: make([]Signature, len(parsed.Signatures)),
+ }
+
+ if len(parsed.Signatures) == 0 {
+ // No signatures array, must be flattened serialization
+ signature := Signature{}
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ signature.protected = &rawHeader{}
+ err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Header != nil && parsed.Header.Nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ signature.header = parsed.Header
+ signature.Signature = parsed.Signature.bytes()
+ // Make a fake "original" rawSignatureInfo to store the unprocessed
+ // Protected header. This is necessary because the Protected header can
+ // contain arbitrary fields not registered as part of the spec. See
+ // https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
+ // If we unmarshal Protected into a rawHeader with its explicit list of fields,
+ // we cannot marshal losslessly. So we have to keep around the original bytes.
+ // This is used in computeAuthData, which will first attempt to use
+ // the original bytes of a protected header, and fall back on marshaling the
+ // header struct only if those bytes are not available.
+ signature.original = &rawSignatureInfo{
+ Protected: parsed.Protected,
+ Header: parsed.Header,
+ Signature: parsed.Signature,
+ }
+
+ signature.Header = signature.mergedHeaders().sanitized()
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := signature.Header.JsonWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ obj.Signatures = append(obj.Signatures, signature)
+ }
+
+ for i, sig := range parsed.Signatures {
+ if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
+ obj.Signatures[i].protected = &rawHeader{}
+ err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if sig.Header != nil && sig.Header.Nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized()
+ obj.Signatures[i].Signature = sig.Signature.bytes()
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := obj.Signatures[i].Header.JsonWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ // Copy value of sig
+ original := sig
+
+ obj.Signatures[i].header = sig.Header
+ obj.Signatures[i].original = &original
+ }
+
+ return obj, nil
+}
+
+// parseSignedCompact parses a message in compact format.
+func parseSignedCompact(input string) (*JsonWebSignature, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
+ }
+
+ rawProtected, err := base64URLDecode(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ payload, err := base64URLDecode(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := base64URLDecode(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJsonWebSignature{
+ Payload: newBuffer(payload),
+ Protected: newBuffer(rawProtected),
+ Signature: newBuffer(signature),
+ }
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JsonWebSignature) CompactSerialize() (string, error) {
+ if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s",
+ base64URLEncode(serializedProtected),
+ base64URLEncode(obj.payload),
+ base64URLEncode(obj.Signatures[0].Signature)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JsonWebSignature) FullSerialize() string {
+ raw := rawJsonWebSignature{
+ Payload: newBuffer(obj.payload),
+ }
+
+ if len(obj.Signatures) == 1 {
+ if obj.Signatures[0].protected != nil {
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+ raw.Protected = newBuffer(serializedProtected)
+ }
+ raw.Header = obj.Signatures[0].header
+ raw.Signature = newBuffer(obj.Signatures[0].Signature)
+ } else {
+ raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
+ for i, signature := range obj.Signatures {
+ raw.Signatures[i] = rawSignatureInfo{
+ Header: signature.header,
+ Signature: newBuffer(signature.Signature),
+ }
+
+ if signature.protected != nil {
+ raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
+ }
+ }
+ }
+
+ return string(mustSerializeJSON(raw))
+}
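+
+// A short sketch contrasting the two serializations; obj is assumed to be a
+// *JsonWebSignature produced by a Signer (see signing.go):
+//
+//	compact, err := obj.CompactSerialize()
+//	if err != nil {
+//		// The compact form requires exactly one signature with a
+//		// protected header; fall back to the full JSON serialization.
+//		fmt.Println(obj.FullSerialize())
+//	} else {
+//		fmt.Println(compact)
+//	}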
diff --git a/vendor/gopkg.in/square/go-jose.v1/jws_test.go b/vendor/gopkg.in/square/go-jose.v1/jws_test.go
new file mode 100644
index 000000000..4526f11c9
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/jws_test.go
@@ -0,0 +1,312 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestEmbeddedHMAC(t *testing.T) {
+ // protected: {"alg":"HS256", "jwk":{"kty":"oct", "k":"MTEx"}}, aka HMAC key.
+ msg := `{"payload":"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ","protected":"eyJhbGciOiJIUzI1NiIsICJqd2siOnsia3R5Ijoib2N0IiwgImsiOiJNVEV4In19","signature":"lvo41ZZsuHwQvSh0uJtEXRR3vmuBJ7in6qMoD7p9jyo"}`
+
+ _, err := ParseSigned(msg)
+ if err == nil {
+ t.Error("should not allow parsing JWS with embedded JWK with HMAC key")
+ }
+}
+
+func TestCompactParseJWS(t *testing.T) {
+ // Should parse
+ msg := "eyJhbGciOiJYWVoifQ.cGF5bG9hZA.c2lnbmF0dXJl"
+ _, err := ParseSigned(msg)
+ if err != nil {
+ t.Error("Unable to parse valid message:", err)
+ }
+
+ // Messages that should fail to parse
+ failures := []string{
+ // Not enough parts
+ "eyJhbGciOiJYWVoifQ.cGF5bG9hZA",
+ // Invalid signature
+ "eyJhbGciOiJYWVoifQ.cGF5bG9hZA.////",
+ // Invalid payload
+ "eyJhbGciOiJYWVoifQ.////.c2lnbmF0dXJl",
+ // Invalid header
+ "////.eyJhbGciOiJYWVoifQ.c2lnbmF0dXJl",
+ // Invalid header
+ "cGF5bG9hZA.cGF5bG9hZA.c2lnbmF0dXJl",
+ }
+
+ for i := range failures {
+ _, err = ParseSigned(failures[i])
+ if err == nil {
+ t.Error("Able to parse invalid message")
+ }
+ }
+}
+
+func TestFullParseJWS(t *testing.T) {
+ // Messages that should succeed to parse
+ successes := []string{
+ "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"e30\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"},{\"protected\":\"e30\",\"signature\":\"CUJD\"}]}",
+ }
+
+ for i := range successes {
+ _, err := ParseSigned(successes[i])
+ if err != nil {
+			t.Error("Unable to parse valid message", err, successes[i])
+ }
+ }
+
+ // Messages that should fail to parse
+ failures := []string{
+ // Empty
+ "{}",
+ // Invalid JSON
+ "{XX",
+ // Invalid protected header
+ "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}",
+ // Invalid protected header
+ "{\"payload\":\"CUJD\",\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}",
+ // Invalid protected header
+ "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"###\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}",
+ // Invalid payload
+ "{\"payload\":\"###\",\"signatures\":[{\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}",
+ // Invalid payload
+ "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"e30\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"###\"}]}",
+ }
+
+ for i := range failures {
+ _, err := ParseSigned(failures[i])
+ if err == nil {
+ t.Error("Able to parse invalid message", err, failures[i])
+ }
+ }
+}
+
+func TestRejectUnprotectedJWSNonce(t *testing.T) {
+ // No need to test compact, since that's always protected
+
+ // Flattened JSON
+ input := `{
+ "header": { "nonce": "should-cause-an-error" },
+ "payload": "does-not-matter",
+ "signature": "does-not-matter"
+ }`
+ _, err := ParseSigned(input)
+ if err == nil {
+ t.Error("JWS with an unprotected nonce parsed as valid.")
+ } else if err != ErrUnprotectedNonce {
+ t.Errorf("Improper error for unprotected nonce: %v", err)
+ }
+
+ // Full JSON
+ input = `{
+ "payload": "does-not-matter",
+ "signatures": [{
+ "header": { "nonce": "should-cause-an-error" },
+ "signature": "does-not-matter"
+ }]
+ }`
+ _, err = ParseSigned(input)
+ if err == nil {
+ t.Error("JWS with an unprotected nonce parsed as valid.")
+ } else if err != ErrUnprotectedNonce {
+ t.Errorf("Improper error for unprotected nonce: %v", err)
+ }
+}
+
+func TestVerifyFlattenedWithIncludedUnprotectedKey(t *testing.T) {
+ input := `{
+ "header": {
+ "alg": "RS256",
+ "jwk": {
+ "e": "AQAB",
+ "kty": "RSA",
+ "n": "tSwgy3ORGvc7YJI9B2qqkelZRUC6F1S5NwXFvM4w5-M0TsxbFsH5UH6adigV0jzsDJ5imAechcSoOhAh9POceCbPN1sTNwLpNbOLiQQ7RD5mY_pSUHWXNmS9R4NZ3t2fQAzPeW7jOfF0LKuJRGkekx6tXP1uSnNibgpJULNc4208dgBaCHo3mvaE2HV2GmVl1yxwWX5QZZkGQGjNDZYnjFfa2DKVvFs0QbAk21ROm594kAxlRlMMrvqlf24Eq4ERO0ptzpZgm_3j_e4hGRD39gJS7kAzK-j2cacFQ5Qi2Y6wZI2p-FCq_wiYsfEAIkATPBiLKl_6d_Jfcvs_impcXQ"
+ }
+ },
+ "payload": "Zm9vCg",
+ "signature": "hRt2eYqBd_MyMRNIh8PEIACoFtmBi7BHTLBaAhpSU6zyDAFdEBaX7us4VB9Vo1afOL03Q8iuoRA0AT4akdV_mQTAQ_jhTcVOAeXPr0tB8b8Q11UPQ0tXJYmU4spAW2SapJIvO50ntUaqU05kZd0qw8-noH1Lja-aNnU-tQII4iYVvlTiRJ5g8_CADsvJqOk6FcHuo2mG643TRnhkAxUtazvHyIHeXMxydMMSrpwUwzMtln4ZJYBNx4QGEq6OhpAD_VSp-w8Lq5HOwGQoNs0bPxH1SGrArt67LFQBfjlVr94E1sn26p4vigXm83nJdNhWAMHHE9iV67xN-r29LT-FjA"
+ }`
+
+ jws, err := ParseSigned(input)
+ if err != nil {
+ t.Error("Unable to parse valid message.")
+ }
+ if len(jws.Signatures) != 1 {
+ t.Error("Too many or too few signatures.")
+ }
+ sig := jws.Signatures[0]
+ if sig.Header.JsonWebKey == nil {
+ t.Error("No JWK in signature header.")
+ }
+ payload, err := jws.Verify(sig.Header.JsonWebKey)
+ if err != nil {
+ t.Error(fmt.Sprintf("Signature did not validate: %v", err))
+ }
+ if string(payload) != "foo\n" {
+ t.Error(fmt.Sprintf("Payload was incorrect: '%s' should have been 'foo\\n'", string(payload)))
+ }
+}
+
+func TestVerifyFlattenedWithPrivateProtected(t *testing.T) {
+ // The protected field contains a Private Header Parameter name, per
+ // https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
+ // Base64-decoded, it's '{"nonce":"8HIepUNFZUa-exKTrXVf4g"}'
+ input := `{"header":{"alg":"RS256","jwk":{"kty":"RSA","n":"7ixeydcbxxppzxrBphrW1atUiEZqTpiHDpI-79olav5XxAgWolHmVsJyxzoZXRxmtED8PF9-EICZWBGdSAL9ZTD0hLUCIsPcpdgT_LqNW3Sh2b2caPL2hbMF7vsXvnCGg9varpnHWuYTyRrCLUF9vM7ES-V3VCYTa7LcCSRm56Gg9r19qar43Z9kIKBBxpgt723v2cC4bmLmoAX2s217ou3uCpCXGLOeV_BesG4--Nl3pso1VhCfO85wEWjmW6lbv7Kg4d7Jdkv5DjDZfJ086fkEAYZVYGRpIgAvJBH3d3yKDCrSByUEud1bWuFjQBmMaeYOrVDXO_mbYg5PwUDMhw","e":"AQAB"}},"protected":"eyJub25jZSI6IjhISWVwVU5GWlVhLWV4S1RyWFZmNGcifQ","payload":"eyJjb250YWN0IjpbIm1haWx0bzpmb29AYmFyLmNvbSJdfQ","signature":"AyvVGMgXsQ1zTdXrZxE_gyO63pQgotL1KbI7gv6Wi8I7NRy0iAOkDAkWcTQT9pcCYApJ04lXfEDZfP5i0XgcFUm_6spxi5mFBZU-NemKcvK9dUiAbXvb4hB3GnaZtZiuVnMQUb_ku4DOaFFKbteA6gOYCnED_x7v0kAPHIYrQnvIa-KZ6pTajbV9348zgh9TL7NgGIIsTcMHd-Jatr4z1LQ0ubGa8tS300hoDhVzfoDQaEetYjCo1drR1RmdEN1SIzXdHOHfubjA3ZZRbrF_AJnNKpRRoIwzu1VayOhRmdy1qVSQZq_tENF4VrQFycEL7DhG7JLoXC4T2p1urwMlsw"}`
+
+ jws, err := ParseSigned(input)
+ if err != nil {
+ t.Error("Unable to parse valid message.")
+ }
+ if len(jws.Signatures) != 1 {
+ t.Error("Too many or too few signatures.")
+ }
+ sig := jws.Signatures[0]
+ if sig.Header.JsonWebKey == nil {
+ t.Error("No JWK in signature header.")
+ }
+ payload, err := jws.Verify(sig.Header.JsonWebKey)
+ if err != nil {
+ t.Error(fmt.Sprintf("Signature did not validate: %v", err))
+ }
+ expected := "{\"contact\":[\"mailto:foo@bar.com\"]}"
+ if string(payload) != expected {
+ t.Error(fmt.Sprintf("Payload was incorrect: '%s' should have been '%s'", string(payload), expected))
+ }
+}
+
+// Test vectors generated with nimbus-jose-jwt
+func TestSampleNimbusJWSMessagesRSA(t *testing.T) {
+ rsaPublicKey, err := LoadPublicKey(fromBase64Bytes(`
+ MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3aLSGwbeX0ZA2Ha+EvELaIFGzO
+ 91+Q15JQc/tdGdCgGW3XAbrh7ZUhDh1XKzbs+UOQxqn3Eq4YOx18IG0WsJSuCaHQIxnDlZ
+ t/GP8WLwjMC0izlJLm2SyfM/EEoNpmTC3w6MQ2dHK7SZ9Zoq+sKijQd+V7CYdr8zHMpDrd
+ NKoEcR0HjmvzzdMoUChhkGH5TaNbZyollULTggepaYUKS8QphqdSDMWiSetKG+g6V87lv6
+ CVYyK1FF6g7Esp5OOj5pNn3/bmF+7V+b7TvK91NCIlURCjE9toRgNoIP4TDnWRn/vvfZ3G
+ zNrtWmlizqz3r5KdvIs71ahWgMUSD4wfazrwIDAQAB`))
+ if err != nil {
+ panic(err)
+ }
+
+ rsaSampleMessages := []string{
+ "eyJhbGciOiJSUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.YHX849fvekz6wJGeyqnQhFqyHFcUXNJKj3o2w3ddR46YLlsCopUJrlifRU_ZuTWzpYxt5oC--T2eoqMhlCvltSWrE5_1_EumqiMfAYsZULx9E6Jns7q3w7mttonYFSIh7aR3-yg2HMMfTCgoAY1y_AZ4VjXwHDcZ5gu1oZDYgvZF4uXtCmwT6e5YtR1m8abiWPF8BgoTG_BD3KV6ClLj_QQiNFdfdxAMDw7vKVOKG1T7BFtz6cDs2Q3ILS4To5E2IjcVSSYS8mi77EitCrWmrqbK_G3WCdKeUFGnMnyuKXaCDy_7FLpAZ6Z5RomRr5iskXeJZdZqIKcJV8zl4fpsPA",
+ "eyJhbGciOiJSUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.meyfoOTjAAjXHFYiNlU7EEnsYtbeUYeEglK6BL_cxISEr2YAGLr1Gwnn2HnucTnH6YilyRio7ZC1ohy_ZojzmaljPHqpr8kn1iqNFu9nFE2M16ZPgJi38-PGzppcDNliyzOQO-c7L-eA-v8Gfww5uyRaOJdiWg-hUJmeGBIngPIeLtSVmhJtz8oTeqeNdUOqQv7f7VRCuvagLhW1PcEM91VUS-gS0WEUXoXWZ2lp91No0v1O24izgX3__FKiX_16XhrOfAgJ82F61vjbTIQYwhexHPZyYTlXYt_scNRzFGhSKeGFin4zVdFLOXWJqKWdUd5IrDP5Nya3FSoWbWDXAg",
+ "eyJhbGciOiJSUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.rQPz0PDh8KyE2AX6JorgI0MLwv-qi1tcWlz6tuZuWQG1hdrlzq5tR1tQg1evYNc_SDDX87DWTSKXT7JEqhKoFixLfZa13IJrOc7FB8r5ZLx7OwOBC4F--OWrvxMA9Y3MTJjPN3FemQePUo-na2vNUZv-YgkcbuOgbO3hTxwQ7j1JGuqy-YutXOFnccdXvntp3t8zYZ4Mg1It_IyL9pzgGqHIEmMV1pCFGHsDa-wStB4ffmdhrADdYZc0q_SvxUdobyC_XzZCz9ENzGIhgwYxyyrqg7kjqUGoKmCLmoSlUFW7goTk9IC5SXdUyLPuESxOWNfHoRClGav230GYjPFQFA",
+ "eyJhbGciOiJQUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.UTtxjsv_6x4CdlAmZfAW6Lun3byMjJbcwRp_OlPH2W4MZaZar7aql052mIB_ddK45O9VUz2aphYVRvKPZY8WHmvlTUU30bk0z_cDJRYB9eIJVMOiRCYj0oNkz1iEZqsP0YgngxwuUDv4Q4A6aJ0Bo5E_rZo3AnrVHMHUjPp_ZRRSBFs30tQma1qQ0ApK4Gxk0XYCYAcxIv99e78vldVRaGzjEZmQeAVZx4tGcqZP20vG1L84nlhSGnOuZ0FhR8UjRFLXuob6M7EqtMRoqPgRYw47EI3fYBdeSivAg98E5S8R7R1NJc7ef-l03RvfUSY0S3_zBq_4PlHK6A-2kHb__w",
+ "eyJhbGciOiJSUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.meyfoOTjAAjXHFYiNlU7EEnsYtbeUYeEglK6BL_cxISEr2YAGLr1Gwnn2HnucTnH6YilyRio7ZC1ohy_ZojzmaljPHqpr8kn1iqNFu9nFE2M16ZPgJi38-PGzppcDNliyzOQO-c7L-eA-v8Gfww5uyRaOJdiWg-hUJmeGBIngPIeLtSVmhJtz8oTeqeNdUOqQv7f7VRCuvagLhW1PcEM91VUS-gS0WEUXoXWZ2lp91No0v1O24izgX3__FKiX_16XhrOfAgJ82F61vjbTIQYwhexHPZyYTlXYt_scNRzFGhSKeGFin4zVdFLOXWJqKWdUd5IrDP5Nya3FSoWbWDXAg",
+ "eyJhbGciOiJSUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.rQPz0PDh8KyE2AX6JorgI0MLwv-qi1tcWlz6tuZuWQG1hdrlzq5tR1tQg1evYNc_SDDX87DWTSKXT7JEqhKoFixLfZa13IJrOc7FB8r5ZLx7OwOBC4F--OWrvxMA9Y3MTJjPN3FemQePUo-na2vNUZv-YgkcbuOgbO3hTxwQ7j1JGuqy-YutXOFnccdXvntp3t8zYZ4Mg1It_IyL9pzgGqHIEmMV1pCFGHsDa-wStB4ffmdhrADdYZc0q_SvxUdobyC_XzZCz9ENzGIhgwYxyyrqg7kjqUGoKmCLmoSlUFW7goTk9IC5SXdUyLPuESxOWNfHoRClGav230GYjPFQFA",
+ }
+
+ for _, msg := range rsaSampleMessages {
+ obj, err := ParseSigned(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ payload, err := obj.Verify(rsaPublicKey)
+ if err != nil {
+ t.Error("unable to verify message", msg, err)
+ continue
+ }
+ if string(payload) != "Lorem ipsum dolor sit amet" {
+ t.Error("payload is not what we expected for msg", msg)
+ }
+ }
+}
+
+// Test vectors generated with nimbus-jose-jwt
+func TestSampleNimbusJWSMessagesEC(t *testing.T) {
+ ecPublicKeyP256, err := LoadPublicKey(fromBase64Bytes("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg62jq6FyL1otEj9Up7S35BUrwGF9TVrAzrrY1rHUKZqYIGEg67u/imjgadVcr7y9Q32I0gB8W8FHqbqt696rA=="))
+ if err != nil {
+ panic(err)
+ }
+ ecPublicKeyP384, err := LoadPublicKey(fromBase64Bytes("MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEPXsVlqCtN2oTY+F+hFZm3M0ldYpb7IeeJM5wYmT0k1RaqzBFDhDMNnYK5Q5x+OyssZrAtHgYDFw02AVJhhng/eHRp7mqmL/vI3wbxJtrLKYldIbBA+9fYBQcKeibjlu5"))
+ if err != nil {
+ panic(err)
+ }
+ ecPublicKeyP521, err := LoadPublicKey(fromBase64Bytes("MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAa2w3MMJ5FWD6tSf68G+Wy5jIhWXOD3IA7pE5IC/myQzo1lWcD8KS57SM6nm4POtPcxyLmDhL7FLuh8DKoIZyvtAAdK8+tOQP7XXRlT2bkvzIuazp05It3TAPu00YzTIpKfDlc19Y1lvf7etrbFqhShD92B+hHmhT4ddrdbPCBDW8hvU="))
+ if err != nil {
+ panic(err)
+ }
+
+ ecPublicKeys := []interface{}{ecPublicKeyP256, ecPublicKeyP384, ecPublicKeyP521}
+
+ ecSampleMessages := []string{
+ "eyJhbGciOiJFUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.MEWJVlvGRQyzMEGOYm4rwuiwxrX-6LjnlbaRDAuhwmnBm2Gtn7pRpGXRTMFZUXsSGDz2L1p-Hz1qn8j9bFIBtQ",
+ "eyJhbGciOiJFUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.nbdjPnJPYQtVNNdBIx8-KbFKplTxrz-hnW5UNhYUY7SBkwHK4NZnqc2Lv4DXoA0aWHq9eiypgOh1kmyPWGEmqKAHUx0xdIEkBoHk3ZsbmhOQuq2jL_wcMUG6nTWNhLrB",
+ "eyJhbGciOiJFUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.AeYNFC1rwIgQv-5fwd8iRyYzvTaSCYTEICepgu9gRId-IW99kbSVY7yH0MvrQnqI-a0L8zwKWDR35fW5dukPAYRkADp3Y1lzqdShFcEFziUVGo46vqbiSajmKFrjBktJcCsfjKSaLHwxErF-T10YYPCQFHWb2nXJOOI3CZfACYqgO84g",
+ }
+
+ for i, msg := range ecSampleMessages {
+ obj, err := ParseSigned(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ payload, err := obj.Verify(ecPublicKeys[i])
+ if err != nil {
+ t.Error("unable to verify message", msg, err)
+ continue
+ }
+ if string(payload) != "Lorem ipsum dolor sit amet" {
+ t.Error("payload is not what we expected for msg", msg)
+ }
+ }
+}
+
+// Test vectors generated with nimbus-jose-jwt
+func TestSampleNimbusJWSMessagesHMAC(t *testing.T) {
+ hmacTestKey := fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D")
+
+ hmacSampleMessages := []string{
+ "eyJhbGciOiJIUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.W5tc_EUhxexcvLYEEOckyyvdb__M5DQIVpg6Nmk1XGM",
+ "eyJhbGciOiJIUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.sBu44lXOJa4Nd10oqOdYH2uz3lxlZ6o32QSGHaoGdPtYTDG5zvSja6N48CXKqdAh",
+ "eyJhbGciOiJIUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.M0yR4tmipsORIix-BitIbxEPGaxPchDfj8UNOpKuhDEfnb7URjGvCKn4nOlyQ1z9mG1FKbwnqR1hOVAWSzAU_w",
+ }
+
+ for _, msg := range hmacSampleMessages {
+ obj, err := ParseSigned(msg)
+ if err != nil {
+ t.Error("unable to parse message", msg, err)
+ continue
+ }
+ payload, err := obj.Verify(hmacTestKey)
+ if err != nil {
+ t.Error("unable to verify message", msg, err)
+ continue
+ }
+ if string(payload) != "Lorem ipsum dolor sit amet" {
+ t.Error("payload is not what we expected for msg", msg)
+ }
+ }
+}
+
+func TestErrorMissingPayloadJWS(t *testing.T) {
+ _, err := (&rawJsonWebSignature{}).sanitized()
+ if err == nil {
+ t.Error("was able to parse message with missing payload")
+ }
+ if !strings.Contains(err.Error(), "missing payload") {
+ t.Errorf("unexpected error message, should contain 'missing payload': %s", err)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/shared.go b/vendor/gopkg.in/square/go-jose.v1/shared.go
new file mode 100644
index 000000000..9d895a912
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/shared.go
@@ -0,0 +1,224 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+)
+
+// KeyAlgorithm represents a key management algorithm.
+type KeyAlgorithm string
+
+// SignatureAlgorithm represents a signature (or MAC) algorithm.
+type SignatureAlgorithm string
+
+// ContentEncryption represents a content encryption algorithm.
+type ContentEncryption string
+
+// CompressionAlgorithm represents an algorithm used for plaintext compression.
+type CompressionAlgorithm string
+
+var (
+ // ErrCryptoFailure represents an error in cryptographic primitive. This
+ // occurs when, for example, a message had an invalid authentication tag or
+ // could not be decrypted.
+ ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
+
+ // ErrUnsupportedAlgorithm indicates that a selected algorithm is not
+ // supported. This occurs when trying to instantiate an encrypter for an
+ // algorithm that is not yet implemented.
+ ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
+
+ // ErrUnsupportedKeyType indicates that the given key type/format is not
+ // supported. This occurs when trying to instantiate an encrypter and passing
+ // it a key of an unrecognized type or with unsupported parameters, such as
+ // an RSA private key with more than two primes.
+ ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
+
+ // ErrNotSupported serialization of object is not supported. This occurs when
+ // trying to compact-serialize an object which can't be represented in
+ // compact form.
+ ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
+
+ // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
+ // nonce header parameter was included in an unprotected header object.
+ ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
+)
+
+// Key management algorithms
+const (
+ RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
+ RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
+ RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
+ A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
+ A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
+ A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
+ DIRECT = KeyAlgorithm("dir") // Direct encryption
+ ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
+ ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
+ ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
+ ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
+ A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
+ A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
+ A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
+ PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
+ PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
+ PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
+)
+
+// Signature algorithms
+const (
+ HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
+ HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
+ HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
+ RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
+ RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
+ RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
+ ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
+ ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
+ ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
+ PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
+ PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
+ PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
+)
+
+// Content encryption algorithms
+const (
+ A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
+ A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
+ A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
+ A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
+ A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
+ A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
+)
+
+// Compression algorithms
+const (
+ NONE = CompressionAlgorithm("") // No compression
+ DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
+)
+
+// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
+type rawHeader struct {
+ Alg string `json:"alg,omitempty"`
+ Enc ContentEncryption `json:"enc,omitempty"`
+ Zip CompressionAlgorithm `json:"zip,omitempty"`
+ Crit []string `json:"crit,omitempty"`
+ Apu *byteBuffer `json:"apu,omitempty"`
+ Apv *byteBuffer `json:"apv,omitempty"`
+ Epk *JsonWebKey `json:"epk,omitempty"`
+ Iv *byteBuffer `json:"iv,omitempty"`
+ Tag *byteBuffer `json:"tag,omitempty"`
+ Jwk *JsonWebKey `json:"jwk,omitempty"`
+ Kid string `json:"kid,omitempty"`
+ Nonce string `json:"nonce,omitempty"`
+}
+
+// JoseHeader represents the read-only JOSE header for JWE/JWS objects.
+type JoseHeader struct {
+ KeyID string
+ JsonWebKey *JsonWebKey
+ Algorithm string
+ Nonce string
+}
+
+// sanitized produces a cleaned-up header object from the raw JSON.
+func (parsed rawHeader) sanitized() JoseHeader {
+ return JoseHeader{
+ KeyID: parsed.Kid,
+ JsonWebKey: parsed.Jwk,
+ Algorithm: parsed.Alg,
+ Nonce: parsed.Nonce,
+ }
+}
+
+// Merge headers from src into dst, giving precedence to headers already in dst.
+func (dst *rawHeader) merge(src *rawHeader) {
+ if src == nil {
+ return
+ }
+
+ if dst.Alg == "" {
+ dst.Alg = src.Alg
+ }
+ if dst.Enc == "" {
+ dst.Enc = src.Enc
+ }
+ if dst.Zip == "" {
+ dst.Zip = src.Zip
+ }
+	if dst.Crit == nil {
+		dst.Crit = src.Crit
+	}
+ if dst.Apu == nil {
+ dst.Apu = src.Apu
+ }
+ if dst.Apv == nil {
+ dst.Apv = src.Apv
+ }
+ if dst.Epk == nil {
+ dst.Epk = src.Epk
+ }
+ if dst.Iv == nil {
+ dst.Iv = src.Iv
+ }
+ if dst.Tag == nil {
+ dst.Tag = src.Tag
+ }
+ if dst.Kid == "" {
+ dst.Kid = src.Kid
+ }
+ if dst.Jwk == nil {
+ dst.Jwk = src.Jwk
+ }
+ if dst.Nonce == "" {
+ dst.Nonce = src.Nonce
+ }
+}
+
+// Get JOSE name of curve
+func curveName(crv elliptic.Curve) (string, error) {
+ switch crv {
+ case elliptic.P256():
+ return "P-256", nil
+ case elliptic.P384():
+ return "P-384", nil
+ case elliptic.P521():
+ return "P-521", nil
+ default:
+ return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
+ }
+}
+
+// Get size of curve in bytes
+func curveSize(crv elliptic.Curve) int {
+ bits := crv.Params().BitSize
+
+ div := bits / 8
+ mod := bits % 8
+
+ if mod == 0 {
+ return div
+ }
+
+ return div + 1
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/signing.go b/vendor/gopkg.in/square/go-jose.v1/signing.go
new file mode 100644
index 000000000..e64f8ab8d
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/signing.go
@@ -0,0 +1,258 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+)
+
+// NonceSource represents a source of random nonces to go into JWS objects
+type NonceSource interface {
+ Nonce() (string, error)
+}
+
+// Signer represents a signer which takes a payload and produces a signed JWS object.
+type Signer interface {
+ Sign(payload []byte) (*JsonWebSignature, error)
+ SetNonceSource(source NonceSource)
+ SetEmbedJwk(embed bool)
+}
+
+// MultiSigner represents a signer which supports multiple recipients.
+type MultiSigner interface {
+ Sign(payload []byte) (*JsonWebSignature, error)
+ SetNonceSource(source NonceSource)
+ SetEmbedJwk(embed bool)
+ AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error
+}
+
+type payloadSigner interface {
+ signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
+}
+
+type payloadVerifier interface {
+ verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
+}
+
+type genericSigner struct {
+ recipients []recipientSigInfo
+ nonceSource NonceSource
+ embedJwk bool
+}
+
+type recipientSigInfo struct {
+ sigAlg SignatureAlgorithm
+ keyID string
+ publicKey *JsonWebKey
+ signer payloadSigner
+}
+
+// NewSigner creates an appropriate signer based on the key type
+func NewSigner(alg SignatureAlgorithm, signingKey interface{}) (Signer, error) {
+ // NewMultiSigner never fails (currently)
+ signer := NewMultiSigner()
+
+ err := signer.AddRecipient(alg, signingKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return signer, nil
+}
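+
+// A minimal signing sketch; privateKey is assumed to be an *rsa.PrivateKey
+// generated elsewhere (placeholder name):
+//
+//	signer, err := jose.NewSigner(jose.RS256, privateKey)
+//	if err != nil {
+//		return err
+//	}
+//	obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+//	if err != nil {
+//		return err
+//	}
+//	token, err := obj.CompactSerialize()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(token) // header.payload.signature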
+
+// NewMultiSigner creates a signer for multiple recipients
+func NewMultiSigner() MultiSigner {
+ return &genericSigner{
+ recipients: []recipientSigInfo{},
+ embedJwk: true,
+ }
+}
+
+// newVerifier creates a verifier based on the key type
+func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
+ switch verificationKey := verificationKey.(type) {
+ case *rsa.PublicKey:
+ return &rsaEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case *ecdsa.PublicKey:
+ return &ecEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case []byte:
+ return &symmetricMac{
+ key: verificationKey,
+ }, nil
+ case *JsonWebKey:
+ return newVerifier(verificationKey.Key)
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+func (ctx *genericSigner) AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
+ recipient, err := makeJWSRecipient(alg, signingKey)
+ if err != nil {
+ return err
+ }
+
+ ctx.recipients = append(ctx.recipients, recipient)
+ return nil
+}
+
+func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
+ switch signingKey := signingKey.(type) {
+ case *rsa.PrivateKey:
+ return newRSASigner(alg, signingKey)
+ case *ecdsa.PrivateKey:
+ return newECDSASigner(alg, signingKey)
+ case []byte:
+ return newSymmetricSigner(alg, signingKey)
+ case *JsonWebKey:
+ recipient, err := makeJWSRecipient(alg, signingKey.Key)
+ if err != nil {
+ return recipientSigInfo{}, err
+ }
+ recipient.keyID = signingKey.KeyID
+ return recipient, nil
+ default:
+ return recipientSigInfo{}, ErrUnsupportedKeyType
+ }
+}
+
+func (ctx *genericSigner) Sign(payload []byte) (*JsonWebSignature, error) {
+ obj := &JsonWebSignature{}
+ obj.payload = payload
+ obj.Signatures = make([]Signature, len(ctx.recipients))
+
+ for i, recipient := range ctx.recipients {
+ protected := &rawHeader{
+ Alg: string(recipient.sigAlg),
+ }
+
+ if recipient.publicKey != nil && ctx.embedJwk {
+ protected.Jwk = recipient.publicKey
+ }
+ if recipient.keyID != "" {
+ protected.Kid = recipient.keyID
+ }
+
+ if ctx.nonceSource != nil {
+ nonce, err := ctx.nonceSource.Nonce()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
+ }
+ protected.Nonce = nonce
+ }
+
+ serializedProtected := mustSerializeJSON(protected)
+
+ input := []byte(fmt.Sprintf("%s.%s",
+ base64URLEncode(serializedProtected),
+ base64URLEncode(payload)))
+
+ signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ signatureInfo.protected = protected
+ obj.Signatures[i] = signatureInfo
+ }
+
+ return obj, nil
+}
+
+// SetNonceSource provides or updates a nonce pool for the signer. After this
+// method is called, the signer will consume one nonce per signature,
+// returning an error if it is unable to get a nonce.
+func (ctx *genericSigner) SetNonceSource(source NonceSource) {
+ ctx.nonceSource = source
+}
+
+// SetEmbedJwk specifies whether the public key corresponding to the signing key
+// should be embedded in the protected header, if any. It defaults to 'true',
+// though that may change in the future. Note that embedding JWKs in the signature
+// header can be dangerous: you cannot assume that a key received in a payload is trusted.
+func (ctx *genericSigner) SetEmbedJwk(embed bool) {
+ ctx.embedJwk = embed
+}
+
+// Verify validates the signature on the object and returns the payload.
+// This function does not support multi-signature; if you desire multi-signature
+// verification, use VerifyMulti instead.
+//
+// Be careful when verifying signatures based on embedded JWKs inside the
+// payload header. You cannot assume that the key received in a payload is
+// trusted.
+func (obj JsonWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(obj.Signatures) > 1 {
+ return nil, errors.New("square/go-jose: too many signatures in payload; expecting only one")
+ }
+
+ signature := obj.Signatures[0]
+ headers := signature.mergedHeaders()
+ if len(headers.Crit) > 0 {
+ // Unsupported crit header
+ return nil, ErrCryptoFailure
+ }
+
+ input := obj.computeAuthData(&signature)
+ alg := SignatureAlgorithm(headers.Alg)
+ err = verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return obj.payload, nil
+ }
+
+ return nil, ErrCryptoFailure
+}
+
+// VerifyMulti validates one of the (possibly multiple) signatures on the object
+// and returns the index of the signature that was verified, along with the
+// signature object and the payload. The signature and index are returned so that
+// callers can be sure they are working with the verified value.
+func (obj JsonWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return -1, Signature{}, nil, err
+ }
+
+ for i, signature := range obj.Signatures {
+ headers := signature.mergedHeaders()
+ if len(headers.Crit) > 0 {
+ // Unsupported crit header
+ continue
+ }
+
+ input := obj.computeAuthData(&signature)
+ alg := SignatureAlgorithm(headers.Alg)
+ err := verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return i, signature, obj.payload, nil
+ }
+ }
+
+ return -1, Signature{}, nil, ErrCryptoFailure
+}
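The test file that follows exercises this API end to end; as a minimal sketch, a single-recipient round trip (sign, compact-serialize, parse, verify) with a freshly generated RSA key looks roughly like this — the key generation and payload here are illustrative only:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    // Generate a throwaway RSA key for the example.
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }

    // Create a single-recipient signer and sign a payload.
    signer, err := jose.NewSigner(jose.RS256, key)
    if err != nil {
        panic(err)
    }
    obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
    if err != nil {
        panic(err)
    }

    // Serialize to the compact form, then parse and verify it again.
    msg, err := obj.CompactSerialize()
    if err != nil {
        panic(err)
    }
    parsed, err := jose.ParseSigned(msg)
    if err != nil {
        panic(err)
    }
    payload, err := parsed.Verify(&key.PublicKey)
    if err != nil {
        panic(err)
    }
    fmt.Printf("verified payload: %s\n", payload)
}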
diff --git a/vendor/gopkg.in/square/go-jose.v1/signing_test.go b/vendor/gopkg.in/square/go-jose.v1/signing_test.go
new file mode 100644
index 000000000..15c319730
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/signing_test.go
@@ -0,0 +1,451 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "testing"
+
+ "gopkg.in/square/go-jose.v1/json"
+)
+
+type staticNonceSource string
+
+func (sns staticNonceSource) Nonce() (string, error) {
+ return string(sns), nil
+}
+
+func RoundtripJWS(sigAlg SignatureAlgorithm, serializer func(*JsonWebSignature) (string, error), corrupter func(*JsonWebSignature), signingKey interface{}, verificationKey interface{}, nonce string) error {
+ signer, err := NewSigner(sigAlg, signingKey)
+ if err != nil {
+ return fmt.Errorf("error on new signer: %s", err)
+ }
+
+ if nonce != "" {
+ signer.SetNonceSource(staticNonceSource(nonce))
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := signer.Sign(input)
+ if err != nil {
+ return fmt.Errorf("error on sign: %s", err)
+ }
+
+ msg, err := serializer(obj)
+ if err != nil {
+ return fmt.Errorf("error on serialize: %s", err)
+ }
+
+ obj, err = ParseSigned(msg)
+ if err != nil {
+ return fmt.Errorf("error on parse: %s", err)
+ }
+
+ // (Maybe) mangle the object
+ corrupter(obj)
+
+ output, err := obj.Verify(verificationKey)
+ if err != nil {
+ return fmt.Errorf("error on verify: %s", err)
+ }
+
+ // Check that verify works with embedded keys (if present)
+ for i, sig := range obj.Signatures {
+ if sig.Header.JsonWebKey != nil {
+ _, err = obj.Verify(sig.Header.JsonWebKey)
+ if err != nil {
+ return fmt.Errorf("error on verify with embedded key %d: %s", i, err)
+ }
+ }
+
+ // Check that the nonce correctly round-tripped (if present)
+ if sig.Header.Nonce != nonce {
+ return fmt.Errorf("Incorrect nonce returned: [%s]", sig.Header.Nonce)
+ }
+ }
+
+ if bytes.Compare(output, input) != 0 {
+ return fmt.Errorf("input/output do not match, got '%s', expected '%s'", output, input)
+ }
+
+ return nil
+}
+
+func TestRoundtripsJWS(t *testing.T) {
+ // Test matrix
+ sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512, HS256, HS384, HS512, ES256, ES384, ES512}
+
+ serializers := []func(*JsonWebSignature) (string, error){
+ func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebSignature) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ corrupter := func(obj *JsonWebSignature) {}
+
+ for _, alg := range sigAlgs {
+ signingKey, verificationKey := GenerateSigningTestKey(alg)
+
+ for i, serializer := range serializers {
+ err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce")
+ if err != nil {
+ t.Error(err, alg, i)
+ }
+ }
+ }
+}
+
+func TestRoundtripsJWSCorruptSignature(t *testing.T) {
+ // Test matrix
+ sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512, HS256, HS384, HS512, ES256, ES384, ES512}
+
+ serializers := []func(*JsonWebSignature) (string, error){
+ func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebSignature) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ corrupters := []func(*JsonWebSignature){
+ func(obj *JsonWebSignature) {
+ // Changes bytes in signature
+ obj.Signatures[0].Signature[10]++
+ },
+ func(obj *JsonWebSignature) {
+ // Set totally invalid signature
+ obj.Signatures[0].Signature = []byte("###")
+ },
+ }
+
+ // Test all different configurations
+ for _, alg := range sigAlgs {
+ signingKey, verificationKey := GenerateSigningTestKey(alg)
+
+ for i, serializer := range serializers {
+ for j, corrupter := range corrupters {
+ err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce")
+ if err == nil {
+ t.Error("failed to detect corrupt signature", err, alg, i, j)
+ }
+ }
+ }
+ }
+}
+
+func TestSignerWithBrokenRand(t *testing.T) {
+ sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512}
+
+ serializer := func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() }
+ corrupter := func(obj *JsonWebSignature) {}
+
+ // Break rand reader
+ readers := []func() io.Reader{
+ // Totally broken
+ func() io.Reader { return bytes.NewReader([]byte{}) },
+ // Not enough bytes
+ func() io.Reader { return io.LimitReader(rand.Reader, 20) },
+ }
+
+ defer resetRandReader()
+
+ for _, alg := range sigAlgs {
+ signingKey, verificationKey := GenerateSigningTestKey(alg)
+ for i, getReader := range readers {
+ randReader = getReader()
+ err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce")
+ if err == nil {
+ t.Error("signer should fail if rand is broken", alg, i)
+ }
+ }
+ }
+}
+
+func TestJWSInvalidKey(t *testing.T) {
+ signingKey0, verificationKey0 := GenerateSigningTestKey(RS256)
+ _, verificationKey1 := GenerateSigningTestKey(ES256)
+
+ signer, err := NewSigner(RS256, signingKey0)
+ if err != nil {
+ panic(err)
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := signer.Sign(input)
+ if err != nil {
+ panic(err)
+ }
+
+ // Must work with correct key
+ _, err = obj.Verify(verificationKey0)
+ if err != nil {
+ t.Error("error on verify", err)
+ }
+
+ // Must not work with incorrect key
+ _, err = obj.Verify(verificationKey1)
+ if err == nil {
+ t.Error("verification should fail with incorrect key")
+ }
+
+ // Must not work with invalid key
+ _, err = obj.Verify("")
+ if err == nil {
+ t.Error("verification should fail with incorrect key")
+ }
+}
+
+func TestMultiRecipientJWS(t *testing.T) {
+ signer := NewMultiSigner()
+
+ sharedKey := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }
+
+ signer.AddRecipient(RS256, rsaTestKey)
+ signer.AddRecipient(HS384, sharedKey)
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := signer.Sign(input)
+ if err != nil {
+ t.Fatal("error on sign: ", err)
+ }
+
+ _, err = obj.CompactSerialize()
+ if err == nil {
+ t.Fatal("message with multiple recipient was compact serialized")
+ }
+
+ msg := obj.FullSerialize()
+
+ obj, err = ParseSigned(msg)
+ if err != nil {
+ t.Fatal("error on parse: ", err)
+ }
+
+ i, _, output, err := obj.VerifyMulti(&rsaTestKey.PublicKey)
+ if err != nil {
+ t.Fatal("error on verify: ", err)
+ }
+
+ if i != 0 {
+ t.Fatal("signature index should be 0 for RSA key")
+ }
+
+ if bytes.Compare(output, input) != 0 {
+ t.Fatal("input/output do not match", output, input)
+ }
+
+ i, _, output, err = obj.VerifyMulti(sharedKey)
+ if err != nil {
+ t.Fatal("error on verify: ", err)
+ }
+
+ if i != 1 {
+ t.Fatal("signature index should be 1 for EC key")
+ }
+
+ if bytes.Compare(output, input) != 0 {
+ t.Fatal("input/output do not match", output, input)
+ }
+}
+
+func GenerateSigningTestKey(sigAlg SignatureAlgorithm) (sig, ver interface{}) {
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ sig = rsaTestKey
+ ver = &rsaTestKey.PublicKey
+ case HS256, HS384, HS512:
+ sig, _, _ = randomKeyGenerator{size: 16}.genKey()
+ ver = sig
+ case ES256:
+ key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ sig = key
+ ver = &key.PublicKey
+ case ES384:
+ key, _ := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+ sig = key
+ ver = &key.PublicKey
+ case ES512:
+ key, _ := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ sig = key
+ ver = &key.PublicKey
+ default:
+ panic("Must update test case")
+ }
+
+ return
+}
+
+func TestInvalidSignerAlg(t *testing.T) {
+ _, err := NewSigner("XYZ", nil)
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ _, err = NewSigner("XYZ", []byte{})
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+}
+
+func TestInvalidJWS(t *testing.T) {
+ signer, err := NewSigner(PS256, rsaTestKey)
+ if err != nil {
+ panic(err)
+ }
+
+ obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+ obj.Signatures[0].header = &rawHeader{
+ Crit: []string{"TEST"},
+ }
+
+ _, err = obj.Verify(&rsaTestKey.PublicKey)
+ if err == nil {
+ t.Error("should not verify message with unknown crit header")
+ }
+
+ // Try without alg header
+ obj.Signatures[0].protected = &rawHeader{}
+ obj.Signatures[0].header = &rawHeader{}
+
+ _, err = obj.Verify(&rsaTestKey.PublicKey)
+ if err == nil {
+ t.Error("should not verify message with missing headers")
+ }
+}
+
+func TestSignerKid(t *testing.T) {
+ kid := "DEADBEEF"
+ payload := []byte("Lorem ipsum dolor sit amet")
+
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Error("problem generating test signing key", err)
+ }
+
+ basejwk := JsonWebKey{Key: key}
+ jsonbar, err := basejwk.MarshalJSON()
+ if err != nil {
+ t.Error("problem marshalling base JWK", err)
+ }
+
+ var jsonmsi map[string]interface{}
+ err = json.Unmarshal(jsonbar, &jsonmsi)
+ if err != nil {
+ t.Error("problem unmarshalling base JWK", err)
+ }
+ jsonmsi["kid"] = kid
+ jsonbar2, err := json.Marshal(jsonmsi)
+ if err != nil {
+ t.Error("problem marshalling kided JWK", err)
+ }
+
+ var jwk JsonWebKey
+ err = jwk.UnmarshalJSON(jsonbar2)
+ if err != nil {
+ t.Error("problem unmarshalling kided JWK", err)
+ }
+
+ signer, err := NewSigner(ES256, &jwk)
+ if err != nil {
+ t.Error("problem creating signer", err)
+ }
+ signed, err := signer.Sign(payload)
+
+ serialized := signed.FullSerialize()
+
+ parsed, err := ParseSigned(serialized)
+ if err != nil {
+ t.Error("problem parsing signed object", err)
+ }
+
+ if parsed.Signatures[0].Header.KeyID != kid {
+ t.Error("KeyID did not survive trip")
+ }
+}
+
+func TestEmbedJwk(t *testing.T) {
+ var payload = []byte("Lorem ipsum dolor sit amet")
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Error("Failed to generate key")
+ }
+
+ signer, err := NewSigner(ES256, key)
+ if err != nil {
+ t.Error("Failed to create signer")
+ }
+
+ object, err := signer.Sign(payload)
+ if err != nil {
+ t.Error("Failed to sign payload")
+ }
+
+ object, err = ParseSigned(object.FullSerialize())
+ if err != nil {
+ t.Error("Failed to parse jws")
+ }
+
+ if object.Signatures[0].protected.Jwk == nil {
+ t.Error("JWK isn't set in protected header")
+ }
+
+ // Now sign it again, but don't embed JWK.
+ signer.SetEmbedJwk(false)
+
+ object, err = signer.Sign(payload)
+ if err != nil {
+ t.Error("Failed to sign payload")
+ }
+
+ object, err = ParseSigned(object.FullSerialize())
+ if err != nil {
+ t.Error("Failed to parse jws")
+ }
+
+ if object.Signatures[0].protected.Jwk != nil {
+ t.Error("JWK is set in protected header")
+ }
+}
+
+func TestSignerWithJWKAndKeyID(t *testing.T) {
+ enc, err := NewSigner(HS256, &JsonWebKey{
+ KeyID: "test-id",
+ Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ signed, _ := enc.Sign([]byte("Lorem ipsum dolor sit amet"))
+
+ serialized1, _ := signed.CompactSerialize()
+ serialized2 := signed.FullSerialize()
+
+ parsed1, _ := ParseSigned(serialized1)
+ parsed2, _ := ParseSigned(serialized2)
+
+ if parsed1.Signatures[0].Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Signatures[0].Header.KeyID)
+ }
+ if parsed2.Signatures[0].Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Signatures[0].Header.KeyID)
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/symmetric.go b/vendor/gopkg.in/square/go-jose.v1/symmetric.go
new file mode 100644
index 000000000..51f8cb394
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/symmetric.go
@@ -0,0 +1,349 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "errors"
+ "hash"
+ "io"
+
+ "gopkg.in/square/go-jose.v1/cipher"
+)
+
+// Random reader (stubbed out in tests)
+var randReader = rand.Reader
+
+// Dummy key cipher for shared symmetric key mode
+type symmetricKeyCipher struct {
+ key []byte // Pre-shared content-encryption key
+}
+
+// Signer/verifier for MAC modes
+type symmetricMac struct {
+ key []byte
+}
+
+// Input/output from an AEAD operation
+type aeadParts struct {
+ iv, ciphertext, tag []byte
+}
+
+// A content cipher based on an AEAD construction
+type aeadContentCipher struct {
+ keyBytes int
+ authtagBytes int
+ getAead func(key []byte) (cipher.AEAD, error)
+}
+
+// Random key generator
+type randomKeyGenerator struct {
+ size int
+}
+
+// Static key generator
+type staticKeyGenerator struct {
+ key []byte
+}
+
+// Create a new content cipher based on AES-GCM
+func newAESGCM(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize,
+ authtagBytes: 16,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ aes, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return cipher.NewGCM(aes)
+ },
+ }
+}
+
+// Create a new content cipher based on AES-CBC+HMAC
+func newAESCBC(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize * 2,
+ authtagBytes: 16,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ return josecipher.NewCBCHMAC(key, aes.NewCipher)
+ },
+ }
+}
+
+// Get an AEAD cipher object for the given content encryption algorithm
+func getContentCipher(alg ContentEncryption) contentCipher {
+ switch alg {
+ case A128GCM:
+ return newAESGCM(16)
+ case A192GCM:
+ return newAESGCM(24)
+ case A256GCM:
+ return newAESGCM(32)
+ case A128CBC_HS256:
+ return newAESCBC(16)
+ case A192CBC_HS384:
+ return newAESCBC(24)
+ case A256CBC_HS512:
+ return newAESCBC(32)
+ default:
+ return nil
+ }
+}
+
+// newSymmetricRecipient creates a JWE recipient based on a shared symmetric key (direct, AES-GCM key wrap, or AES key wrap).
+func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
+ switch keyAlg {
+ case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &symmetricKeyCipher{
+ key: key,
+ },
+ }, nil
+}
+
+// newSymmetricSigner creates a recipientSigInfo based on the given key.
+func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case HS256, HS384, HS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ signer: &symmetricMac{
+ key: key,
+ },
+ }, nil
+}
+
+// Generate a random key for the given content cipher
+func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ key := make([]byte, ctx.size)
+ _, err := io.ReadFull(randReader, key)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ return key, rawHeader{}, nil
+}
+
+// Key size for random generator
+func (ctx randomKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Generate a static key (for direct mode)
+func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, rawHeader{}, nil
+}
+
+// Key size for static generator
+func (ctx staticKeyGenerator) keySize() int {
+ return len(ctx.key)
+}
+
+// Get key size for this cipher
+func (ctx aeadContentCipher) keySize() int {
+ return ctx.keyBytes
+}
+
+// Encrypt some data
+func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
+ // Get a new AEAD instance
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize a new nonce
+ iv := make([]byte, aead.NonceSize())
+ _, err = io.ReadFull(randReader, iv)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
+ offset := len(ciphertextAndTag) - ctx.authtagBytes
+
+ return &aeadParts{
+ iv: iv,
+ ciphertext: ciphertextAndTag[:offset],
+ tag: ciphertextAndTag[offset:],
+ }, nil
+}
+
+// Decrypt some data
+func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
+}
+
+// Encrypt the content encryption key.
+func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case DIRECT:
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ parts, err := aead.encrypt(ctx.key, []byte{}, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ header: &rawHeader{
+ Iv: newBuffer(parts.iv),
+ Tag: newBuffer(parts.tag),
+ },
+ encryptedKey: parts.ciphertext,
+ }, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &rawHeader{},
+ }, nil
+ }
+
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the content encryption key.
+func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ switch KeyAlgorithm(headers.Alg) {
+ case DIRECT:
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ parts := &aeadParts{
+ iv: headers.Iv.bytes(),
+ ciphertext: recipient.encryptedKey,
+ tag: headers.Tag.bytes(),
+ }
+
+ cek, err := aead.decrypt(ctx.key, []byte{}, parts)
+ if err != nil {
+ return nil, err
+ }
+
+ return cek, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return nil, err
+ }
+
+ cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return cek, nil
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ mac, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return Signature{}, errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ return Signature{
+ Signature: mac,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
+ expected, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ if len(mac) != len(expected) {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ match := subtle.ConstantTimeCompare(mac, expected)
+ if match != 1 {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ return nil
+}
+
+// Compute the HMAC based on the given alg value
+func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
+ var hash func() hash.Hash
+
+ switch alg {
+ case HS256:
+ hash = sha256.New
+ case HS384:
+ hash = sha512.New384
+ case HS512:
+ hash = sha512.New
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ hmac := hmac.New(hash, ctx.key)
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hmac.Write(payload)
+ return hmac.Sum(nil), nil
+}
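The HMAC modes above reduce to a keyed hash plus a constant-time comparison; a standalone sketch of that pattern using only the standard library (mirroring what symmetricMac.signPayload and verifyPayload do, not the go-jose types themselves) could look like this:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "crypto/subtle"
    "errors"
    "fmt"
)

// computeHMAC mirrors the HS256 case: a keyed SHA-256 over the payload.
func computeHMAC(key, payload []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(payload) // Write on a hash never returns an error
    return h.Sum(nil)
}

// verifyHMAC recomputes the MAC and compares it in constant time.
func verifyHMAC(key, payload, mac []byte) error {
    expected := computeHMAC(key, payload)
    if len(mac) != len(expected) || subtle.ConstantTimeCompare(mac, expected) != 1 {
        return errors.New("invalid hmac")
    }
    return nil
}

func main() {
    key := []byte("0123456789abcdef")
    payload := []byte("Lorem ipsum dolor sit amet")
    mac := computeHMAC(key, payload)
    fmt.Println("valid:", verifyHMAC(key, payload, mac) == nil)
    fmt.Println("tampered:", verifyHMAC(key, append(payload, '!'), mac) == nil)
}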
diff --git a/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go b/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go
new file mode 100644
index 000000000..67f535e3b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go
@@ -0,0 +1,131 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/rand"
+ "io"
+ "testing"
+)
+
+func TestInvalidSymmetricAlgorithms(t *testing.T) {
+ _, err := newSymmetricRecipient("XYZ", []byte{})
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ enc := &symmetricKeyCipher{}
+ _, err = enc.encryptKey([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should not accept invalid algorithm")
+ }
+}
+
+func TestAeadErrors(t *testing.T) {
+ aead := &aeadContentCipher{
+ keyBytes: 16,
+ authtagBytes: 16,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ return nil, ErrCryptoFailure
+ },
+ }
+
+ parts, err := aead.encrypt([]byte{}, []byte{}, []byte{})
+ if err != ErrCryptoFailure {
+ t.Error("should handle aead failure")
+ }
+
+ _, err = aead.decrypt([]byte{}, []byte{}, parts)
+ if err != ErrCryptoFailure {
+ t.Error("should handle aead failure")
+ }
+}
+
+func TestInvalidKey(t *testing.T) {
+ gcm := newAESGCM(16).(*aeadContentCipher)
+ _, err := gcm.getAead([]byte{})
+ if err == nil {
+ t.Error("should not accept invalid key")
+ }
+}
+
+func TestStaticKeyGen(t *testing.T) {
+ key := make([]byte, 32)
+ io.ReadFull(rand.Reader, key)
+
+ gen := &staticKeyGenerator{key: key}
+ if gen.keySize() != len(key) {
+ t.Error("static key generator reports incorrect size")
+ }
+
+ generated, _, err := gen.genKey()
+ if err != nil {
+ t.Error("static key generator should always succeed", err)
+ }
+ if !bytes.Equal(generated, key) {
+ t.Error("static key generator returns different data")
+ }
+}
+
+func TestVectorsAESGCM(t *testing.T) {
+ // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.1
+ plaintext := []byte{
+ 84, 104, 101, 32, 116, 114, 117, 101, 32, 115, 105, 103, 110, 32,
+ 111, 102, 32, 105, 110, 116, 101, 108, 108, 105, 103, 101, 110, 99,
+ 101, 32, 105, 115, 32, 110, 111, 116, 32, 107, 110, 111, 119, 108,
+ 101, 100, 103, 101, 32, 98, 117, 116, 32, 105, 109, 97, 103, 105,
+ 110, 97, 116, 105, 111, 110, 46}
+
+ aad := []byte{
+ 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69,
+ 116, 84, 48, 70, 70, 85, 67, 73, 115, 73, 109, 86, 117, 89, 121, 73,
+ 54, 73, 107, 69, 121, 78, 84, 90, 72, 81, 48, 48, 105, 102, 81}
+
+ expectedCiphertext := []byte{
+ 229, 236, 166, 241, 53, 191, 115, 196, 174, 43, 73, 109, 39, 122,
+ 233, 96, 140, 206, 120, 52, 51, 237, 48, 11, 190, 219, 186, 80, 111,
+ 104, 50, 142, 47, 167, 59, 61, 181, 127, 196, 21, 40, 82, 242, 32,
+ 123, 143, 168, 226, 73, 216, 176, 144, 138, 247, 106, 60, 16, 205,
+ 160, 109, 64, 63, 192}
+
+ expectedAuthtag := []byte{
+ 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145}
+
+ // Mock random reader
+ randReader = bytes.NewReader([]byte{
+ 177, 161, 244, 128, 84, 143, 225, 115, 63, 180, 3, 255, 107, 154,
+ 212, 246, 138, 7, 110, 91, 112, 46, 34, 105, 47, 130, 203, 46, 122,
+ 234, 64, 252, 227, 197, 117, 252, 2, 219, 233, 68, 180, 225, 77, 219})
+ defer resetRandReader()
+
+ enc := newAESGCM(32)
+ key, _, _ := randomKeyGenerator{size: 32}.genKey()
+ out, err := enc.encrypt(key, aad, plaintext)
+ if err != nil {
+ t.Error("Unable to encrypt:", err)
+ return
+ }
+
+ if bytes.Compare(out.ciphertext, expectedCiphertext) != 0 {
+ t.Error("Ciphertext did not match")
+ }
+ if bytes.Compare(out.tag, expectedAuthtag) != 0 {
+ t.Error("Auth tag did not match")
+ }
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/utils.go b/vendor/gopkg.in/square/go-jose.v1/utils.go
new file mode 100644
index 000000000..4ca2bc06b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/utils.go
@@ -0,0 +1,74 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+)
+
+// LoadPublicKey loads a public key from PEM/DER-encoded data.
+func LoadPublicKey(data []byte) (interface{}, error) {
+ input := data
+
+ block, _ := pem.Decode(data)
+ if block != nil {
+ input = block.Bytes
+ }
+
+ // Try to load SubjectPublicKeyInfo
+ pub, err0 := x509.ParsePKIXPublicKey(input)
+ if err0 == nil {
+ return pub, nil
+ }
+
+ cert, err1 := x509.ParseCertificate(input)
+ if err1 == nil {
+ return cert.PublicKey, nil
+ }
+
+ return nil, fmt.Errorf("square/go-jose: parse error, got '%s' and '%s'", err0, err1)
+}
+
+// LoadPrivateKey loads a private key from PEM/DER-encoded data.
+func LoadPrivateKey(data []byte) (interface{}, error) {
+ input := data
+
+ block, _ := pem.Decode(data)
+ if block != nil {
+ input = block.Bytes
+ }
+
+ var priv interface{}
+ priv, err0 := x509.ParsePKCS1PrivateKey(input)
+ if err0 == nil {
+ return priv, nil
+ }
+
+ priv, err1 := x509.ParsePKCS8PrivateKey(input)
+ if err1 == nil {
+ return priv, nil
+ }
+
+ priv, err2 := x509.ParseECPrivateKey(input)
+ if err2 == nil {
+ return priv, nil
+ }
+
+ return nil, fmt.Errorf("square/go-jose: parse error, got '%s', '%s' and '%s'", err0, err1, err2)
+}
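LoadPublicKey and LoadPrivateKey try each supported encoding in turn and hand back an interface{} suitable for NewSigner or Verify. A rough sketch of the intended use, assuming a hypothetical signing-key.pem that holds an RSA private key (the file name and algorithm choice are illustrative only):

package main

import (
    "fmt"
    "io/ioutil"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    // Read a PEM- or DER-encoded private key from disk (hypothetical path).
    pemBytes, err := ioutil.ReadFile("signing-key.pem")
    if err != nil {
        panic(err)
    }

    // LoadPrivateKey tries PKCS#1, PKCS#8 and EC private key encodings in turn.
    priv, err := jose.LoadPrivateKey(pemBytes)
    if err != nil {
        panic(err)
    }

    // The returned interface{} can be fed straight into NewSigner; the
    // algorithm must match the key type (RS256 assumes an RSA key here).
    signer, err := jose.NewSigner(jose.RS256, priv)
    if err != nil {
        panic(err)
    }
    obj, err := signer.Sign([]byte("hello"))
    if err != nil {
        panic(err)
    }
    fmt.Println(obj.FullSerialize())
}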
diff --git a/vendor/gopkg.in/square/go-jose.v1/utils_test.go b/vendor/gopkg.in/square/go-jose.v1/utils_test.go
new file mode 100644
index 000000000..6ad622da7
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/utils_test.go
@@ -0,0 +1,225 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/hex"
+ "math/big"
+ "regexp"
+ "testing"
+)
+
+// Reset random reader to original value
+func resetRandReader() {
+ randReader = rand.Reader
+}
+
+// Build big int from hex-encoded string. Strips whitespace (for testing).
+func fromHexInt(base16 string) *big.Int {
+ re := regexp.MustCompile(`\s+`)
+ val, ok := new(big.Int).SetString(re.ReplaceAllString(base16, ""), 16)
+ if !ok {
+ panic("Invalid test data")
+ }
+ return val
+}
+
+// Build big int from base64-encoded string. Strips whitespace (for testing).
+func fromBase64Int(base64 string) *big.Int {
+ re := regexp.MustCompile(`\s+`)
+ val, err := base64URLDecode(re.ReplaceAllString(base64, ""))
+ if err != nil {
+ panic("Invalid test data")
+ }
+ return new(big.Int).SetBytes(val)
+}
+
+// Decode hex-encoded string into byte array. Strips whitespace (for testing).
+func fromHexBytes(base16 string) []byte {
+ re := regexp.MustCompile(`\s+`)
+ val, err := hex.DecodeString(re.ReplaceAllString(base16, ""))
+ if err != nil {
+ panic("Invalid test data")
+ }
+ return val
+}
+
+// Decode base64-encoded string into byte array. Strips whitespace (for testing).
+func fromBase64Bytes(b64 string) []byte {
+ re := regexp.MustCompile(`\s+`)
+ val, err := base64.StdEncoding.DecodeString(re.ReplaceAllString(b64, ""))
+ if err != nil {
+ panic("Invalid test data")
+ }
+ return val
+}
+
+// Test vectors below taken from crypto/x509/x509_test.go in the Go std lib.
+
+var pkixPublicKey = `-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3VoPN9PKUjKFLMwOge6+
+wnDi8sbETGIx2FKXGgqtAKpzmem53kRGEQg8WeqRmp12wgp74TGpkEXsGae7RS1k
+enJCnma4fii+noGH7R0qKgHvPrI2Bwa9hzsH8tHxpyM3qrXslOmD45EH9SxIDUBJ
+FehNdaPbLP1gFyahKMsdfxFJLUvbUycuZSJ2ZnIgeVxwm4qbSvZInL9Iu4FzuPtg
+fINKcbbovy1qq4KvPIrXzhbY3PWDc6btxCf3SE0JdE1MCPThntB62/bLMSQ7xdDR
+FF53oIpvxe/SCOymfWq/LW849Ytv3Xwod0+wzAP8STXG4HSELS4UedPYeHJJJYcZ
++QIDAQAB
+-----END PUBLIC KEY-----`
+
+var pkcs1PrivateKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0
+fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu
+/ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu
+RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/
+EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A
+IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS
+tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V
+-----END RSA PRIVATE KEY-----`
+
+var ecdsaSHA256p384CertPem = `
+-----BEGIN CERTIFICATE-----
+MIICSjCCAdECCQDje/no7mXkVzAKBggqhkjOPQQDAjCBjjELMAkGA1UEBhMCVVMx
+EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDAS
+BgNVBAoMC0dvb2dsZSwgSW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEG
+CSqGSIb3DQEJARYUZ29sYW5nLWRldkBnbWFpbC5jb20wHhcNMTIwNTIxMDYxMDM0
+WhcNMjIwNTE5MDYxMDM0WjCBjjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlm
+b3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDASBgNVBAoMC0dvb2dsZSwg
+SW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEGCSqGSIb3DQEJARYUZ29s
+YW5nLWRldkBnbWFpbC5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARRuzRNIKRK
+jIktEmXanNmrTR/q/FaHXLhWRZ6nHWe26Fw7Rsrbk+VjGy4vfWtNn7xSFKrOu5ze
+qxKnmE0h5E480MNgrUiRkaGO2GMJJVmxx20aqkXOk59U8yGA4CghE6MwCgYIKoZI
+zj0EAwIDZwAwZAIwBZEN8gvmRmfeP/9C1PRLzODIY4JqWub2PLRT4mv9GU+yw3Gr
+PU9A3CHMdEcdw/MEAjBBO1lId8KOCh9UZunsSMfqXiVurpzmhWd6VYZ/32G+M+Mh
+3yILeYQzllt/g0rKVRk=
+-----END CERTIFICATE-----`
+
+var ecdsaSHA256p384CertDer = fromBase64Bytes(`
+MIICSjCCAdECCQDje/no7mXkVzAKBggqhkjOPQQDAjCBjjELMAkGA1UEBhMCVVMx
+EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDAS
+BgNVBAoMC0dvb2dsZSwgSW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEG
+CSqGSIb3DQEJARYUZ29sYW5nLWRldkBnbWFpbC5jb20wHhcNMTIwNTIxMDYxMDM0
+WhcNMjIwNTE5MDYxMDM0WjCBjjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlm
+b3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDASBgNVBAoMC0dvb2dsZSwg
+SW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEGCSqGSIb3DQEJARYUZ29s
+YW5nLWRldkBnbWFpbC5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARRuzRNIKRK
+jIktEmXanNmrTR/q/FaHXLhWRZ6nHWe26Fw7Rsrbk+VjGy4vfWtNn7xSFKrOu5ze
+qxKnmE0h5E480MNgrUiRkaGO2GMJJVmxx20aqkXOk59U8yGA4CghE6MwCgYIKoZI
+zj0EAwIDZwAwZAIwBZEN8gvmRmfeP/9C1PRLzODIY4JqWub2PLRT4mv9GU+yw3Gr
+PU9A3CHMdEcdw/MEAjBBO1lId8KOCh9UZunsSMfqXiVurpzmhWd6VYZ/32G+M+Mh
+3yILeYQzllt/g0rKVRk=`)
+
+var pkcs8ECPrivateKey = `
+-----BEGIN PRIVATE KEY-----
+MIHtAgEAMBAGByqGSM49AgEGBSuBBAAjBIHVMIHSAgEBBEHqkl65VsjYDQWIHfgv
+zQLPa0JZBsaJI16mjiH8k6VA4lgfK/KNldlEsY433X7wIzo43u8OpX7Nv7n8pVRH
+15XWK6GBiQOBhgAEAfDuikMI4bWsyse7t8iSCmjt9fneW/qStZuIPuVLo7mSJdud
+Cs3J/x9wOnnhLv1u+0atnq5HKKdL4ff3itJPlhmSAQzByKQ5LTvB7d6fn95GJVK/
+hNuS5qGBpB7qeMXVFoki0/2RZIOway8/fXjmNYwe4v/XB5LLn4hcTvEUGYcF8M9K
+-----END PRIVATE KEY-----`
+
+var ecPrivateKey = `
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu
+N8yrywiQaTDEqn1zVcLwrnqoQux3gWN1jxugBwYFK4EEACOhgYkDgYYABAFJgaM/
+2a3+gE6Khm/1PYftqNwAzQ21HSLp27q2lTN+GBFho691ARFRkr9UzlQ8gRnhkTbu
+yGfASamlHsYlr3Tv+gFc4BY8SU0q8kzpQ0dOHWFk7dfGFmKwhJrSFIIOeRn/LY03
+XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA==
+-----END EC PRIVATE KEY-----`
+
+var ecPrivateKeyDer = fromBase64Bytes(`
+MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu
+N8yrywiQaTDEqn1zVcLwrnqoQux3gWN1jxugBwYFK4EEACOhgYkDgYYABAFJgaM/
+2a3+gE6Khm/1PYftqNwAzQ21HSLp27q2lTN+GBFho691ARFRkr9UzlQ8gRnhkTbu
+yGfASamlHsYlr3Tv+gFc4BY8SU0q8kzpQ0dOHWFk7dfGFmKwhJrSFIIOeRn/LY03
+XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA==`)
+
+var invalidPemKey = `
+-----BEGIN PUBLIC KEY-----
+MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu
+XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA==
+-----END PUBLIC KEY-----`
+
+func TestLoadPublicKey(t *testing.T) {
+ pub, err := LoadPublicKey([]byte(pkixPublicKey))
+ switch pub.(type) {
+ case *rsa.PublicKey:
+ default:
+ t.Error("failed to parse RSA PKIX public key:", err)
+ }
+
+ pub, err = LoadPublicKey([]byte(ecdsaSHA256p384CertPem))
+ switch pub.(type) {
+ case *ecdsa.PublicKey:
+ default:
+ t.Error("failed to parse ECDSA X.509 cert:", err)
+ }
+
+ pub, err = LoadPublicKey([]byte(ecdsaSHA256p384CertDer))
+ switch pub.(type) {
+ case *ecdsa.PublicKey:
+ default:
+ t.Error("failed to parse ECDSA X.509 cert:", err)
+ }
+
+ pub, err = LoadPublicKey([]byte("###"))
+ if err == nil {
+ t.Error("should not parse invalid key")
+ }
+
+ pub, err = LoadPublicKey([]byte(invalidPemKey))
+ if err == nil {
+ t.Error("should not parse invalid key")
+ }
+}
+
+func TestLoadPrivateKey(t *testing.T) {
+ priv, err := LoadPrivateKey([]byte(pkcs1PrivateKey))
+ switch priv.(type) {
+ case *rsa.PrivateKey:
+ default:
+ t.Error("failed to parse RSA PKCS1 private key:", err)
+ }
+
+ priv, err = LoadPrivateKey([]byte(pkcs8ECPrivateKey))
+ if _, ok := priv.(*ecdsa.PrivateKey); !ok {
+ t.Error("failed to parse EC PKCS8 private key:", err)
+ }
+
+ priv, err = LoadPrivateKey([]byte(ecPrivateKey))
+ if _, ok := priv.(*ecdsa.PrivateKey); !ok {
+ t.Error("failed to parse EC private key:", err)
+ }
+
+ priv, err = LoadPrivateKey([]byte(ecPrivateKeyDer))
+ if _, ok := priv.(*ecdsa.PrivateKey); !ok {
+ t.Error("failed to parse EC private key:", err)
+ }
+
+ priv, err = LoadPrivateKey([]byte("###"))
+ if err == nil {
+ t.Error("should not parse invalid key")
+ }
+
+ priv, err = LoadPrivateKey([]byte(invalidPemKey))
+ if err == nil {
+ t.Error("should not parse invalid key")
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index 085cddc44..b13ab9f07 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -251,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
- if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
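The added n.implicit check means an explicitly quoted empty scalar is no longer short-circuited as null before custom unmarshaling runs, while a bare (implicit) empty value still is. A minimal sketch of that distinction, assuming gopkg.in/yaml.v2 and an illustrative captureString type:

package main

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

// captureString records whether UnmarshalYAML was invoked and what it decoded.
type captureString struct {
    called bool
    value  string
}

func (c *captureString) UnmarshalYAML(unmarshal func(interface{}) error) error {
    c.called = true
    return unmarshal(&c.value)
}

func main() {
    var doc struct {
        Quoted captureString `yaml:"quoted"`
        Bare   captureString `yaml:"bare"`
    }

    // `quoted` is an explicitly quoted empty string; `bare` is an implicit null.
    input := "quoted: \"\"\nbare:\n"
    if err := yaml.Unmarshal([]byte(input), &doc); err != nil {
        panic(err)
    }

    // With the n.implicit check, the quoted empty string reaches the custom
    // unmarshaler as ""; the bare (implicit) null is still skipped.
    fmt.Printf("quoted: called=%v value=%q\n", doc.Quoted.called, doc.Quoted.value)
    fmt.Printf("bare:   called=%v\n", doc.Bare.called)
}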
diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go
index c159760b6..3da6fadf8 100644
--- a/vendor/gopkg.in/yaml.v2/decode_test.go
+++ b/vendor/gopkg.in/yaml.v2/decode_test.go
@@ -660,6 +660,7 @@ var unmarshalerTests = []struct {
{`_: BAR!`, "!!str", "BAR!"},
{`_: "BAR!"`, "!!str", "BAR!"},
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+ {`_: ""`, "!!str", ""},
}
var unmarshalerResult = map[int]error{}